Tidied up

syuilo
2018-03-29 20:32:18 +09:00
parent 8a279a4656
commit cf33e483f7
552 changed files with 360 additions and 1311 deletions


@@ -1,49 +0,0 @@
const bayes = require('./naive-bayes.js');
const MeCab = require('./mecab');
import Post from '../../server/api/models/post';

/**
 * Learns from posts and predicts the category of a given post
 */
export default class Categorizer {
	private classifier: any;
	private mecab: any;

	constructor() {
		this.mecab = new MeCab();

		// BIND -----------------------------------
		this.tokenizer = this.tokenizer.bind(this);
	}

	private tokenizer(text: string) {
		const tokens = this.mecab.parseSync(text)
			// Restrict to nouns (名詞)
			.filter(token => token[1] === '名詞')
			// Extract the surface form
			.map(token => token[0]);

		return tokens;
	}

	public async init() {
		this.classifier = bayes({
			tokenizer: this.tokenizer
		});

		// Fetch the training data: posts whose category was verified by a human
		const verifiedPosts = await Post.find({
			is_category_verified: true
		});

		// Train
		verifiedPosts.forEach(post => {
			this.classifier.learn(post.text, post.category);
		});
	}

	public async predict(text: string) {
		return this.classifier.categorize(text);
	}
}
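
// Usage sketch (illustrative, not part of the original file):
//   const categorizer = new Categorizer();
//   await categorizer.init();
//   const category = await categorizer.predict('なにかの投稿テキスト');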


@@ -1,120 +0,0 @@
import * as URL from 'url';
import Post from '../../server/api/models/post';
import User from '../../server/api/models/user';
import parse from '../../server/api/common/text';

process.on('unhandledRejection', console.dir);

function tokenize(text: string) {
	if (text == null) return [];

	// Parse
	const ast = parse(text);

	const domains = ast
		// Extract URLs
		.filter(t => t.type == 'url' || t.type == 'link')
		.map(t => URL.parse(t.url).hostname);

	return domains;
}

// Fetch all users
User.find({}, {
	fields: {
		_id: true
	}
}).then(users => {
	// Process users one by one; on error, wait a second and retry the same user
	let i = -1;
	const x = cb => {
		if (++i == users.length) return cb();
		extractDomainsOne(users[i]._id).then(() => x(cb), err => {
			console.error(err);
			setTimeout(() => {
				i--;
				x(cb);
			}, 1000);
		});
	};
	x(() => {
		console.log('complete');
	});
});

function extractDomainsOne(id) {
	return new Promise(async (resolve, reject) => {
		process.stdout.write(`extracting domains of ${id} ...`);

		// Fetch recent posts
		const recentPosts = await Post.find({
			userId: id,
			text: {
				$exists: true
			}
		}, {
			sort: {
				_id: -1
			},
			limit: 10000,
			fields: {
				_id: false,
				text: true
			}
		});

		// Abort if there are not enough posts
		if (recentPosts.length < 100) {
			process.stdout.write(' >>> -\n');
			return resolve();
		}

		const domains = {};

		// Extract domains from recent posts
		recentPosts.forEach(post => {
			const domainsOfPost = tokenize(post.text);

			domainsOfPost.forEach(domain => {
				if (domains[domain]) {
					domains[domain]++;
				} else {
					domains[domain] = 1;
				}
			});
		});

		// Calc peak (the frequency of the most frequent domain)
		let peak = 0;
		Object.keys(domains).forEach(domain => {
			if (domains[domain] > peak) peak = domains[domain];
		});

		// Sort domains by frequency
		const domainsSorted = Object.keys(domains).sort((a, b) => domains[b] - domains[a]);

		// Pick the top 10 domains
		const topDomains = domainsSorted.slice(0, 10);

		process.stdout.write(' >>> ' + topDomains.join(', ') + '\n');

		// Make domains object (includes weights normalized by the peak)
		const domainsObj = topDomains.map(domain => ({
			domain: domain,
			weight: domains[domain] / peak
		}));

		// Save
		User.update({ _id: id }, {
			$set: {
				domains: domainsObj
			}
		}).then(() => {
			resolve();
		}, err => {
			reject(err);
		});
	});
}
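
// Note on the weights (worked example, for illustration): with peak = 100,
// a domain seen 100 times gets weight 1.0 and one seen 25 times gets 0.25,
// so every stored weight falls in (0, 1] regardless of how active the user is.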


@@ -1,154 +0,0 @@
const moji = require('moji');
const MeCab = require('./mecab');
import Post from '../../server/api/models/post';
import User from '../../server/api/models/user';
import parse from '../../server/api/common/text';

process.on('unhandledRejection', console.dir);

const stopwords = [
	'ー',

	'の', 'に', 'は', 'を', 'た', 'が', 'で', 'て', 'と', 'し', 'れ', 'さ',
	'ある', 'いる', 'も', 'する', 'から', 'な', 'こと', 'として', 'い', 'や', 'れる',
	'など', 'なっ', 'ない', 'この', 'ため', 'その', 'あっ', 'よう', 'また', 'もの',
	'という', 'あり', 'まで', 'られ', 'なる', 'へ', 'か', 'だ', 'これ', 'によって',
	'により', 'おり', 'より', 'による', 'ず', 'なり', 'られる', 'において', 'ば', 'なかっ',
	'なく', 'しかし', 'について', 'せ', 'だっ', 'その後', 'できる', 'それ', 'う', 'ので',
	'なお', 'のみ', 'でき', 'き', 'つ', 'における', 'および', 'いう', 'さらに', 'でも',
	'ら', 'たり', 'その他', 'に関する', 'たち', 'ます', 'ん', 'なら', 'に対して', '特に',
	'せる', '及び', 'これら', 'とき', 'では', 'にて', 'ほか', 'ながら', 'うち', 'そして',
	'とともに', 'ただし', 'かつて', 'それぞれ', 'または', 'お', 'ほど', 'ものの', 'に対する',
	'ほとんど', 'と共に', 'といった', 'です', 'とも', 'ところ', 'ここ', '感じ', '気持ち',
	'あと', '自分', 'すき', '()',

	'about', 'after', 'all', 'also', 'am', 'an', 'and', 'another', 'any', 'are', 'as', 'at', 'be',
	'because', 'been', 'before', 'being', 'between', 'both', 'but', 'by', 'came', 'can',
	'come', 'could', 'did', 'do', 'each', 'for', 'from', 'get', 'got', 'has', 'had',
	'he', 'have', 'her', 'here', 'him', 'himself', 'his', 'how', 'if', 'in', 'into',
	'is', 'it', 'like', 'make', 'many', 'me', 'might', 'more', 'most', 'much', 'must',
	'my', 'never', 'now', 'of', 'on', 'only', 'or', 'other', 'our', 'out', 'over',
	'said', 'same', 'see', 'should', 'since', 'some', 'still', 'such', 'take', 'than',
	'that', 'the', 'their', 'them', 'then', 'there', 'these', 'they', 'this', 'those',
	'through', 'to', 'too', 'under', 'up', 'very', 'was', 'way', 'we', 'well', 'were',
	'what', 'where', 'which', 'while', 'who', 'with', 'would', 'you', 'your', 'a', 'i'
];

const mecab = new MeCab();

function tokenize(text: string) {
	if (text == null) return [];

	// Parse
	const ast = parse(text);

	const plain = ast
		// Text only (i.e. leave out URLs and the like)
		.filter(t => t.type == 'text' || t.type == 'bold')
		.map(t => t.content)
		.join('');

	const tokens = mecab.parseSync(plain)
		// Keywords only: proper (固有名詞) or general (一般) nouns
		.filter(token => token[1] == '名詞' && (token[2] == '固有名詞' || token[2] == '一般'))
		// Extract and normalize: full-width alphanumerics to half-width,
		// half-width kana to full-width, everything to lower case
		.map(token => moji(token[0]).convert('ZE', 'HE').convert('HK', 'ZK').toString().toLowerCase())
		// Drop stopwords, one-character words, and words containing ! or ?
		.filter(word =>
			stopwords.indexOf(word) === -1 &&
			word.length > 1 &&
			word.indexOf('!') === -1 &&
			word.indexOf('!') === -1 &&
			word.indexOf('?') === -1 &&
			word.indexOf('?') === -1);

	return tokens;
}

// Fetch all users
User.find({}, {
	fields: {
		_id: true
	}
}).then(users => {
	// Process users one by one; on error, wait a second and retry the same user
	let i = -1;
	const x = cb => {
		if (++i == users.length) return cb();
		extractKeywordsOne(users[i]._id).then(() => x(cb), err => {
			console.error(err);
			setTimeout(() => {
				i--;
				x(cb);
			}, 1000);
		});
	};
	x(() => {
		console.log('complete');
	});
});

function extractKeywordsOne(id) {
	return new Promise(async (resolve, reject) => {
		process.stdout.write(`extracting keywords of ${id} ...`);

		// Fetch recent posts
		const recentPosts = await Post.find({
			userId: id,
			text: {
				$exists: true
			}
		}, {
			sort: {
				_id: -1
			},
			limit: 10000,
			fields: {
				_id: false,
				text: true
			}
		});

		// Abort if there are not enough posts
		if (recentPosts.length < 300) {
			process.stdout.write(' >>> -\n');
			return resolve();
		}

		const keywords = {};

		// Extract keywords from recent posts
		recentPosts.forEach(post => {
			const keywordsOfPost = tokenize(post.text);

			keywordsOfPost.forEach(keyword => {
				if (keywords[keyword]) {
					keywords[keyword]++;
				} else {
					keywords[keyword] = 1;
				}
			});
		});

		// Sort keywords by frequency
		const keywordsSorted = Object.keys(keywords).sort((a, b) => keywords[b] - keywords[a]);

		// Pick the top 10 keywords
		const topKeywords = keywordsSorted.slice(0, 10);

		process.stdout.write(' >>> ' + topKeywords.join(', ') + '\n');

		// Save
		User.update({ _id: id }, {
			$set: {
				keywords: topKeywords
			}
		}).then(() => {
			resolve();
		}, err => {
			reject(err);
		});
	});
}
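
// Illustrative sketch of the tokenize() pipeline (actual tokens depend on the
// MeCab dictionary in use):
//   'MISSKEYと京都が好き' → keep nouns → ['MISSKEY', '京都'] → normalize → ['misskey', '京都']
// Particles such as 'と', stopwords, and one-character words never survive.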


@@ -1,85 +0,0 @@
// Original source code: https://github.com/hecomi/node-mecab-async
// CUSTOMIZED BY SYUILO

var exec = require('child_process').exec;
var execSync = require('child_process').execSync;
var sq = require('shell-quote');

const config = require('../../conf').default;

// for backward compatibility
var MeCab = function() {};

MeCab.prototype = {
    command : config.analysis.mecab_command ? config.analysis.mecab_command : 'mecab',
    _format: function(arrayResult) {
        var result = [];
        if (!arrayResult) { return result; }
        // Reference: http://mecab.googlecode.com/svn/trunk/mecab/doc/index.html
        // Each line is: surface form\tPOS,POS subcategory 1,POS subcategory 2,POS subcategory 3,
        //               conjugated form,conjugation type,base form,reading,pronunciation
        arrayResult.forEach(function(parsed) {
            if (parsed.length <= 8) { return; }
            result.push({
                kanji         : parsed[0],
                lexical       : parsed[1],
                compound      : parsed[2],
                compound2     : parsed[3],
                compound3    : parsed[4],
                conjugation   : parsed[5],
                inflection    : parsed[6],
                original      : parsed[7],
                reading       : parsed[8],
                pronunciation : parsed[9] || ''
            });
        });
        return result;
    },
    _shellCommand : function(str) {
        // Pipe the (shell-quoted) text to the mecab command
        return sq.quote(['echo', str]) + ' | ' + this.command;
    },
    _parseMeCabResult : function(result) {
        return result.split('\n').map(function(line) {
            return line.replace('\t', ',').split(',');
        });
    },
    parse : function(str, callback) {
        process.nextTick(function() { // for bug
            exec(MeCab._shellCommand(str), function(err, result) {
                if (err) { return callback(err); }
                callback(err, MeCab._parseMeCabResult(result).slice(0, -2));
            });
        });
    },
    parseSync : function(str) {
        var result = execSync(MeCab._shellCommand(str));
        // slice off the trailing 'EOS' marker and empty line
        return MeCab._parseMeCabResult(String(result)).slice(0, -2);
    },
    parseFormat : function(str, callback) {
        MeCab.parse(str, function(err, result) {
            if (err) { return callback(err); }
            callback(err, MeCab._format(result));
        });
    },
    parseSyncFormat : function(str) {
        return MeCab._format(MeCab.parseSync(str));
    },
    _wakatsu : function(arr) {
        return arr.map(function(data) { return data[0]; });
    },
    wakachi : function(str, callback) {
        MeCab.parse(str, function(err, arr) {
            if (err) { return callback(err); }
            callback(null, MeCab._wakatsu(arr));
        });
    },
    wakachiSync : function(str) {
        var arr = MeCab.parseSync(str);
        return MeCab._wakatsu(arr);
    }
};

// Expose the prototype methods statically as well
for (var x in MeCab.prototype) {
    MeCab[x] = MeCab.prototype[x];
}

module.exports = MeCab;
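
// Usage sketch (illustrative):
//   const MeCab = require('./mecab');
//   const mecab = new MeCab();
//   mecab.parseSync('すもももももももものうち');
//   // → e.g. [['すもも', '名詞', '一般', ...], ['も', '助詞', ...], ...]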


@@ -1,302 +0,0 @@
// Original source code: https://github.com/ttezel/bayes/blob/master/lib/naive_bayes.js (commit: 2c20d3066e4fc786400aaedcf3e42987e52abe3c)
// CUSTOMIZED BY SYUILO

/*
    Expose our naive-bayes generator function
 */
module.exports = function (options) {
  return new Naivebayes(options)
}

// keys we use to serialize a classifier's state
var STATE_KEYS = module.exports.STATE_KEYS = [
  'categories', 'docCount', 'totalDocuments', 'vocabulary', 'vocabularySize',
  'wordCount', 'wordFrequencyCount', 'options'
];

/**
 * Initializes a NaiveBayes instance from a JSON state representation.
 * Use this with classifier.toJson().
 *
 * @param  {String} jsonStr   state representation obtained by classifier.toJson()
 * @return {NaiveBayes}       Classifier
 */
module.exports.fromJson = function (jsonStr) {
  var parsed;
  try {
    parsed = JSON.parse(jsonStr)
  } catch (e) {
    throw new Error('Naivebayes.fromJson expects a valid JSON string.')
  }
  // init a new classifier
  var classifier = new Naivebayes(parsed.options)

  // override the classifier's state
  STATE_KEYS.forEach(function (k) {
    if (!parsed[k]) {
      throw new Error('Naivebayes.fromJson: JSON string is missing an expected property: `' + k + '`.')
    }
    classifier[k] = parsed[k]
  })

  return classifier
}

/**
 * Given an input string, tokenize it into an array of word tokens.
 * This is the default tokenization function used if user does not provide one in `options`.
 *
 * @param  {String} text
 * @return {Array}
 */
var defaultTokenizer = function (text) {
  // remove punctuation from text - remove anything that isn't a word char or a space
  var rgxPunctuation = /[^(a-zA-ZА-Яа-я0-9_)+\s]/g
  var sanitized = text.replace(rgxPunctuation, ' ')
  return sanitized.split(/\s+/)
}

/**
 * Naive-Bayes Classifier
 *
 * This is a naive-bayes classifier that uses Laplace Smoothing.
 *
 * Takes an (optional) options object containing:
 *   - `tokenizer`  => custom tokenization function
 *
 */
function Naivebayes (options) {
  // set options object
  this.options = {}
  if (typeof options !== 'undefined') {
    if (!options || typeof options !== 'object' || Array.isArray(options)) {
      throw TypeError('NaiveBayes got invalid `options`: `' + options + '`. Pass in an object.')
    }
    this.options = options
  }

  this.tokenizer = this.options.tokenizer || defaultTokenizer

  // initialize our vocabulary and its size
  this.vocabulary = {}
  this.vocabularySize = 0

  // number of documents we have learned from
  this.totalDocuments = 0

  // document frequency table for each of our categories
  // => for each category, how often were documents mapped to it
  this.docCount = {}

  // for each category, how many words total were mapped to it
  this.wordCount = {}

  // word frequency table for each category
  // => for each category, how frequent was a given word mapped to it
  this.wordFrequencyCount = {}

  // hashmap of our category names
  this.categories = {}
}

/**
 * Initialize each of our data structure entries for this new category
 *
 * @param  {String} categoryName
 */
Naivebayes.prototype.initializeCategory = function (categoryName) {
  if (!this.categories[categoryName]) {
    this.docCount[categoryName] = 0
    this.wordCount[categoryName] = 0
    this.wordFrequencyCount[categoryName] = {}
    this.categories[categoryName] = true
  }
  return this
}

/**
 * train our naive-bayes classifier by telling it what `category`
 * the `text` corresponds to.
 *
 * @param  {String} text
 * @param  {String} category
 */
Naivebayes.prototype.learn = function (text, category) {
  var self = this

  // initialize category data structures if we've never seen this category
  self.initializeCategory(category)

  // update our count of how many documents mapped to this category
  self.docCount[category]++

  // update the total number of documents we have learned from
  self.totalDocuments++

  // normalize the text into a word array
  var tokens = self.tokenizer(text)

  // get a frequency count for each token in the text
  var frequencyTable = self.frequencyTable(tokens)

  /*
      Update our vocabulary and our word frequency count for this category
   */
  Object
  .keys(frequencyTable)
  .forEach(function (token) {
    // add this word to our vocabulary if not already existing
    if (!self.vocabulary[token]) {
      self.vocabulary[token] = true
      self.vocabularySize++
    }

    var frequencyInText = frequencyTable[token]

    // update the frequency information for this word in this category
    if (!self.wordFrequencyCount[category][token])
      self.wordFrequencyCount[category][token] = frequencyInText
    else
      self.wordFrequencyCount[category][token] += frequencyInText

    // update the count of all words we have seen mapped to this category
    self.wordCount[category] += frequencyInText
  })

  return self
}

/**
 * Determine what category `text` belongs to.
 *
 * @param  {String} text
 * @return {String} category
 */
Naivebayes.prototype.categorize = function (text) {
  var self = this
    , maxProbability = -Infinity
    , chosenCategory = null

  var tokens = self.tokenizer(text)
  var frequencyTable = self.frequencyTable(tokens)

  // iterate thru our categories to find the one with max probability for this text
  Object
  .keys(self.categories)
  .forEach(function (category) {

    // start by calculating the overall probability of this category
    // => out of all documents we've ever looked at, how many were
    //    mapped to this category
    var categoryProbability = self.docCount[category] / self.totalDocuments

    // take the log to avoid underflow
    var logProbability = Math.log(categoryProbability)

    // now determine P( w | c ) for each word `w` in the text
    Object
    .keys(frequencyTable)
    .forEach(function (token) {
      var frequencyInText = frequencyTable[token]
      var tokenProbability = self.tokenProbability(token, category)
      // console.log('token: %s category: `%s` tokenProbability: %d', token, category, tokenProbability)

      // determine the log of the P( w | c ) for this word
      logProbability += frequencyInText * Math.log(tokenProbability)
    })

    if (logProbability > maxProbability) {
      maxProbability = logProbability
      chosenCategory = category
    }
  })

  return chosenCategory
}

/**
 * Calculate probability that a `token` belongs to a `category`
 *
 * @param  {String} token
 * @param  {String} category
 * @return {Number} probability
 */
Naivebayes.prototype.tokenProbability = function (token, category) {
  // how many times this word has occurred in documents mapped to this category
  var wordFrequencyCount = this.wordFrequencyCount[category][token] || 0

  // what is the count of all words that have ever been mapped to this category
  var wordCount = this.wordCount[category]

  // use Laplace Add-1 Smoothing equation:
  // P( w | c ) = ( count(w, c) + 1 ) / ( wordCount(c) + vocabularySize )
  return ( wordFrequencyCount + 1 ) / ( wordCount + this.vocabularySize )
}

/**
 * Build a frequency hashmap where
 *   - the keys are the entries in `tokens`
 *   - the values are the frequency of each entry in `tokens`
 *
 * @param  {Array} tokens  Normalized word array
 * @return {Object}
 */
Naivebayes.prototype.frequencyTable = function (tokens) {
  var frequencyTable = Object.create(null)

  tokens.forEach(function (token) {
    if (!frequencyTable[token])
      frequencyTable[token] = 1
    else
      frequencyTable[token]++
  })

  return frequencyTable
}

/**
 * Dump the classifier's state as a JSON string.
 * @return {String} Representation of the classifier.
 */
Naivebayes.prototype.toJson = function () {
  var state = {}
  var self = this
  STATE_KEYS.forEach(function (k) {
    state[k] = self[k]
  })
  var jsonStr = JSON.stringify(state)
  return jsonStr
}

// (original method)
Naivebayes.prototype.export = function () {
  var state = {}
  var self = this
  STATE_KEYS.forEach(function (k) {
    state[k] = self[k]
  })
  return state
}

module.exports.import = function (data) {
  var parsed = data

  // init a new classifier
  var classifier = new Naivebayes()

  // override the classifier's state
  STATE_KEYS.forEach(function (k) {
    if (!parsed[k]) {
      throw new Error('Naivebayes.import: data is missing an expected property: `' + k + '`.')
    }
    classifier[k] = parsed[k]
  })

  return classifier
}
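
// Usage sketch (illustrative, mirroring the upstream bayes README):
//   const bayes = require('./naive-bayes.js');
//   const classifier = bayes();
//   classifier.learn('amazing, awesome movie!! Yeah!!', 'positive');
//   classifier.learn('terrible, shitty thing. Damn.', 'negative');
//   classifier.categorize('awesome, cool, amazing!! Yay.'); // → 'positive'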


@@ -1,35 +0,0 @@
import Post from '../../server/api/models/post';
import Core from './core';

const c = new Core();

c.init().then(() => {
	// Fetch all posts (whose category has not been verified by a human)
	Post.find({
		text: {
			$exists: true
		},
		is_category_verified: {
			$ne: true
		}
	}, {
		sort: {
			_id: -1
		},
		fields: {
			_id: true,
			text: true
		}
	}).then(posts => {
		posts.forEach(async post => {
			console.log(`predicting... ${post._id}`);
			// predict() is async, so await its result before saving it
			const category = await c.predict(post.text);

			Post.update({ _id: post._id }, {
				$set: {
					category: category
				}
			});
		});
	});
});
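
// Design note: posts with is_category_verified: true serve as the training set
// (see Categorizer.init above), and this script back-fills a predicted category
// for every post that has not been verified by a human.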


@@ -1,45 +0,0 @@
import Post from '../../server/api/models/post';
import User from '../../server/api/models/user';

export async function predictOne(id) {
	console.log(`predict interest of ${id} ...`);

	// TODO: include reposts etc. as well
	const recentPosts = await Post.find({
		userId: id,
		category: {
			$exists: true
		}
	}, {
		sort: {
			_id: -1
		},
		limit: 1000,
		fields: {
			_id: false,
			category: true
		}
	});

	// Tally how often each category appears among the recent posts
	const categories = {};

	recentPosts.forEach(post => {
		if (categories[post.category]) {
			categories[post.category]++;
		} else {
			categories[post.category] = 1;
		}
	});

	// Return the per-category counts so callers can rank the user's interests
	return categories;
}

export async function predictAll() {
	const allUsers = await User.find({}, {
		fields: {
			_id: true
		}
	});

	allUsers.forEach(user => {
		predictOne(user._id);
	});
}
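
// Sketch of the intended use (assumed from the function names, not part of the
// original file):
//   const counts = await predictOne(user._id);
//   const interests = Object.keys(counts).sort((a, b) => counts[b] - counts[a]);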