First, segment the news text with the jieba (结巴) tokenizer and save the segmented output to seg201708.txt for later use.
Install the jieba package: pip install jieba
# -*- coding: utf-8 -*-
import jieba
import io

# Load a custom financial-domain user dictionary
jieba.load_userdict("financialWords.txt")

def main():
    # Read the raw news text and write out space-separated tokens
    with io.open('news201708.txt', 'r', encoding='utf-8') as content, \
         io.open('seg201708.txt', 'w', encoding='utf-8') as output:
        for line in content:
            # jieba.cut returns a generator of tokens
            seg_list = jieba.cut(line)
            output.write(' '.join(seg_list))

if __name__ == '__main__':
    main()
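For reference, jieba.load_userdict expects one entry per line in the form "word frequency POS-tag", where the frequency and part-of-speech tag are optional. A minimal sketch of what financialWords.txt might contain (the entries are illustrative assumptions, not taken from the original dictionary):

量化宽松 10 n
做市商 10 n
沪深300 10 nz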
Training is done with Python's gensim package.
Install the gensim package: pip install gensim
from gensim.models import word2vec

def main():
    num_features = 300     # Word vector dimensionality
    min_word_count = 10    # Minimum word count
    num_workers = 16       # Number of threads to run in parallel
    context = 10           # Context window size
    downsampling = 1e-3    # Downsample setting for frequent words

    sentences = word2vec.Text8Corpus("seg201708.txt")
    # sg=1 selects the skip-gram architecture (sg=0 would be CBOW)
    model = word2vec.Word2Vec(sentences, workers=num_workers,
                              size=num_features, min_count=min_word_count,
                              window=context, sg=1, sample=downsampling)
    # Precompute the normalized vectors; replace=True saves memory,
    # but the model cannot be trained any further afterwards
    model.init_sims(replace=True)
    # Save the model for later use
    model.save("model201708")
    # A saved model can be loaded and trained further on new sentences:
    # model = gensim.models.Word2Vec.load('/tmp/mymodel')
    # model.train(more_sentences)

if __name__ == "__main__":
    main()
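The commented-out lines above gesture at continued training. A minimal sketch of that workflow with the same pre-4.0 gensim API, assuming the model was saved without init_sims(replace=True) and that seg201709.txt is a hypothetical new month of segmented text:

from gensim.models import word2vec

model = word2vec.Word2Vec.load("model201708")
more_sentences = word2vec.Text8Corpus("seg201709.txt")  # hypothetical new corpus
model.build_vocab(more_sentences, update=True)          # add any new vocabulary
model.train(more_sentences,
            total_examples=model.corpus_count,          # updated by build_vocab
            epochs=model.iter)                          # configured epoch count
model.save("model201708")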
model = word2vec.Word2Vec.load('model201708')  # load the saved model
model.most_similar(positive=['woman', 'king'], negative=['man'])  # infer similar words from the given analogy
model.doesnt_match("breakfast cereal dinner lunch".split())  # find the word that does not belong
model.similarity('woman', 'man')  # compute the similarity of two words
model['computer']  # get the word vector of a word
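Note that the English queries above are the standard examples from the gensim documentation and only make sense for a model trained on English text. Against the Chinese financial model trained here you would query tokens from its own vocabulary; the words below are illustrative assumptions and may not appear in the actual vocabulary:

model.most_similar('股票')        # hypothetical: nearest neighbours of "股票" (stock)
model.similarity('银行', '金融')  # hypothetical: similarity of "银行" (bank) and "金融" (finance)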