import gensim
from gensim.models.doc2vec import TaggedDocument
from collections import defaultdict
from sklearn.cluster import KMeans

# Build the TaggedDocument corpus: one tagged sentence per non-trivial tweet.
# `cleaned_tweets` is assumed to be an iterable of preprocessed tweet strings
# defined earlier in the program — TODO confirm against the caller.
taggeddocs = []
tag2tweetmap = {}  # tag -> raw tweet text, so cluster members can be recovered later
for index, tweet in enumerate(cleaned_tweets):
    if len(tweet) > 2:  # skip (near-)empty tweets
        tag = u'SENT_{:d}'.format(index)
        taggeddocs.append(
            TaggedDocument(words=gensim.utils.to_unicode(tweet).split(), tags=[tag]))
        tag2tweetmap[tag] = tweet

# dm=0 selects the PV-DBOW training algorithm.
# NOTE(review): gensim >= 4.0 renamed `size` -> `vector_size` and removed
# `model.iter` (use `model.epochs`); this code targets the gensim 3.x API.
model = gensim.models.Doc2Vec(taggeddocs, dm=0, alpha=0.025, size=20,
                              min_alpha=0.025, min_count=0)

# Manual learning-rate schedule: retrain repeatedly while linearly
# decaying alpha, pinning min_alpha to alpha so each pass uses a fixed rate.
for epoch in range(60):
    if epoch % 20 == 0:
        print('Now training epoch %s' % epoch)
    model.train(taggeddocs, total_examples=model.corpus_count, epochs=model.iter)
    model.alpha -= 0.002
    model.min_alpha = model.alpha

# BUG FIX: `Doc2Vec` has no `syn0` attribute (that belongs to the *word*
# vector layer). The per-document vectors are `model.docvecs.doctag_syn0`
# in gensim 3.x (`model.dv.vectors` in gensim >= 4.0).
dataSet = model.docvecs.doctag_syn0

# Cluster the document vectors into 6 topics; fit_predict returns the
# cluster index assigned to each document vector, in order.
kmeansClustering = KMeans(n_clusters=6)
centroidIndx = kmeansClustering.fit_predict(dataSet)

# BUG FIX: the original code only appended words when the topic key already
# existed; the first tweet of every topic was dropped (its `else` branch
# created an empty list and discarded the words). defaultdict keeps them all.
topic2wordsmap = defaultdict(list)
for i in range(len(dataSet)):
    tag = model.docvecs.index_to_doctag(i)  # map row index back to its SENT_ tag
    topic2wordsmap[centroidIndx[i]].extend(tag2tweetmap[tag].split())

# Show the first few words collected for each topic.
for topic, words in topic2wordsmap.items():
    print("Topic {} has words {}".format(topic, words[:5]))
所以我试图使用 doc2vec 方法找出最常用的单词和主题列表。但是程序抛出了属性错误：“'Doc2Vec' object has no attribute 'syn0'”，我不知道该如何处理。
答案 0（得分：0）
我发现这个doc2vec教程可能会给你一些关于你的问题的线索。
https://medium.com/@mishra.thedeepak/doc2vec-simple-implementation-example-df2afbbfbad5