So, I copied some source code for building a system that can compute TF-IDF, and here is the code:
# module imports
from __future__ import division, unicode_literals
import math
import string
import re
import os

from textblob import TextBlob as tb  # the original used "from text.blob import TextBlob as tb", which only works on very old TextBlob releases

# dictionary placeholder (not actually used below)
words = {}

def tf(word, blob):
    # term frequency: how often the word occurs, divided by the document's total word count
    return blob.words.count(word) / len(blob.words)

def n_containing(word, bloblist):
    # number of documents in the corpus that contain the word
    return sum(1 for blob in bloblist if word in blob.words)

def idf(word, bloblist):
    # inverse document frequency; the +1 avoids division by zero
    return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))

def tfidf(word, blob, bloblist):
    return tf(word, blob) * idf(word, bloblist)

# strip punctuation and lower-case the documents
regex = re.compile('[%s]' % re.escape(string.punctuation))

f = open('D:/article/sport/a.txt', 'r')
var = f.read()
var = regex.sub(' ', var)
var = var.lower()
document1 = tb(var)

f = open('D:/article/food/b.txt', 'r')
var = f.read()
var = regex.sub(' ', var)  # the original skipped punctuation removal for this file
var = var.lower()
document2 = tb(var)

bloblist = [document1, document2]
for i, blob in enumerate(bloblist):
    print("Top words in document {}".format(i + 1))
    scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
    sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    for word, score in sorted_words[:50]:
        print("Word: {}, TF-IDF: {}".format(word, round(score, 5)))
But the problem is: I want to put all the files in the sport folder into one corpus, and the food articles in the food folder into another corpus, so that the system gives one result per corpus. Right now I can only compare individual files, but I want to compare between corpora (a rough sketch of what I mean is below). Sorry for asking such a question; any help would be appreciated.
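To make the idea concrete, here is a rough sketch of the direction I have in mind. This is only my own guess at an approach: it reuses regex and tb from the code above, assumes every .txt file under a folder such as D:/article/sport belongs to that corpus, and folder_to_blob is just a helper name I made up.

import os

def folder_to_blob(folder):
    # combine every .txt file in the folder into one big document,
    # so each folder becomes a single corpus-level TextBlob
    texts = []
    for name in os.listdir(folder):
        if name.endswith('.txt'):
            with open(os.path.join(folder, name), 'r') as f:
                texts.append(f.read())
    combined = ' '.join(texts)
    combined = regex.sub(' ', combined).lower()
    return tb(combined)

# one blob per corpus instead of one blob per file
document1 = folder_to_blob('D:/article/sport')
document2 = folder_to_blob('D:/article/food')
bloblist = [document1, document2]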
Thanks
Answer 0 (score: 0)
What I gather is that you want to compute the word frequencies of two files and store them in separate files for comparison; for that you can use the terminal. Below is a simple piece of code to compute word frequencies:
import string
import collections
import operator

keywords = []

def removePunctuation(sentence):
    # lower-case the text and drop every punctuation character
    sentence = sentence.lower()
    new_sentence = ""
    for char in sentence:
        if char not in string.punctuation:
            new_sentence = new_sentence + char
    return new_sentence

def wordFrequences(sentence):
    # count how often each word occurs in the (already cleaned) text
    wordFreq = {}
    split_sentence = sentence.split()
    for word in split_sentence:
        wordFreq[word] = wordFreq.get(word, 0) + 1
    # od = collections.OrderedDict(sorted(wordFreq.items(), reverse=True))
    # print(od)
    sorted_x = sorted(wordFreq.items(), key=operator.itemgetter(1), reverse=True)
    print(sorted_x)
    for key, value in sorted_x:
        keywords.append(key)
    print(keywords)

f = open('D:/article/sport/a.txt', 'r')
sentence = f.read()
# sentence = "The first test of the function some some some some"
new_sentence = removePunctuation(sentence)
wordFrequences(new_sentence)
You have to run this code twice, changing the path of the text file each time, and on each run redirect the console output to a file like this:

python abovecode.py > destinationfile.txt

So in your case:

python abovecode.py > sportfolder/file1.txt
python abovecode.py > foodfolder/file2.txt
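If you would rather not edit the file path inside the script between the two runs, one small variation (my own addition, not part of the answer above) is to take the path from the command line with sys.argv and replace the last few lines of the script with something like:

import sys

if __name__ == '__main__':
    # usage: python abovecode.py D:/article/sport/a.txt > sportfolder/file1.txt
    path = sys.argv[1]
    with open(path, 'r') as f:
        sentence = f.read()
    new_sentence = removePunctuation(sentence)
    wordFrequences(new_sentence)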
imp: if you want the words together with their frequencies, omit the print(keywords) part.
imp: if you need just the words ordered according to their frequency, omit print(sorted_x).