I have implemented the TF-IDF algorithm on PySpark following the MapReduce flow. I want the output to look like [(word, tf_idf_score), (word, tf_idf_score), (word, tf_idf_score)].
Each word should be unique: even if it occurs many times in the text, it should appear only once in the final result. I tried to achieve this with the reduceByKey function, but it is not working as expected.
Also, the output currently shows single letters instead of words, and for some reason I cannot figure out why.
Could you explain what I am missing in the code?
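To make the reduceByKey step concrete, the kind of collapse I have in mind looks roughly like this sketch on a toy pair RDD (the sample pairs and the choice of max as the combiner are just illustrations, not my working code):

# toy (word, score) pairs; the real ones would come from the pipeline below
per_doc_scores = sc.parallelize([("spark", 0.5), ("data", 0.2), ("spark", 0.9)])
# collapse to one entry per word, keeping e.g. the highest score
unique_scores = per_doc_scores.reduceByKey(lambda a, b: max(a, b))
unique_scores.collect()  # each word once, e.g. [('spark', 0.9), ('data', 0.2)]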
Thanks a lot.
import string
import numpy as np
list_punct = list(string.punctuation)
# path to the input file on DBFS
text = '/dbfs/FileStore/tables/full_text.txt'
# build the input RDD (I expect one element per line of the file here)
text_rdd = sc.parallelize(text)
# strip whitespace, drop empty lines, then lowercase and remove punctuation
filtered_data = text_rdd. \
    map(lambda x: x.strip()). \
    filter(lambda x: len(x) != 0). \
    map(lambda line: ''.join([ch.lower() for ch in line if ch not in list_punct]))
# total number of documents, N in the IDF term
number_of_docs = filtered_data.count()
# attach an id to each document: (doc_text, doc_id)
doc_with_id = filtered_data.zipWithIndex()
# (doc_id, [tokens]) per document
tokenized_text = doc_with_id.map(lambda x: (x[1], x[0].split()))
# term frequency: how many times each (doc_id, term) pair occurs
term_count = tokenized_text.flatMapValues(lambda x: x).countByValue()
# document frequency: in how many distinct documents each term appears
term_document_count = tokenized_text.flatMapValues(lambda x: x).distinct() \
    .map(lambda x: (x[1], x[0])).countByKey()
def tf_idf(N, term_freq, term_document_count):
    # score every (doc, term) pair as tf * log(N / df)
    result = []
    for key, value in term_freq.items():
        doc = key[0]
        term = key[1]
        df = term_document_count[term]
        if df > 0:
            score = float(value) * np.log(N / df)
            result.append({"doc": doc, "term": term, "score": score})
    return result
tf_idf_output = tf_idf(number_of_docs, term_count, term_document_count)
tf_idf_output[:10]  # first ten (doc, term, score) records
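
For reference, here is a tiny self-contained run of the same steps on an in-memory sample (two made-up one-line documents), showing the shapes I expect term_count, term_document_count, and the final scores to have:

# two toy documents, one per RDD element
sample_docs = sc.parallelize(["the cat sat", "the dog sat down"])
sample_tokens = sample_docs.zipWithIndex().map(lambda x: (x[1], x[0].split()))
# ((doc_id, term) -> count), same shape as term_count above
sample_tc = sample_tokens.flatMapValues(lambda x: x).countByValue()
# (term -> number of documents containing it), same shape as term_document_count
sample_df = sample_tokens.flatMapValues(lambda x: x).distinct() \
    .map(lambda x: (x[1], x[0])).countByKey()
tf_idf(sample_docs.count(), sample_tc, sample_df)[:3]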