Python code takes more than 15 minutes to generate output

Time: 2018-10-01 14:39:52

Tags: python performance optimization data-mining tf-idf

import os
import re
import math
from math import log10
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

python_file_root = './presidential_debates'

def getidf(token):
    # Inverse document frequency: count how many of the 30 documents
    # contain the token. Note that every call re-reads all 30 files.
    document_occurance = 0
    for filename in os.listdir(python_file_root):
        with open(os.path.join(python_file_root, filename), "r") as file:
            for line in file:
                if re.search(r'\b' + token + r'\b', line):
                    document_occurance += 1
                    break
    if document_occurance != 0:
        return log10(30 / document_occurance)  # 30 = total number of documents
    return -1

def normalize(filename, token):
    # Euclidean norm of the document's tf-idf vector. After preprocessing,
    # each line of the file holds one stemmed token, so counting identical
    # lines counts term frequencies.
    counts = dict()
    square = []
    with open(os.path.join(python_file_root, filename), "r") as file:
        for line in file:
            if line in counts:
                counts[line] += 1
            else:
                counts[line] = 1
    for key, value in counts.items():
        tf = 1 + log10(value)
        idf = getidf(key.rstrip())  # a full scan of all 30 files per unique token
        square.append((tf * idf) * (tf * idf))
    return math.sqrt(sum(square))

def getweight(filename, token):
    # Normalized tf-idf weight of a token in one document.
    hit_count1 = 0
    idft = getidf(token)
    with open(os.path.join(python_file_root, filename), "r") as file:
        for line in file:
            if re.search(r'\b' + token + r'\b', line):
                hit_count1 += 1
    if hit_count1 == 0:
        return 0
    tf = 1 + log10(hit_count1)
    initial = idft * tf
    if initial <= 0:
        return 0
    normalize_fact = normalize(filename, token)
    return initial / normalize_fact

# Preprocess every file in place: lowercase, tokenize, drop stopwords,
# stem, then rewrite the file with one stemmed token per line.
for filename in os.listdir(python_file_root):
    with open(os.path.join(python_file_root, filename), "r") as file:
        doc = file.read().lower()
    tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
    tokens = tokenizer.tokenize(doc)
    stoplist = stopwords.words('english')
    stop_removed = [word for word in tokens if word not in stoplist]
    with open(os.path.join(python_file_root, filename), "w") as f:
        for item in stop_removed:
            stemmer = PorterStemmer()
            stemmed = [stemmer.stem(item)]
            for items in stemmed:
                f.write("%s\n" % items)
print("\nIDF\n")
print("%.12f" % getidf("health"))
print("%.12f" % getidf("agenda"))
print("%.12f" % getidf("vector"))
print("%.12f" % getidf("reason"))
print("%.12f" % getidf("hispan"))
print("%.12f" % getidf("hispanic"))
print("\n")
print("%.12f" % getweight("2012-10-03.txt","health"))
print("%.12f" % getweight("1960-10-21.txt","reason"))
print("%.12f" % getweight("1976-10-22.txt","agenda"))
print("%.12f" % getweight("2012-10-16.txt","hispan"))
print("%.12f" % getweight("2012-10-16.txt","hispanic"))

I have 30 txt files, and I have written a program to compute the idf values and the normalized tf-idf vectors. The values I get are correct, but the getweight function takes more than 15 minutes to produce its output. Can anyone suggest some optimizations? I do not want to use any non-standard Python packages.
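For reference, a single call can be timed with just the standard library (a minimal sketch; 2012-10-03.txt is one of the debate files):

import time

start = time.perf_counter()
print(getweight("2012-10-03.txt", "health"))
# report how long one normalized tf-idf lookup takes
print("elapsed: %.2f s" % (time.perf_counter() - start))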

1 Answer:

Answer 0 (score: 0)

Why are you creating a new PorterStemmer for every word?
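A minimal sketch of that fix for your preprocessing loop, creating the stemmer once and reusing it (converting the stopword list to a set is an extra assumption on my part, but it makes each membership test O(1) instead of a list scan):

import os
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

python_file_root = './presidential_debates'

stemmer = PorterStemmer()                    # created once, reused for every word
stoplist = set(stopwords.words('english'))   # set lookup instead of list scan

for filename in os.listdir(python_file_root):
    with open(os.path.join(python_file_root, filename), "r") as file:
        doc = file.read().lower()
    tokens = RegexpTokenizer(r'[a-zA-Z]+').tokenize(doc)
    with open(os.path.join(python_file_root, filename), "w") as f:
        for word in tokens:
            if word not in stoplist:
                f.write("%s\n" % stemmer.stem(word))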

Beyond that obvious issue, try profiling the code. NLTK has a reputation for being slow, so it may not be your fault. If you profile it, you will know.
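Profiling needs nothing outside the standard library. A minimal sketch with cProfile (the getweight call is just an example target):

import cProfile
import pstats

# Profile one slow call, then print the 10 most expensive functions
# by cumulative time; the hot spot should stand out immediately.
cProfile.run('getweight("2012-10-03.txt", "health")', 'profile.out')
pstats.Stats('profile.out').sort_stats('cumulative').print_stats(10)

In this particular code I would expect getidf to dominate: normalize calls it once per unique token in the document, and every call re-reads all 30 files, so caching its results in a dict is likely the first win the profile points to.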