我正在尝试变形分析包含txt文件的完整文件夹。
使用https://code.google.com/archive/p/foma/
这是我写的代码。我在 Python 中将每个单词传给 foma FST，但在处理完 1900 个文件中的 143 个之后，循环就无限期地卡住了。我试着把循环中对 foma apply_up 的调用注释掉，之后所有文件都能正常写入新文件夹（只是不含词根形式）。
class Lemmatizer:
    """Lemmatize every .txt file under a folder tree with a foma FST.

    Each input file is read as UTF-8, tokenized with nltk, and every token
    is looked up through the FST's apply-up direction.  The lemma (the
    analysis up to the first '+' tag separator) -- or the original word when
    no analysis exists -- is written to a same-named file in the output
    folder, tokens separated by spaces, one output line per input line.
    """

    def __init__(self, inputFolderPath=None, outputFolderPath=None, fomaBinFilePath="Konkani.bin"):
        self.inputFolderPath = inputFolderPath
        self.outputFolderPath = outputFolderPath
        self.fomaBinFilePath = fomaBinFilePath
        # Load the compiled network once and keep a default apply handle
        # for callers that use it directly.  lemmatize_folder() creates its
        # own per-file handles (see NOTE there).
        self.net = foma.foma_fsm_read_binary_file(fomaBinFilePath)
        self.ah = foma.foma_apply_init(self.net)

    def lemmatize_folder(self):
        """Walk inputFolderPath and write a lemmatized copy of each .txt file.

        Creates outputFolderPath if it does not exist.  Files that are not
        .txt are skipped; directory structure under the input root is
        flattened into the single output folder.
        """
        if not os.path.exists(self.outputFolderPath):
            os.makedirs(self.outputFolderPath)
        for root, dirs, files in os.walk(self.inputFolderPath):
            for fileName in filter(lambda f: f.endswith('.txt'), files):
                # NOTE(review): reusing one apply handle for the whole corpus
                # appeared to wedge after ~143 files; a fresh handle per file
                # keeps the apply state bounded.  Confirm against the foma
                # bindings whether foma_apply_clear() should also be called
                # to release each handle when done.
                ah = foma.foma_apply_init(self.net)
                outputPath = os.path.join(self.outputFolderPath, fileName)
                inputPath = os.path.join(root, fileName)
                with codecs.open(outputPath, 'w') as outputFile:
                    with codecs.open(inputPath, 'r', 'utf-8') as inputFile:
                        for line in inputFile:
                            for word in nltk.word_tokenize(line):
                                result = foma.foma_apply_up(ah, word)
                                if result is not None:
                                    # Keep only the lemma before the first
                                    # '+' tag separator.  The binding returns
                                    # a UTF-8 byte string, so it is written
                                    # to the (byte-mode) output file as-is.
                                    outputFile.write(result.split('+', 1)[0])
                                else:
                                    # No analysis found: pass the surface
                                    # form through unchanged.
                                    outputFile.write(word.encode('utf-8'))
                                outputFile.write(' ')
                            outputFile.write('\n')
有没有人遇到过类似的问题？foma FST 在初始化之后，可调用 apply_up 的次数是否存在某种上限？