I'm having a problem trying to run an entity-extraction function, and I believe it comes down to a version difference. The working example below runs under NLTK 2.0.4 but not under 3.0. The only change I made was replacing the call to batch_ne_chunk with nltk.ne_chunk_sents, to stop 3.0 from throwing an error.
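For clarity, the rename amounts to the following (the 2.0.4 spelling is quoted from memory and may not be exact):

    # NLTK 2.0.4 (old name):
    chunked_sentences = nltk.batch_ne_chunk(tagged_sentences, binary=True)
    # NLTK 3.0 (new name, used in the code below):
    chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)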
import nltk

def package_get_entities(self, text):
    # text = text[0:300]
    entity_names = []
    chunked = self.get_chunked_sentences(text)
    for tree in chunked:
        entity_names.extend(self.extract_entity_names(tree))
    # drop duplicates
    entity_names = list(set(entity_names))
    return entity_names

def get_chunked_sentences(self, text):
    # sentence-split, tokenize and POS-tag, then run the named-entity chunker
    sentences = nltk.sent_tokenize(text)
    tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
    tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
    chunked_sentences = nltk.ne_chunk_sents(tagged_sentences, binary=True)
    return chunked_sentences

def extract_entity_names(self, t):
    # walk the chunk tree and collect the words under every 'NE' subtree
    entity_names = []
    if hasattr(t, 'node') and t.node:
        if t.node == 'NE':
            entity_names.append(' '.join([child[0] for child in t]))
        else:
            for child in t:
                entity_names.extend(self.extract_entity_names(child))
    return entity_names
Running the function:
str = 'this is some text about a man named Abraham Lincoln'
entArray = package_get_entities(str)
Under 2.0.4 the output is ['Abraham Lincoln']; under 3.0 the output is [].
Answer (score: 1):
I had to change:
if hasattr(t, 'node') and t.node:
to:
if hasattr(t, 'label'):
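In NLTK 3 the Tree attribute node was replaced by the label() method, so the comparison t.node == 'NE' inside the same function also has to become t.label() == 'NE'. A minimal sketch of the fully adapted helper, assuming the binary=True chunking from the question (so every entity subtree is labelled 'NE'):

    def extract_entity_names(self, t):
        # NLTK 3 version: Tree exposes label() instead of the old .node attribute
        entity_names = []
        if hasattr(t, 'label') and t.label():
            if t.label() == 'NE':
                # leaves are (word, tag) tuples; join the words into one entity string
                entity_names.append(' '.join(child[0] for child in t))
            else:
                for child in t:
                    entity_names.extend(self.extract_entity_names(child))
        return entity_names

With both changes the example text should come back as ['Abraham Lincoln'] under 3.0 as well.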