How do I process a text file and find coreferences with Stanford coreference resolution?

Date: 2019-01-23 08:02:14

Tags: java stanford-nlp opennlp

I want to tokenize the contents of a file and find the locations mentioned in it, but I am new to this. I simply tried to feed the file instead of the text to the pipeline:

BufferedReader fr = new BufferedReader(new FileReader("../../mytext.txt"));
Annotation d = new Annotation((List<CoreMap>) fr);
pipeline.annotate((Iterable<Annotation>) d);

but it failed. Could you please help me?

1 answer:

Answer 0 (score: 0)

import edu.stanford.nlp.coref.data.CorefChain;
import edu.stanford.nlp.ling.*;
import edu.stanford.nlp.ie.util.*;
import edu.stanford.nlp.pipeline.*;
import edu.stanford.nlp.semgraph.*;
import edu.stanford.nlp.trees.*;
import java.util.*;
import java.io.*;


public class BasicPipelineExample {

  public static String text = "Joe Smith was born in California. " +
      "In 2017, he went to Paris, France in the summer. " +
      "His flight left at 3:00pm on July 10th, 2017. " +
      "After eating some escargot for the first time, Joe said, \"That was delicious!\" " +
      "He sent a postcard to his sister Jane Smith. " +
      "After hearing about Joe's trip, Jane decided she might go to France one day.";

  public static void main(String[] args) throws IOException {
    // set up pipeline properties
    Properties props = new Properties();
    // set the list of annotators to run
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse,depparse,coref");
    // set a property for an annotator, in this case the coref annotator is being set to use the neural algorithm
    props.setProperty("coref.algorithm", "neural");
    // build pipeline
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    // create a document object
    CoreDocument document = new CoreDocument(text);
    // annotate the document
    pipeline.annotate(document);
    // examples

    // coreference between entity mentions
    CoreEntityMention originalEntityMention = document.sentences().get(3).entityMentions().get(1);
    System.out.println("Example: original entity mention");
    System.out.println(originalEntityMention);
    System.out.println("Example: canonical entity mention");
    System.out.println(originalEntityMention.canonicalEntityMention().get());
    System.out.println();

    // get document wide coref info
    Map<Integer, CorefChain> corefChains = document.corefChains();
    System.out.println("Example: coref chains for document");
    System.out.println(corefChains);
    System.out.println();

    // write a pretty-printed text version of the annotation to a file
    FileOutputStream fos = new FileOutputStream(new File("output.txt"));
    TextOutputter.prettyPrint(document.annotation(), fos, pipeline);
    fos.close();

  }

}
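
Regarding the original question: the pipeline expects the text itself, not a BufferedReader object, so read the file into a String first and pass that String to CoreDocument. Below is a minimal sketch of that idea, assuming a UTF-8 encoded file at ../../mytext.txt (the path from the question); the class name FileCorefExample is just for illustration.

import edu.stanford.nlp.pipeline.*;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.util.Properties;

public class FileCorefExample {

  public static void main(String[] args) throws IOException {
    // read the whole file into a String (UTF-8 assumed)
    String text = new String(Files.readAllBytes(Paths.get("../../mytext.txt")), StandardCharsets.UTF_8);

    // same pipeline setup as in the example above
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse,depparse,coref");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    // annotate the file's text, not the reader object
    CoreDocument document = new CoreDocument(text);
    pipeline.annotate(document);

    // print the document-wide coreference chains
    System.out.println(document.corefChains());
  }
}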