A similar question was asked here:
Finding the position of search hits from Lucene
My problem is that that link is three years old, and most of the methods it uses are deprecated or no longer exist in Lucene 4.4 or 4.5.
At first I thought the explain() method would contain all of this, but it doesn't (or I can't see it there).
This is what I get from the explain() method. I can't really see anything about positions:
0.40144306 = (MATCH) sum of:
  0.13381435 = (MATCH) weight(contents:inb344 in 52) [DefaultSimilarity], result of:
    0.13381435 = score(doc=52,freq=1.0 = termFreq=1.0), product of:
      0.4472136 = queryWeight, product of:
        9.574976 = idf(docFreq=44, maxDocs=238384)
        0.046706498 = queryNorm
      0.299218 = fieldWeight in 52, product of:
        1.0 = tf(freq=1.0), with freq of:
          1.0 = termFreq=1.0
        9.574976 = idf(docFreq=44, maxDocs=238384)
        0.03125 = fieldNorm(doc=52)
  0.2676287 = (MATCH) weight(contents:inb344^2.0 in 52) [DefaultSimilarity], result of:
    0.2676287 = score(doc=52,freq=1.0 = termFreq=1.0), product of:
      0.8944272 = queryWeight, product of:
        2.0 = boost
        9.574976 = idf(docFreq=44, maxDocs=238384)
        0.046706498 = queryNorm
      0.299218 = fieldWeight in 52, product of:
        1.0 = tf(freq=1.0), with freq of:
          1.0 = termFreq=1.0
        9.574976 = idf(docFreq=44, maxDocs=238384)
        0.03125 = fieldNorm(doc=52)
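(For reference, output like the above comes from IndexSearcher.explain(); a minimal sketch, assuming a searcher and parsed query set up the same way as in the answer's code below, and using 52 as the internal document id shown above:)

// Sketch only: `searcher` and `query` are assumed to exist already;
// requires org.apache.lucene.search.Explanation.
Explanation explanation = searcher.explain(query, 52); // 52 = internal doc id from the output above
System.out.println(explanation.toString());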
Is there a way to see the positions where the query matched in a given document (ideally the start and end positions)?
Answer 0 (score: 3)
I found the answer here:
http://www.computergodzilla.blogspot.com/2013/07/how-to-use-lucene-highlighter.html
It basically uses version 4.2, and it works very well!
Here is the code (just in case):
/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package com.computergodzilla.highlighter;

import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.search.highlight.TokenSources;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

/**
 * Example of Lucene Highlighter
 * @author Mubin Shrestha
 */
public class LuceneHighlighter {

    public void highLighter() throws IOException, ParseException, InvalidTokenOffsetsException {
        // Open the index and set up the searcher and query parser.
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File("D:/INDEXDIRECTORY")));
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_42);
        IndexSearcher searcher = new IndexSearcher(reader);
        QueryParser parser = new QueryParser(Version.LUCENE_42, "ncontent", analyzer);
        Query query = parser.parse("going");
        TopDocs hits = searcher.search(query, reader.maxDoc());
        System.out.println(hits.totalHits);

        // The highlighter wraps matched terms in <B>...</B> by default.
        SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter();
        Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query));

        // Iterate over the hits only. (The original looped up to reader.maxDoc(),
        // which fails with ArrayIndexOutOfBoundsException when there are fewer hits than documents.)
        for (int i = 0; i < hits.scoreDocs.length; i++) {
            int id = hits.scoreDocs[i].doc;
            Document doc = searcher.doc(id);
            String text = doc.get("ncontent");
            TokenStream tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), id, "ncontent", analyzer);
            TextFragment[] frag = highlighter.getBestTextFragments(tokenStream, text, false, 4);
            for (int j = 0; j < frag.length; j++) {
                if ((frag[j] != null) && (frag[j].getScore() > 0)) {
                    System.out.println(frag[j].toString());
                }
            }

            // Term vector: same thing for the "content" field; this assumes the index
            // also has a stored field named "content", otherwise doc.get() returns null.
            text = doc.get("content");
            tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), hits.scoreDocs[i].doc, "content", analyzer);
            frag = highlighter.getBestTextFragments(tokenStream, text, false, 4);
            for (int j = 0; j < frag.length; j++) {
                if ((frag[j] != null) && (frag[j].getScore() > 0)) {
                    System.out.println(frag[j].toString());
                }
            }
        }
        reader.close();
    }
}
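Note that the highlighter only returns marked-up text fragments, not the raw start/end offsets the question asks about. If the field was indexed with term vectors that include positions and offsets, those can be read directly from the index in Lucene 4.x. Below is a minimal sketch under that assumption; the class name, the field "ncontent", the document id 52 and the term "going" are illustrative and not part of the linked answer:

import java.io.File;
import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

/**
 * Hypothetical helper (not from the linked answer): prints the positions and
 * character offsets of one term inside one document, read from the term vector
 * of the given field. Requires the field to be indexed with term vectors that
 * store positions and offsets.
 */
public class TermOffsetsExample {

    public static void printOffsets(IndexReader reader, int docId, String field, String term) throws IOException {
        Terms termVector = reader.getTermVector(docId, field);
        if (termVector == null) {
            return; // no term vector stored for this field
        }
        TermsEnum termsEnum = termVector.iterator(null);
        if (!termsEnum.seekExact(new BytesRef(term))) {
            return; // term does not occur in this document
        }
        DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
        positions.nextDoc(); // a term vector enumerates a single (virtual) document
        for (int i = 0; i < positions.freq(); i++) {
            int position = positions.nextPosition();
            System.out.println("position=" + position
                    + ", startOffset=" + positions.startOffset()
                    + ", endOffset=" + positions.endOffset());
        }
    }

    public static void main(String[] args) throws IOException {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File("D:/INDEXDIRECTORY")));
        printOffsets(reader, 52, "ncontent", "going");
        reader.close();
    }
}

startOffset() and endOffset() return -1 when offsets were not stored in the term vector, so the field has to be indexed with offsets enabled for this to give character positions.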