I am trying to associate a list of (multi-word) tags with each Document. So, for each document, I add several StringField entries with "tag" as the field name.
When searching, I would like the score to be proportional to the proportion of tags I match: for example, doc4 below, whose only tag matches, should rank above doc1, where the matching tag is one out of three. But the number of tags does not seem to be taken into account in the score at all.
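In condensed form, the indexing scheme described above looks roughly like this (illustrative helper only, not part of the actual test code further down):

    // Illustrative helper: each (multi-word) tag is added as its own
    // non-analyzed StringField named "tag".
    static Document tagDocument(String id, String... tags) {
        Document doc = new Document();
        doc.add(new StringField("id", id, Field.Store.YES));
        for (String tag : tags) {
            doc.add(new StringField("tag", tag, Field.Store.YES));
        }
        return doc;
    }
    // e.g. tagDocument("doc1", "piano", "electric guitar", "violon"),
    // later searched with new TermQuery(new Term("tag", "electric guitar")).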
When testing with these four documents:
- tags.put("doc1", "piano, electric guitar, violon");
- tags.put("doc2", "piano, electric guitar");
- tags.put("doc3", "piano");
- tags.put("doc4", "electric guitar");
This is what I get:
- Score : 1.0
Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc4> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
- Score : 1.0
Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc2> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
- Score : 1.0
Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc1> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:violon>>
How can I change this behaviour? Am I missing the proper way of doing this?
Here is my test code.
Best regards,
Renaud
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.function.Supplier;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.junit.Test; // assuming JUnit 4

public class LuceneQueryTest {

    Analyzer analyzer;
    BasicIndex basicIndex;
    LinkedList<String> phrases;
    Query query;
    Map<Document, Float> results;

    @Test
    public void testListOfTags() throws Exception {
        analyzer = new StandardAnalyzer();
        basicIndex = new BasicIndex(analyzer);

        Map<String, String> tags = new HashMap<>();
        tags.put("doc1", "piano, electric guitar, violon");
        tags.put("doc2", "piano, electric guitar");
        tags.put("doc3", "piano");
        tags.put("doc4", "electric guitar");

        Queue<String> queue = new LinkedList<>();
        queue.addAll(tags.keySet());

        basicIndex.index(new Supplier<Document>() {
            @Override
            public Document get() {
                Document doc = new Document();
                if (queue.isEmpty()) {
                    return null;
                }
                String docName = queue.poll();
                System.out.println("**** " + docName);
                String tag = tags.get(docName);
                doc.add(new StringField("id", docName, Field.Store.YES));
                for (String tagItem : tag.split("\\,")) {
                    System.out.println(tagItem);
                    Field tagField = new StringField("tag", tagItem, Field.Store.YES);
                    System.out.println(tagField);
                    doc.add(tagField);
                }
                return doc;
            }
        });

        BooleanQuery booleanQuery = new BooleanQuery();
        //booleanQuery.add(new TermQuery(new Term("tag", "piano")), BooleanClause.Occur.SHOULD);
        booleanQuery.add(new TermQuery(new Term("tag", "electric guitar")), BooleanClause.Occur.SHOULD);
        //Query parsedQuery = new QueryParser("tag", analyzer).parse("tag:\"electric guitar\"");
        query = booleanQuery;
        //query = parsedQuery;
        System.out.println(query);

        results = basicIndex.search(query);
        displayResults(results);
        System.out.println(Arrays.toString(basicIndex.document(3).getValues("tag")));
    }

    private void displayResults(Map<Document, Float> results) {
        results.forEach((Document doc, Float score) -> {
            System.out.println("Score : " + score + " \n Doc : " + doc);
        });
    }
}
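To see exactly how Lucene computes these scores, the hits can also be run through IndexSearcher.explain; a minimal sketch, assuming the basicIndex and query from the test above:

    // Debugging aid (sketch): print Lucene's scoring explanation for each hit.
    IndexSearcher searcher = basicIndex.newIndexSearcher();
    for (ScoreDoc hit : searcher.search(query, 10).scoreDocs) {
        System.out.println(searcher.explain(query, hit.doc));
    }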
Here is the code of the BasicIndex (test utility) class:
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 *
 * @author renaud
 */
public class BasicIndex {

    final Directory directory = new RAMDirectory();
    final IndexWriter indexWriter;
    final Analyzer analyzer;

    public BasicIndex(Analyzer analyzer) {
        this.analyzer = analyzer;
        this.indexWriter = newIndexWriter();
    }

    public Analyzer getAnalyzer() {
        return analyzer;
    }

    private IndexWriter newIndexWriter() {
        IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
        try {
            return new IndexWriter(directory, config);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }

    public IndexSearcher newIndexSearcher() {
        return new IndexSearcher(newIndexReader());
    }

    public IndexReader newIndexReader() {
        IndexReader reader;
        try {
            reader = DirectoryReader.open(directory);
        } catch (IOException ex) {
            throw ExceptionUtils.asRuntimeException(ex);
        }
        return reader;
    }

    public void index(LinkedList<String> phrases, final String fieldName) {
        index(phrases, (String phrase) -> {
            Document doc = new Document();
            Field workField = new TextField(fieldName, phrase, Field.Store.YES);
            doc.add(workField);
            return doc;
        });
    }

    public void index(Supplier<Document> documents) {
        Document document;
        while ((document = documents.get()) != null) {
            try {
                indexWriter.addDocument(document);
            } catch (IOException e) {
                throw ExceptionUtils.asRuntimeException(e);
            }
        }
        close();
    }

    public void index(LinkedList<String> phrases, Function<String, Document> docBuilder) {
        for (String phrase : phrases) {
            try {
                indexWriter.addDocument(docBuilder.apply(phrase));
            } catch (IOException e) {
                throw ExceptionUtils.asRuntimeException(e);
            }
        }
        close();
    }

    private void close() {
        IOUtils.closeSilently(indexWriter);
    }

    public Map<Document, Float> search(Query query) {
        final IndexSearcher indexSearcher = newIndexSearcher();
        int hitsPerPage = 10;
        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
        try {
            indexSearcher.search(query, collector);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        Map<Document, Float> results = new LinkedHashMap<>();
        for (int i = 0; i < hits.length; ++i) {
            ScoreDoc scoreDoc = hits[i];
            int docId = scoreDoc.doc;
            float score = scoreDoc.score;
            Document doc;
            try {
                doc = indexSearcher.doc(docId);
            } catch (IOException ex) {
                throw new RuntimeException(ex);
            }
            results.put(doc, score);
        }
        return results;
    }

    public Document document(int i) {
        try {
            return newIndexSearcher().doc(i);
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }
}
Answer (score: 1):
OK, the solution I ended up with is to store the number of tags of each document in an additional "count" field and to wrap the query in a BoostedQuery whose boost is the reciprocal of that count.
As a side note, I found that "SOLR" is a good keyword when searching the web for information about Lucene: it is much better documented while staying close to the Java code.
I am quite happy with the results:
Score : 0.5
Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc4> stored<count:1> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
Score : 0.33333334
Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc2> stored<count:2> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar>>
Score : 0.25
Doc : Document<stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<id:doc1> stored<count:3> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:piano> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:electric guitar> stored,indexed,tokenized,omitNorms,indexOptions=DOCS_ONLY<tag:violon>>
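For reference, ReciprocalFloatFunction(source, m, a, b) computes a / (m*x + b), so with m = a = b = 1 as in the updated code below the boost is 1 / (count + 1); since the plain BooleanQuery scored every match 1.0 (as in the first test), that is exactly where the 0.5, 0.33333334 and 0.25 come from. A small stand-alone check of that arithmetic (hypothetical helper, not part of the test):

    public class BoostCheck {
        // Mirrors ReciprocalFloatFunction(new IntFieldSource("count"), 1, 1, 1):
        // a / (m*x + b) with m = a = b = 1.
        static float boostFor(int tagCount) {
            return 1f / (tagCount + 1f);
        }

        public static void main(String[] args) {
            System.out.println(boostFor(1)); // doc4 -> 0.5
            System.out.println(boostFor(2)); // doc2 -> 0.33333334
            System.out.println(boostFor(3)); // doc1 -> 0.25
        }
    }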
Updated code:
@Test
public void testListOfTags() throws Exception {
    analyzer = new StandardAnalyzer();
    basicIndex = new BasicIndex(analyzer);

    Map<String, String> tags = new HashMap<>();
    tags.put("doc1", "piano, electric guitar, violon");
    tags.put("doc2", "piano, electric guitar");
    tags.put("doc3", "piano");
    tags.put("doc4", "electric guitar");

    Queue<String> queue = new LinkedList<>();
    queue.addAll(tags.keySet());

    basicIndex.index(new Supplier<Document>() {
        @Override
        public Document get() {
            Document doc = new Document();
            if (queue.isEmpty()) {
                return null;
            }
            String docName = queue.poll();
            System.out.println("**** " + docName);
            String tag = tags.get(docName);
            doc.add(new StringField("id", docName, Field.Store.YES));
            // Store the number of tags so the query can later be boosted by its reciprocal.
            String[] tagItems = tag.split("\\,");
            Field tagCountField = new IntField("count", tagItems.length, Field.Store.YES);
            doc.add(tagCountField);
            for (String tagItem : tagItems) {
                System.out.println(tagItem);
                Field tagField = new StringField("tag", tagItem.trim(), Field.Store.YES);
                System.out.println(tagField);
                doc.add(tagField);
            }
            return doc;
        }
    });

    BooleanQuery booleanQuery = new BooleanQuery();
    //booleanQuery.add(new TermQuery(new Term("tag", "piano")), BooleanClause.Occur.SHOULD);
    booleanQuery.add(new TermQuery(new Term("tag", "electric guitar")), BooleanClause.Occur.SHOULD);
    //Query parsedQuery = new QueryParser("tag", analyzer).parse("tag:\"electric guitar\"");
    query = booleanQuery;
    //query = parsedQuery;

    // Boost each hit by 1 / (count + 1): ReciprocalFloatFunction(source, m, a, b) = a / (m*x + b).
    ValueSource boostSource = new ReciprocalFloatFunction(new IntFieldSource("count"), 1, 1, 1);
    query = new BoostedQuery(query, boostSource);
    System.out.println(query);

    results = basicIndex.search(query);
    displayResults(results);
    System.out.println(Arrays.toString(basicIndex.document(3).getValues("tag")));
}