With Lucene 4.3.1, how can I get all terms that occur in a sub-range of all documents?

Time: 2013-07-22 14:20:32

Tags: lucene

Suppose a Lucene index contains the fields date and content. I want to get the terms and their frequencies from all documents whose date is yesterday. The date field is a keyword field (not analyzed); the content field is analyzed and indexed.
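For reference, an index with this schema might be built as follows (a minimal sketch; the field names, the yyyymmddhhmmss date format, and the use of StandardAnalyzer are assumptions, not given in the question):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

Directory dir = new RAMDirectory();
IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43,
        new StandardAnalyzer(Version.LUCENE_43));
IndexWriter writer = new IndexWriter(dir, config);

Document doc = new Document();
// Keyword field: indexed as a single, un-analyzed token.
doc.add(new StringField("date", "20130721120000", Store.YES));
// Full-text field: analyzed and indexed; stored so the raw text can be
// re-tokenized later.
doc.add(new TextField("content", "some example text", Store.YES));
writer.addDocument(doc);
writer.close();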

Please help me with sample code.

1 Answer:

Answer 0 (score: 0)

The source code of my solution is as follows...

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;

/**
 * Finds all documents whose tweetDate lies in [fromDateTime, toDateTime),
 * counts the term frequencies of their stored content, and returns the
 * ten most frequent terms as a space-separated string.
 *
 * @param searcher
 *            the IndexSearcher for the index to query
 * @param fromDateTime
 *            inclusive lower bound, format yyyymmddhhmmss
 * @param toDateTime
 *            exclusive upper bound, format yyyymmddhhmmss
 * @return the top 10 terms, separated by single spaces
 */
public static String top10(IndexSearcher searcher, String fromDateTime,
        String toDateTime) {
    String top10Query = "";
    try {
        // Range query on the keyword date field: the lower bound is
        // inclusive, the upper bound exclusive.
        Query query = new TermRangeQuery("tweetDate", new BytesRef(
                fromDateTime), new BytesRef(toDateTime), true, false);
        // One bit per document in the index, set for every match.
        final BitSet bits = new BitSet(searcher.getIndexReader().maxDoc());
        // Collect the matching doc ids into the bit set; scores are not needed.
        searcher.search(query, new Collector() {

            private int docBase;

            @Override
            public void setScorer(Scorer scorer) throws IOException {
            }

            @Override
            public void setNextReader(AtomicReaderContext context)
                    throws IOException {
                // Remember the segment's doc base so that per-segment doc
                // ids can be mapped to index-wide ids.
                this.docBase = context.docBase;
            }

            @Override
            public void collect(int doc) throws IOException {
                bits.set(doc + docBase);
            }

            @Override
            public boolean acceptsDocsOutOfOrder() {
                return false;
            }
        });

        // Analyzer used to re-tokenize the stored content of each matching
        // document. EnglishStopWords is a custom helper (not part of Lucene)
        // that supplies an English stop-word set.
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43,
                EnglishStopWords.getEnglishStopWords());

        // Count how often each term occurs across all matching documents.
        HashMap<String, Long> wordFrequency = new HashMap<>();
        for (int wx = 0; wx < bits.length(); ++wx) {
            if (bits.get(wx)) {
                Document wd = searcher.doc(wx);
                // Tokenize the stored content field. The field name passed
                // here ("temp") is irrelevant, since the text comes from the
                // supplied Reader.
                TokenStream tokenStream = analyzer.tokenStream("temp",
                        new StringReader(wd.get("content")));
                CharTermAttribute charTermAttribute = tokenStream
                        .addAttribute(CharTermAttribute.class);
                tokenStream.reset();
                while (tokenStream.incrementToken()) {
                    String term = charTermAttribute.toString();
                    // Ignore single-character tokens.
                    if (term.length() < 2)
                        continue;
                    Long wl = wordFrequency.get(term);
                    wordFrequency.put(term, wl == null ? 1L : wl + 1);
                }
                tokenStream.end();
                tokenStream.close();
            }
        }
        analyzer.close();

        // Sort terms by frequency, descending: zero-padding the count to six
        // digits makes the lexicographic sort act like a numeric sort.
        List<String> occurterm = new ArrayList<String>();
        for (String ws : wordFrequency.keySet()) {
            occurterm.add(String.format("%06d\t%s", wordFrequency.get(ws),
                    ws));
        }
        Collections.sort(occurterm, Collections.reverseOrder());

        // Build the query string from the ten most frequent terms.
        int topCount = 10;
        for (String ws : occurterm) {
            if (topCount-- == 0)
                break;
            String[] tks = ws.split("\\t");
            top10Query += tks[1] + " ";
        }
        // trim() returns a new string, so the result must be assigned back.
        top10Query = top10Query.trim();
    } catch (IOException e) {
        e.printStackTrace();
    }
    // return top10 word string
    return top10Query;
}
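For example, to get the top 10 terms for yesterday (2013-07-21), the method above could be called like this (a sketch; the index location and reader setup are assumptions):

import java.io.File;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

Directory dir = FSDirectory.open(new File("/path/to/index")); // hypothetical path
DirectoryReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);

// Lower bound inclusive, upper bound exclusive, so this covers
// all of 2013-07-21.
String top10Terms = top10(searcher, "20130721000000", "20130722000000");
System.out.println(top10Terms);

reader.close();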