/*
 * Relevance ranking with Lucene:
 * the natural ordering by score.
 * Relevance ranking is the simplest form of sorting.
 * "Relevance" is really just the document's score,
 * and the score is different for every search,
 * because it is determined by the query terms.
 *
 * tf         - term frequency
 * idf        - inverse document frequency
 * boost      - the field's boost factor
 * lengthNorm - the length normalization factor
 */
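Before the listing, a minimal sketch of how these four factors multiply for a single query term. It is modeled on Lucene's DefaultSimilarity but deliberately simplified (queryNorm and coord are left out), and ScoreSketch and its helper methods are illustrative names, not Lucene API.

// Illustrative only: shows why a shorter field or a higher boost raises the score.
public class ScoreSketch {

    // tf: square root of how often the term occurs in the field
    static double tf(int freq) {
        return Math.sqrt(freq);
    }

    // idf: rarer terms (low docFreq relative to numDocs) weigh more
    static double idf(int docFreq, int numDocs) {
        return Math.log((double) numDocs / (docFreq + 1)) + 1.0;
    }

    // lengthNorm: shorter fields get a larger factor
    static double lengthNorm(int numTerms) {
        return 1.0 / Math.sqrt(numTerms);
    }

    static double score(int freq, int docFreq, int numDocs, float boost, int numTerms) {
        return tf(freq) * idf(docFreq, numDocs) * boost * lengthNorm(numTerms);
    }

    public static void main(String[] args) {
        // "bc" occurs once in "ab bc" (2 terms) and once in "ab bc cd" (3 terms);
        // 2 of the 3 indexed documents contain it.
        System.out.println("ab bc    : " + score(1, 2, 3, 1.0f, 2));
        System.out.println("ab bc cd : " + score(1, 2, 3, 1.0f, 3));
    }
}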
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;

public class Native_Score {

    public Native_Score(String INDEX_STORE_PATH) {
        try {
            IndexWriter writer = new IndexWriter(INDEX_STORE_PATH, new StandardAnalyzer(), true);
            writer.setUseCompoundFile(false);

            // Create three documents, each with a single "bookname" field
            Document doc1 = new Document();
            Document doc2 = new Document();
            Document doc3 = new Document();
            Field f1 = new Field("bookname", "ab bc", Field.Store.YES, Index.TOKENIZED);
            Field f2 = new Field("bookname", "ab bc cd", Field.Store.YES, Index.TOKENIZED);
            Field f3 = new Field("bookname", "ab", Field.Store.YES, Index.TOKENIZED);
            doc1.add(f1);
            doc2.add(f2);
            doc3.add(f3);
            writer.addDocument(doc1);
            writer.addDocument(doc2);
            writer.addDocument(doc3);
            writer.close();

            IndexSearcher searcher = new IndexSearcher(INDEX_STORE_PATH);
            TermQuery q = new TermQuery(new Term("bookname", "bc"));
            Hits hits = searcher.search(q);
            for (int i = 0; i < hits.length(); i++) {
                Document doc = hits.doc(i);
                // The matching document
                System.out.println(doc.get("bookname") + "\t\t");
                // The document's score
                System.out.println(hits.score(i));
                // Use explain() to see how this score was computed
                System.out.println(searcher.explain(q, hits.id(i)).toString());
            }
            searcher.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        new Native_Score("E:\\Lucene项目\\索引文件");
    }
}
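Among the four factors, boost is the one the application controls directly. The following is a minimal sketch, assuming the same Lucene 2.x API as the listing above (BoostSketch is an illustrative class, not part of the original program), of how a per-field boost set before addDocument() shifts the ranking.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;

public class BoostSketch {

    // Builds the "ab bc cd" document with a boosted field. At index time the
    // boost is folded into the field norm together with lengthNorm, so for the
    // query bookname:bc this document would typically rank above "ab bc".
    static Document boostedDoc() {
        Document doc = new Document();
        Field f = new Field("bookname", "ab bc cd", Field.Store.YES, Index.TOKENIZED);
        f.setBoost(2.0f); // default is 1.0f; > 1 promotes the field, < 1 demotes it
        doc.add(f);
        return doc;
    }
}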
Result screenshot:
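With this data, "ab bc" should rank above "ab bc cd": both contain "bc" once, but lengthNorm favors the shorter field. "ab" does not contain "bc" and does not appear in the hits at all.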