Lucene(5.3.1) demo

 

Lucene usage has two parts: creating the index, and searching it.
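
A minimal end-to-end sketch of those two parts (index one document in an in-memory directory, then search it) could look like the following. The class name MiniDemo and the sample text are just for illustration; the snippet targets the same 5.x APIs used in the demo below:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class MiniDemo {
	public static void main(String[] args) throws Exception {
		Directory dir = new RAMDirectory();// in-memory index, nothing written to disk
		StandardAnalyzer analyzer = new StandardAnalyzer();

		// part 1: create the index
		IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));
		Document doc = new Document();
		doc.add(new TextField("content", "hello lucene world", Store.YES));
		writer.addDocument(doc);
		writer.close();// flushes the buffered document to the index

		// part 2: search the index
		DirectoryReader reader = DirectoryReader.open(dir);
		IndexSearcher searcher = new IndexSearcher(reader);
		Query query = new QueryParser("content", analyzer).parse("lucene");
		TopDocs hits = searcher.search(query, 10);
		for (ScoreDoc sd : hits.scoreDocs) {
			System.out.println(searcher.doc(sd.doc).get("content"));
		}
		reader.close();
		dir.close();
	}
}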

 

package demo.mytest.lucene;

import java.io.IOException;
import java.nio.file.FileSystems;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.Scorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.junit.Test;

import demo.mytest.lucene.utils.LuceneUtils;

public class HelloWorld {
	
	public static final String dataDir = "F:\\workspace\\luceneDemo\\dataDir\\CHANGES.txt";// path of the source document CHANGES.txt
	public static final String dataDir2 = "F:\\workspace\\luceneDemo\\dataDir\\bye.txt";
	public static final String indexDir = "F:\\workspace\\luceneDemo\\indexDir";// where the index files are stored
	public static final Analyzer analyzer = new StandardAnalyzer();

	/**
	 * testCreateIndex builds the index.
	 * 
	 * @throws Exception
	 */
	@Test
	public void testCreateIndex() throws Exception {
		
		IndexWriterConfig config = new IndexWriterConfig(analyzer);
		config.setOpenMode(OpenMode.CREATE);// how to open the index: create/overwrite (CREATE), append (APPEND), or create if missing, otherwise append (CREATE_OR_APPEND)
		Directory directory = this.getFsIndexDirectory();// the directory the index files are written to
		IndexWriter indexWriter = new IndexWriter(directory, config);// create the IndexWriter
//		indexWriter.deleteAll();// delete every document in the index
		
		Document doc = LuceneUtils.file2Document(dataDir);// convert the file into a Document
		indexWriter.addDocument(doc);
		
		Document doc2 = LuceneUtils.file2Document(dataDir2);// convert the file into a Document
		indexWriter.addDocument(doc2);
		
		indexWriter.close();// always close when done!
		/*
		 * A newly added Document is not written to the index files immediately; it is buffered in memory.
		 * Closing the writer merges the buffered changes into the index files. Without this close
		 * (or a commit, see the sketch after this method), the Documents just added would not be persisted.
		 */
		directory.close();
	}
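
	/*
	 * A minimal sketch, not part of the original demo: commit() also makes buffered changes
	 * visible without closing the writer, which is handy for a long-lived IndexWriter.
	 * The method name is an illustrative assumption; it reuses the constants and helpers above.
	 */
	@Test
	public void testCreateIndexWithCommit() throws Exception {
		IndexWriterConfig config = new IndexWriterConfig(analyzer);
		config.setOpenMode(OpenMode.CREATE_OR_APPEND);
		Directory directory = this.getFsIndexDirectory();
		IndexWriter indexWriter = new IndexWriter(directory, config);

		indexWriter.addDocument(LuceneUtils.file2Document(dataDir2));
		indexWriter.commit();// flush buffered documents and make them visible to newly opened readers

		indexWriter.addDocument(LuceneUtils.file2Document(dataDir));// the writer keeps working after the commit
		indexWriter.close();// close() commits any remaining buffered changes
		directory.close();
	}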

	/**
	 * testTermQuery runs a keyword (Term) query.
	 * 
	 * @throws IOException
	 */
	@Test
	public void testTermQuery() throws IOException {
		String queryStr = "bye.txt";
		Term term = new Term("fileName", queryStr);
		Query query = new TermQuery(term) ;
		
		this.queryAndPrintResult(query);
	}

	/**
	 * testTermRangeQuery runs a range query.
	 * 
	 * @throws IOException
	 */
	@Test
	public void testTermRangeQuery() throws IOException {
		Query query = new TermRangeQuery("fileSize", new BytesRef("10"), new BytesRef("2000"), true, true);// note: new BytesRef(10) would only set a capacity and yield an empty term; the terms are compared as strings (see the note after this method)
		this.queryAndPrintResult(query);
	}
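
	/*
	 * Note (a hedged sketch, not in the original post): the terms of a string-based TermRangeQuery
	 * are compared lexicographically, so for example "150000" sorts between "10" and "2000".
	 * For a real numeric range the field would need to be indexed as a LongField
	 * (org.apache.lucene.document.LongField) and queried with NumericRangeQuery
	 * (org.apache.lucene.search.NumericRangeQuery), roughly like this:
	 *
	 *   doc.add(new LongField("fileSize", file.length(), Field.Store.YES));
	 *   Query query = NumericRangeQuery.newLongRange("fileSize", 10L, 2000L, true, true);
	 *
	 * That change would require rebuilding the index with LuceneUtils adjusted accordingly.
	 */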
	
	/**
	 * queryAndPrintResult runs the given query and prints the results.
	 * 
	 * @throws IOException
	 */
	private void queryAndPrintResult(Query query) throws IOException {
		Directory directory = this.getFsIndexDirectory();// directory holding the index files
		IndexReader indexReader = DirectoryReader.open(directory);
		IndexSearcher indexSearcher = new IndexSearcher(indexReader);// searches the index stored at that directory

		TopDocs topDocs = indexSearcher.search(query, 100000);
		
		ScoreDoc[] scoreDocs = topDocs.scoreDocs;

		System.out.println("Found [" + topDocs.totalHits + "] matching documents.\n----------------");
		// print each matching Document
		for(int i = 0; i < scoreDocs.length; i++) {
			Document document = indexSearcher.doc(scoreDocs[i].doc);// fetch the Document by its doc id
			LuceneUtils.printDocumentInfo(document);
		}
		
		indexReader.close();// always close when done!
		directory.close();
	}
	
	/**
	 * testSearch searches with a query parser.
	 * 
	 * @throws IOException
	 * @throws ParseException
	 */
	@Test
	public void testSearch() throws IOException, ParseException {
		String queryStr = "ok";// ok, Ok, OK and oK all match the keyword OK in the documents (StandardAnalyzer lowercases terms)

		Directory directory = this.getFsIndexDirectory();// directory holding the index files
		IndexReader indexReader = DirectoryReader.open(directory);
		IndexSearcher indexSearcher = new IndexSearcher(indexReader);// searches the index stored at that directory
		
		String[] fields = {"fileName","content"};
		QueryParser parser = new MultiFieldQueryParser(fields, analyzer);// parser that searches several fields
//		QueryParser parser = new QueryParser("content", analyzer);// parser that searches only the content field
		Query query = parser.parse(queryStr);// the parser turns the query string into a Query object
		//Filter filter = null;
		//indexSearcher.search(query, filter, 2000);// this overload is deprecated in 5.3.1
		
		TopDocs topDocs = indexSearcher.search(query, 100000);
		
		ScoreDoc[] scoreDocs = topDocs.scoreDocs;
		System.out.println("Found [" + topDocs.totalHits + "] matching documents.\n----------------");
		
		// print each matching Document
		for(int i = 0; i < scoreDocs.length; i++) {
			Document document = indexSearcher.doc(scoreDocs[i].doc);// fetch the Document by its doc id
			LuceneUtils.printDocumentInfo(document);
		}
		
		indexReader.close();// always close when done!
		directory.close();
	}
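
	/*
	 * A few classic QueryParser syntax examples (illustrative; the query strings are not from the
	 * original post, but the syntax is standard for the classic query parser):
	 *
	 *   parser.parse("fileName:bye.txt");           // restrict a term to a specific field
	 *   parser.parse("+content:ok -content:bye");   // must contain "ok", must not contain "bye"
	 *   parser.parse("\"hello world\"");            // phrase query on the default field(s)
	 *   parser.parse("conten* OR ok~");             // wildcard and fuzzy queries
	 */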
	
	/**
	 * testHighlighter highlights the query keywords in the results and builds a summary fragment.
	 * 
	 * @throws Exception
	 */
	@Test
	public void testHighlighter() throws Exception {
		String queryStr = "bye.txt";
		Directory directory = this.getFsIndexDirectory();// directory holding the index files
		IndexReader indexReader = DirectoryReader.open(directory);
		IndexSearcher indexSearcher = new IndexSearcher(indexReader);// searches the index stored at that directory
		
		String[] fields = {"fileName","content"};
		QueryParser parser = new MultiFieldQueryParser(fields, analyzer);// parser that searches several fields
		Query query = parser.parse(queryStr);// the parser turns the query string into a Query object
		
		//1. run the query and collect the matching documents
		ScoreDoc[] scoreDocs = indexSearcher.search(query, 100000).scoreDocs;
		
		//2. highlighting (build the highlighter, then use it to mark the keywords) and print the documents
		//2.1 build the highlighter
		//--------------------
		Formatter formatter = new SimpleHTMLFormatter("<font color='red'>","</font>");
		
		Encoder encoder = new SimpleHTMLEncoder();
		
		Scorer fragmentScorer = new QueryScorer(query);
		Highlighter highlighter = new Highlighter(formatter, encoder, fragmentScorer);
		//configure the highlighter
		final int FRAGMENT_SIZE = 50;
		Fragmenter fragmenter = new SimpleFragmenter(FRAGMENT_SIZE);// character length of each fragment
		highlighter.setTextFragmenter(fragmenter);
		//--------------------
		
		for(int i = 0; i < scoreDocs.length; i++) {
			Document document = indexSearcher.doc(scoreDocs[i].doc);// fetch the Document by its doc id
			//2.2 build the summary text with the keywords highlighted
			//--------------------
			//extract the text fragment closest to the keywords as the summary
			String text = document.get("content");
			String ht = highlighter.getBestFragment(analyzer, "content", text);
			if(ht == null) {// ht is null when the keyword occurs in fileName but not in content
				//fall back to the beginning of the content
				ht = text.substring(0, Math.min(text.length(), FRAGMENT_SIZE));
			}
			System.out.println("ht.length()=" + ht.length());
			//put the highlighted text back into the Document
			//document.getField("content").setValue(ht);// there is no setValue method
			document.removeField("content");
			document.add(new Field("content", ht, TextField.TYPE_STORED));
			//--------------------
			
			LuceneUtils.printDocumentInfo(document);
		}
		
		
		indexReader.close();// always close when done!
		directory.close();
	}
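
	/*
	 * A hedged addition, not in the original post: for long content the highlighter can also return
	 * several fragments instead of the single best one, e.g. joined into a summary:
	 *
	 *   String[] frags = highlighter.getBestFragments(analyzer, "content", text, 3);
	 *   String summary = String.join("...", frags);
	 */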
	
	/**
	 * test demonstrates combining an in-memory index with an on-disk index:
	 * 1. When the application starts, load the on-disk index into memory.
	 * 2. While the application runs, operate only on the in-memory index, for speed.
	 * 3. Before the application shuts down, sync the in-memory index back to disk so no data is lost.
	 * 
	 * @throws Exception
	 */
	@Test 
	public void test() throws Exception {
		//1. when the application starts, load the on-disk index into memory
		//locate the index directory on disk
		Directory fsDir = this.getFsIndexDirectory();
		
		//2. while the application runs, operate only on the in-memory index
		//create an IndexWriter backed by memory
		Directory ramDir = new RAMDirectory((FSDirectory)fsDir, new IOContext());// copies the on-disk index files into memory on creation
		IndexWriterConfig ramConfig = new IndexWriterConfig(analyzer);
		ramConfig.setOpenMode(OpenMode.CREATE);
		IndexWriter ramIndexWriter = new IndexWriter(ramDir, ramConfig);
		
		//while the application runs, add Document objects
		String tmpDir = "F:\\workspace\\luceneDemo\\dataDir\\CHANGES.txt";//1.txt   CHANGES.txt
		Document doc = LuceneUtils.file2Document(tmpDir);// convert the file into a Document
		ramIndexWriter.addDocument(doc);

		ramIndexWriter.close();// always close when done!
		
		//3. before the application shuts down, sync the in-memory index back to disk
		//create an IndexWriter backed by the disk directory
		IndexWriterConfig fsConfig = new IndexWriterConfig(analyzer);
		fsConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
		IndexWriter fsIndexWriter = new IndexWriter(fsDir, fsConfig);
		fsIndexWriter.addIndexes(new Directory[] {ramDir});
		
		fsIndexWriter.close();// always close when done!
		
	}
	
	/**
	 * testForceMerge merges the index segments (an optimization step).
	 * 
	 * @throws Exception
	 */
	@Test
	public void testForceMerge() throws Exception {
		Directory fsDir = this.getFsIndexDirectory();
		IndexWriterConfig fsConfig = new IndexWriterConfig(analyzer);
		fsConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
		IndexWriter fsIndexWriter = new IndexWriter(fsDir,fsConfig);
		
		fsIndexWriter.commit();
		fsIndexWriter.forceMerge(2);// forceMerge() flushes internally
		fsIndexWriter.close();// always close when done!
	}
	
	/**
	 * testAnalyzer shows how an analyzer is used.
	 * English text needs an English analyzer and Chinese text needs a Chinese analyzer;
	 * pick the analyzer (possibly a third-party jar) that matches the language
	 * (see the SmartChineseAnalyzer note after this method).
	 * 
	 * @throws IOException
	 */
	@Test
	public void testAnalyzer() throws IOException {
		String enText = "This house builds well.She really wants to live in.";
		String zhText = "这个房子真好,她很想住进来。再见";
		Analyzer en1 = new StandardAnalyzer();
//		Analyzer zh1 = new SimpleAnalyzer();
		this.analyze(en1, enText);
		System.out.println("--------------------");
		this.analyze(en1, zhText);
		
		
	}
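
	/*
	 * A hedged note, not part of the original demo: StandardAnalyzer splits Chinese text into
	 * single characters. With the lucene-analyzers-smartcn jar on the classpath, a Chinese-aware
	 * analyzer could be used instead, e.g.:
	 *
	 *   Analyzer zh = new org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer();
	 *   this.analyze(zh, zhText);// zhText as defined in testAnalyzer above
	 */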
	
	/**
	 * analyze tokenizes the given text with the given analyzer.
	 * 
	 * @param analyzer the analyzer to use
	 * @param text the text to tokenize
	 * @throws IOException
	 */
	private void analyze(Analyzer analyzer, String text) throws IOException {
		TokenStream tokenStream = analyzer.tokenStream("content", text);
		
		//addAttribute(): if the AttributeSource already holds an instance of the given class it is returned; otherwise a new instance is added and returned
		OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);// OffsetAttribute: the token's start/end offsets

		//get the CharTermAttribute from the tokenStream
	    CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class);// CharTermAttribute: the token's term text
	     
		try {
			tokenStream.reset(); // resets the stream to the beginning; reset() must be called before incrementToken() (required)
			while(tokenStream.incrementToken()) {// advance to the next token; returns false at the end of the TokenStream
				System.out.println("----------CharTermAttribute----------");
				System.out.println("term: " + termAtt.toString());
				System.out.println("----------OffsetAttribute----------");
				System.out.println("token: " + tokenStream.reflectAsString(false));
		        System.out.println("\ttoken start offset: " + offsetAtt.startOffset());
		        System.out.println("\ttoken end offset: " + offsetAtt.endOffset());
			}
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			tokenStream.end();// call end() after reaching the end of the TokenStream, e.g. to set the final offset
			tokenStream.close();// release resources held by the stream
		}
		
	}
	
	/**
	 * getFsIndexDirectory returns the Directory of the index.
	 * 
	 * @return Directory
	 */
	private Directory getFsIndexDirectory() {
		try {
			return FSDirectory.open(FileSystems.getDefault().getPath(indexDir));// disk-based index; an in-memory index is also possible: Directory directory = new RAMDirectory();
		} catch (IOException e) {
			throw new RuntimeException(e);
		}
	}
}

 

 

Utility class used by the demo:

 

package demo.mytest.lucene.utils;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;

/**
 * LuceneUtils utility class.
 */
public class LuceneUtils {
	
	/**
	 * file2Document converts a file into a Document.
	 * 
	 * @param path path of the file
	 * @return the Document
	 * @throws Exception
	 */
	public static Document file2Document(String path) throws Exception {
		File file = new File(path);
		Document doc = new Document();
		doc.add(new Field("fileName", file.getName(), TextField.TYPE_STORED));
		doc.add(new Field("content", getFileContent(file),TextField.TYPE_STORED));
		doc.add(new Field("fileSize", String.valueOf(file.getTotalSpace()), TextField.TYPE_STORED));
		doc.add(new Field("path", file.getAbsolutePath(),TextField.TYPE_STORED));
		//doc.add(new Field("fileName",String.valueOf(file.getName()),Store.YES, Index.ANALYZED));
		System.out.println(file.getAbsolutePath()+"\t"+file.isDirectory()+"\n"+getFileContent(file));
		doc.add(new Field("isDirectory",String.valueOf(file.isDirectory()),TextField.TYPE_STORED));
		
		return doc;
	}
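
	/*
	 * A hedged variation, not in the original post: fields that should match as exact, untokenized
	 * values (for example path or isDirectory) could be indexed with StringField
	 * (org.apache.lucene.document.StringField) instead of TextField, so a TermQuery matches the
	 * whole value:
	 *
	 *   doc.add(new StringField("path", file.getAbsolutePath(), Field.Store.YES));
	 */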
	
	/**
	 * printDocumentInfo prints the fields of a Document.
	 * 
	 * @param document the Document to print
	 */
	public static void printDocumentInfo(Document document) {
//		IndexableField field = document.getField("fileName");
//		System.out.println(field.name() + ":" + field.stringValue());// prints e.g. fileName:CHANGES.txt; document.getField("fileName").stringValue() is equivalent to document.get("fileName")
		//print the Document's fields
		System.out.println("Document (" + document.get("fileName") + ") -"
		+ " content: " + document.get("content") + "\tpath: " + document.get("path")
		+ "\tfile size: " + document.get("fileSize") + "\tis directory: " + document.get("isDirectory"));
	}
	
	/**
	 * getFileContent reads the content of a file into a String.
	 * 
	 * @param file the file to read
	 * @return the file content
	 * @throws Exception
	 */
	private static String getFileContent(File file) throws Exception {
		StringBuffer sbf = new StringBuffer();
		BufferedReader br = new BufferedReader(new FileReader(file));
		String line = null;
		while ((line = br.readLine()) != null) {
			sbf.append(line).append("\n");
		}
		br.close();
		return sbf.toString();
	}
}

 

 

