1. What is an index library
The index library is Lucene's core storage structure. It consists of two parts: the original record table and the vocabulary (term) table.
Original record table: stores the original records; Lucene assigns each stored record a unique ID.
Vocabulary table: stores the terms produced by the analyzer, together with the IDs of the records in the original record table that contain each term (a small conceptual sketch follows).
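This two-table split is essentially an inverted index. As a rough conceptual sketch only (not Lucene's actual storage format; the class name and the whitespace "analyzer" are mine for illustration), the relationship between the two tables can be pictured with plain Java collections:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Conceptual illustration of "original record table" + "vocabulary table" (an inverted index).
// Lucene's real storage is far more compact; this only shows the mapping between the two tables.
public class InvertedIndexSketch {
    public static void main(String[] args) {
        // Original record table: record ID -> original content
        Map<Integer, String> records = new LinkedHashMap<Integer, String>();
        records.put(1, "hello world");
        records.put(2, "hello lucene");

        // Vocabulary table: term -> IDs of the records containing it
        Map<String, List<Integer>> vocabulary = new LinkedHashMap<String, List<Integer>>();
        for (Map.Entry<Integer, String> record : records.entrySet()) {
            for (String term : record.getValue().split("\\s+")) { // toy "analyzer": split on whitespace
                List<Integer> ids = vocabulary.get(term);
                if (ids == null) {
                    ids = new ArrayList<Integer>();
                    vocabulary.put(term, ids);
                }
                ids.add(record.getKey());
            }
        }
        System.out.println(vocabulary); // {hello=[1, 2], world=[1], lucene=[2]}
    }
}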
2. Why optimize the index library
By default, whenever a Document object is added to the index library, Lucene writes a compressed binary file with the *.cfs extension. As more Document objects are stored, the number of *.cfs files keeps growing and so does the overall size of the index library, which eventually hurts performance.
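To see this growth for yourself, list the index directory after a few adds. A minimal sketch, assuming the same D:/rk/indexDB path used in the listings below (the class name is mine):

import java.io.File;

// List the files in the index directory and report how many *.cfs segment files it contains.
public class IndexSizeCheck {
    public static void main(String[] args) {
        File indexDir = new File("D:/rk/indexDB");
        File[] files = indexDir.listFiles();
        if (files == null) {
            System.out.println("Index directory not found: " + indexDir);
            return;
        }
        long totalBytes = 0;
        int cfsCount = 0;
        for (File f : files) {
            totalBytes += f.length();
            if (f.getName().endsWith(".cfs")) {
                cfsCount++;
            }
            System.out.println(f.getName() + "\t" + f.length() + " bytes");
        }
        System.out.println(cfsCount + " *.cfs files, " + totalBytes + " bytes in total");
    }
}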
3. Index library optimization approaches
3.1 Merging cfs files
The merged cfs file is still compressed binary data; merging addresses the problem of file count and size.
indexWriter.addDocument(document);
indexWriter.optimize();
indexWriter.close();
3.2 Setting the merge factor
The merge factor makes Lucene merge cfs files automatically; by default, every 10 cfs files are merged into one. A smaller factor keeps fewer segments (faster searches, more merge work during indexing); a larger factor does the opposite.
indexWriter.setMergeFactor(3);
indexWriter.addDocument(document);
indexWriter.close();
3.3 Using RAMDirectory
RAMDirectory acts as an in-memory index library and addresses the speed of reading index files.
It trades space for time: access is much faster, but the contents are not persistent. The usual pattern is therefore to load the on-disk index into the in-memory index at startup and write the in-memory index back to the on-disk index on shutdown, taking care not to duplicate its contents.
Directory fsDirectory = FSDirectory.open(new File("D:/rk/indexDB"));
Directory ramDirectory = new RAMDirectory(fsDirectory);
IndexWriter fsIndexWriter = new IndexWriter(fsDirectory, LuceneUtils.getAnalyzer(), true, LuceneUtils.getMaxFieldLength());
IndexWriter ramIndexWriter = new IndexWriter(ramDirectory, LuceneUtils.getAnalyzer(), LuceneUtils.getMaxFieldLength());
ramIndexWriter.addDocument(document);
ramIndexWriter.close();
fsIndexWriter.addIndexesNoOptimize(ramDirectory);
fsIndexWriter.close();
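Because the point of the in-memory copy is faster reads, searches would normally run against the RAMDirectory as well. A minimal sketch continuing from the snippet above, using the Lucene 3.x API (it needs org.apache.lucene.search.TermQuery in addition to the imports shown in LuceneUtils.java below; the field names follow the Article entity assumed here, id/title/content):

// Query the in-memory index directly; the on-disk index is only used for persistence.
IndexSearcher searcher = new IndexSearcher(ramDirectory);
// Note: TermQuery is not analyzed, so pass the lowercased term as stored by StandardAnalyzer.
TopDocs hits = searcher.search(new TermQuery(new Term("content", "hello")), 10);
for (ScoreDoc sd : hits.scoreDocs) {
    Document hit = searcher.doc(sd.doc);
    System.out.println(hit.get("title") + " : " + hit.get("content"));
}
searcher.close();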
ArticleDao.java
package com.rk.lucene.a_optimize;
import java.io.File;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.junit.Test;
import com.rk.lucene.entity.Article;
import com.rk.lucene.utils.LuceneUtils;
/**
* Optimizing the index library
*/
public class ArticleDao {
/**
* Add a Document object to the index library
*/
@Test
public void testAdd01() throws Exception{
Article article = new Article(1, "你好", "世界,你好!Hello World");
Document document = LuceneUtils.javabean2document(article);
IndexWriter indexWriter = new IndexWriter(LuceneUtils.getDirectory(), LuceneUtils.getAnalyzer(), LuceneUtils.getMaxFieldLength());
indexWriter.addDocument(document);
indexWriter.close();
}
/**
* Merge cfs files; the merged cfs file is compressed binary data, which addresses the problem of file count and size
*/
@Test
public void testAdd02() throws Exception{
Article article = new Article(2, "你好", "世界,你好!Hello World");
Document document = LuceneUtils.javabean2document(article);
IndexWriter indexWriter = new IndexWriter(LuceneUtils.getDirectory(), LuceneUtils.getAnalyzer(), LuceneUtils.getMaxFieldLength());
indexWriter.addDocument(document);
// Merge the cfs files
indexWriter.optimize();
indexWriter.close();
}
/**
* Set the merge factor so that cfs files are merged automatically
*/
@Test
public void testAdd03() throws Exception{
Article article = new Article(3, "你好", "世界,你好!Hello World");
Document document = LuceneUtils.javabean2document(article);
IndexWriter indexWriter = new IndexWriter(LuceneUtils.getDirectory(), LuceneUtils.getAnalyzer(), LuceneUtils.getMaxFieldLength());
// Set the merge factor: merge once 3 cfs files have accumulated (set it before adding documents so it takes effect)
indexWriter.setMergeFactor(3);
indexWriter.addDocument(document);
indexWriter.close();
}
/**
* Use RAMDirectory as an in-memory index library to speed up reading the index files
*/
@Test
public void testAdd04() throws Exception{
Article article = new Article(4, "你好", "世界,你好!Hello World");
Document document = LuceneUtils.javabean2document(article);
// On-disk index library
Directory fsDirectory = FSDirectory.open(new File("D:/rk/indexDB"));
// In-memory index library, initialized with the contents of the on-disk index
Directory ramDirectory = new RAMDirectory(fsDirectory);
// IndexWriter for the on-disk index library. The boolean argument (create = true) recreates the index:
// the existing documents on disk are discarded before the in-memory documents are written back, which avoids duplicates.
// With false the writer appends to the existing index instead; the flag is passed in the constructor of the on-disk writer.
IndexWriter fsIndexWriter = new IndexWriter(fsDirectory, LuceneUtils.getAnalyzer(), true, LuceneUtils.getMaxFieldLength());
// IndexWriter for the in-memory index library
IndexWriter ramIndexWriter = new IndexWriter(ramDirectory, LuceneUtils.getAnalyzer(), LuceneUtils.getMaxFieldLength());
// Write the Document object into the in-memory index library
ramIndexWriter.addDocument(document);
ramIndexWriter.close();
// Copy all Document objects from the in-memory index library into the on-disk index library
fsIndexWriter.addIndexesNoOptimize(ramDirectory);
fsIndexWriter.close();
}
@Test
public void Search() throws Exception{
String keyword = "世界";
List<Article> list = LuceneUtils.search("content", keyword, 10, Article.class);
LuceneUtils.printList(list);
}
}
LuceneUtils.java
package com.rk.lucene.utils;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import com.rk.lucene.entity.Page;
public class LuceneUtils {
private static Directory directory;
private static Version version;
private static Analyzer analyzer;
private static MaxFieldLength maxFieldLength;
private static final String LUCENE_DIRECTORY= "D:/rk/indexDB";
static{
try {
directory = FSDirectory.open(new File(LUCENE_DIRECTORY));
version = Version.LUCENE_30;
analyzer = new StandardAnalyzer(version);
maxFieldLength = MaxFieldLength.LIMITED;
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
// Prevent callers from instantiating this helper class
private LuceneUtils(){}
public static <T> void pagination(Page<T> page,String field,String keyword,Class<T> clazz) throws Exception{
QueryParser queryParser = new QueryParser(getVersion(), field, getAnalyzer());
Query query = queryParser.parse(keyword);
IndexSearcher indexSearcher = new IndexSearcher(getDirectory());
TopDocs topDocs = indexSearcher.search(query, 200);
int totalHits = topDocs.totalHits;
int curPage = page.getCurPage();
int pageSize = page.getPageSize();
int quotient = totalHits / pageSize;
int remainder = totalHits % pageSize;
int totalPages = remainder==0 ? quotient : quotient+1;
int startIndex = (curPage-1) * pageSize;
int stopIndex = Math.min(startIndex + pageSize, totalHits);
List<T> list = page.getItems();
if(list == null){
list = new ArrayList<T>();
page.setItems(list);
}
list.clear();
for(int i=startIndex;i<stopIndex;i++){
ScoreDoc scoreDoc = topDocs.scoreDocs[i];
int docIndex = scoreDoc.doc;
Document document = indexSearcher.doc(docIndex);
T t = document2javabean(document, clazz);
list.add(t);
}
page.setTotalPages(totalPages);
page.setTotalItems(totalHits);
indexSearcher.close();
}
public static <T> void add(T t) throws Exception{
Document document = javabean2document(t);
IndexWriter indexWriter = new IndexWriter(getDirectory(), getAnalyzer(), getMaxFieldLength());
indexWriter.addDocument(document);
indexWriter.close();
}
public static <T> void addAll(List<T> list) throws Exception{
IndexWriter indexWriter = new IndexWriter(getDirectory(), getAnalyzer(), getMaxFieldLength());
for(T t : list){
Document doc = javabean2document(t);
indexWriter.addDocument(doc);
}
indexWriter.close();
}
public static <T> void update(String field,String value,T t) throws Exception{
Document document = javabean2document(t);
IndexWriter indexWriter = new IndexWriter(getDirectory(), getAnalyzer(), getMaxFieldLength());
indexWriter.updateDocument(new Term(field,value), document);
indexWriter.close();
}
public static <T> void delete(String field,String value) throws Exception{
IndexWriter indexWriter = new IndexWriter(getDirectory(), getAnalyzer(), getMaxFieldLength());
indexWriter.deleteDocuments(new Term(field,value));
indexWriter.close();
}
/**
* Delete all records
*/
public static void deleteAll() throws Exception {
IndexWriter indexWriter = new IndexWriter(getDirectory(), getAnalyzer(), getMaxFieldLength());
indexWriter.deleteAll();
indexWriter.close();
}
/**
* Search by keyword
*/
public static <T> List<T> search(String field,String keyword,int topN,Class<T> clazz) throws Exception{
List<T> list = new ArrayList<T>();
QueryParser queryParser = new QueryParser(getVersion(), field, getAnalyzer());
Query query = queryParser.parse(keyword);
IndexSearcher indexSearcher = new IndexSearcher(getDirectory());
TopDocs topDocs = indexSearcher.search(query, topN);
for(int i=0;i<topDocs.scoreDocs.length;i++){
ScoreDoc scoreDoc = topDocs.scoreDocs[i];
int docIndex = scoreDoc.doc;
System.out.println("文档索引号" + docIndex + ",文档得分:" + scoreDoc.score);
Document document = indexSearcher.doc(docIndex);
T entity = (T) document2javabean(document, clazz);
list.add(entity);
}
indexSearcher.close();
return list;
}
/**
* Print a List
*/
public static <T> void printList(List<T> list){
if(list != null && list.size()>0){
for(T t : list){
System.out.println(t);
}
}
}
// Convert a JavaBean into a Document object
public static Document javabean2document(Object obj) throws Exception{
// Create the Document object
Document document = new Document();
// Get the Class object of the bean
Class clazz = obj.getClass();
// Get all declared (including private) fields via reflection
java.lang.reflect.Field[] reflectFields = clazz.getDeclaredFields();
// Iterate over the fields
for(java.lang.reflect.Field reflectField : reflectFields){
// Make the private field accessible
reflectField.setAccessible(true);
// Field name
String name = reflectField.getName();
// Field value
String value = reflectField.get(obj).toString();
// Add it to the Document; the Document field names mirror the JavaBean property names
document.add(new Field(name, value, Store.YES, Index.ANALYZED));
}
// Return the Document object
return document;
}
// Convert a Document object back into a JavaBean
public static <T> T document2javabean(Document document,Class<T> clazz) throws Exception{
T obj = clazz.newInstance();
java.lang.reflect.Field[] reflectFields = clazz.getDeclaredFields();
for(java.lang.reflect.Field reflectField : reflectFields){
reflectField.setAccessible(true);
String name = reflectField.getName();
String value = document.get(name);
BeanUtils.setProperty(obj, name, value);
}
return obj;
}
public static Directory getDirectory() {
return directory;
}
public static void setDirectory(Directory directory) {
LuceneUtils.directory = directory;
}
public static Version getVersion() {
return version;
}
public static void setVersion(Version version) {
LuceneUtils.version = version;
}
public static Analyzer getAnalyzer() {
return analyzer;
}
public static void setAnalyzer(Analyzer analyzer) {
LuceneUtils.analyzer = analyzer;
}
public static MaxFieldLength getMaxFieldLength() {
return maxFieldLength;
}
public static void setMaxFieldLength(MaxFieldLength maxFieldLength) {
LuceneUtils.maxFieldLength = maxFieldLength;
}
}
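For completeness, a hedged usage sketch of the pagination helper above. The Page class is not shown in this post, so the no-arg constructor and the setCurPage/setPageSize/getTotalPages/getTotalItems members below are assumptions inferred from the accessors that pagination(...) calls; adjust them to the real Page API.

// Hypothetical usage of LuceneUtils.pagination; Page's constructor and accessors are assumed, not shown above.
Page<Article> page = new Page<Article>();
page.setCurPage(1);   // assumed setter matching getCurPage()
page.setPageSize(5);  // assumed setter matching getPageSize()
LuceneUtils.pagination(page, "content", "世界", Article.class);
System.out.println("total pages: " + page.getTotalPages() + ", total items: " + page.getTotalItems()); // assumed getters
LuceneUtils.printList(page.getItems());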
Reposted from: https://blog.51cto.com/lsieun/1852487