// Lucene index service implementation

package com.xxx.xxx.component.lucene.service.impl;


import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;


import org.apache.commons.lang.StringUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;


import com.chenlb.mmseg4j.analysis.MMSegAnalyzer;
import com.xxx.plat.component.lucene.model.Index;
import com.xxx.plat.component.lucene.model.IndexField;
import com.xxx.plat.component.lucene.service.IndexService;
import com.xxx.plat.component.lucene.util.IndexUtil;
//import org.apache.lucene.search.SearcherManager;


@Service("indexService")
public class IndexServiceImpl implements IndexService {

    private static final Logger log = LoggerFactory.getLogger(IndexServiceImpl.class);

    /**
     * Adds one document, built from the given {@link IndexField}, to the Lucene
     * index stored in the directory at {@code indexPath}.
     *
     * @param indexPath  filesystem path of the index directory
     * @param indexField field values to store in the new document
     */
    @Override
    public void addIndex(String indexPath, IndexField indexField) {
        IndexWriter writer = null;
        Directory dir = null;
        try {
            // Index directory on disk.
            dir = FSDirectory.open(new File(indexPath));
            writer = openWriter(dir);
            writer.addDocument(field2Doc(indexField));
            writer.commit();
        } catch (IOException e) {
            log.error("Failed to add document to index at {}", indexPath, e);
        } finally {
            closeWriterQuietly(writer, dir);
        }
    }

    /**
     * Searches the index at {@code indexPath}. When {@code isSearchSubDir} is
     * true, each immediate sub-directory of {@code indexPath} is treated as an
     * additional index and searched as well.
     *
     * @param queryStr       user query string
     * @param indexPath      root index directory
     * @param srchType       optional type filter (exact match on the TYPE field)
     * @param isSearchSubDir whether to include first-level sub-directories
     * @param startRow       paging offset
     * @param count          page size
     * @return map with keys {@code "pageList"} (List&lt;Index&gt;) and {@code "totalCount"}
     */
    @Override
    public Map<String, Object> searchFromRootIndex(String queryStr, String indexPath, String srchType,
            boolean isSearchSubDir, int startRow, int count) {
        if (!isSearchSubDir) {
            // Search only the given directory.
            return this.searchFromSingleIndex(queryStr, indexPath, srchType, startRow, count);
        }
        // Collect the root directory plus every first-level sub-directory.
        List<String> indexPaths = new ArrayList<String>();
        indexPaths.add(indexPath);
        File[] children = new File(indexPath).listFiles();
        if (children != null) { // listFiles() returns null on IO error or non-directory
            for (File child : children) {
                if (child.isDirectory()) {
                    indexPaths.add(child.getAbsolutePath());
                }
            }
        }
        return this.searchFromMultiIndex(queryStr, indexPaths, srchType, startRow, count);
    }

    /**
     * Searches several index directories at once through a {@link MultiReader}.
     *
     * @return map with keys {@code "pageList"} and {@code "totalCount"}
     */
    @Override
    public Map<String, Object> searchFromMultiIndex(String queryStr, List<String> indexPaths, String srchType,
            int startRow, int count) {
        Map<String, Object> resultMap = new HashMap<String, Object>();
        IndexReader[] readers = null;
        MultiReader multiReader = null;
        try {
            // Analyzer; the File argument is the MMSeg4j dictionary directory.
            Analyzer analyzer = new MMSegAnalyzer(new File(IndexUtil.MMSEG4J_PATH));
            readers = new IndexReader[indexPaths.size()];
            for (int i = 0; i < readers.length; i++) {
                readers[i] = IndexReader.open(FSDirectory.open(new File(indexPaths.get(i))));
            }
            multiReader = new MultiReader(readers);
            IndexSearcher searcher = new IndexSearcher(multiReader);

            Query query = buildQuery(queryStr, srchType, analyzer);

            // SortField.SCORE sorts by relevance; the field name is ignored for this type.
            Sort sort = new Sort(new SortField(IndexField.ID, SortField.SCORE, true));
            // NOTE(review): this method pages from startRow, while
            // searchFromSingleIndex pages from startRow - 1. Kept as-is to
            // preserve behavior — confirm which convention callers expect.
            TopFieldCollector collector =
                    TopFieldCollector.create(sort, startRow + count, false, false, false, false);
            searcher.search(query, collector);
            TopDocs tds = collector.topDocs(startRow, count);
            resultMap.put("pageList", buildResultBySearch(searcher, analyzer, query, tds));
            resultMap.put("totalCount", collector.getTotalHits());
        } catch (IOException e) {
            log.error("Multi-index search failed for paths {}", indexPaths, e);
        } catch (ParseException e) {
            log.error("Could not parse query '{}'", queryStr, e);
        } catch (InvalidTokenOffsetsException e) {
            log.error("Highlighting failed for query '{}'", queryStr, e);
        } finally {
            if (multiReader != null) {
                // Closing the MultiReader also closes its sub-readers.
                try {
                    multiReader.close();
                } catch (IOException e) {
                    log.warn("Failed to close multi reader", e);
                }
            } else if (readers != null) {
                // Opening failed part-way through: close whatever was opened.
                for (IndexReader r : readers) {
                    if (r != null) {
                        try {
                            r.close();
                        } catch (IOException e) {
                            log.warn("Failed to close index reader", e);
                        }
                    }
                }
            }
        }
        return resultMap;
    }

    /**
     * Searches a single index directory.
     *
     * @param queryStr  user query string
     * @param indexPath index directory
     * @param srchType  optional type filter
     * @return map with keys {@code "pageList"} and {@code "totalCount"}
     */
    private Map<String, Object> searchFromSingleIndex(String queryStr, String indexPath, String srchType,
            int startRow, int count) {
        Map<String, Object> resultMap = new HashMap<String, Object>();
        IndexReader reader = null;
        try {
            // Analyzer; the File argument is the MMSeg4j dictionary directory.
            Analyzer analyzer = new MMSegAnalyzer(new File(IndexUtil.MMSEG4J_PATH));
            reader = IndexReader.open(FSDirectory.open(new File(indexPath)));
            IndexSearcher searcher = new IndexSearcher(reader);

            Query query = buildQuery(queryStr, srchType, analyzer);

            // SortField.SCORE sorts by relevance; the field name is ignored for this type.
            Sort sort = new Sort(new SortField(IndexField.ID, SortField.SCORE, true));
            // NOTE(review): pages from startRow - 1 here, but from startRow in
            // searchFromMultiIndex — kept as-is; confirm the intended convention.
            TopFieldCollector collector =
                    TopFieldCollector.create(sort, startRow - 1 + count, false, false, false, false);
            searcher.search(query, collector);
            TopDocs tds = collector.topDocs(startRow - 1, count);
            resultMap.put("pageList", buildResultBySearch(searcher, analyzer, query, tds));
            resultMap.put("totalCount", collector.getTotalHits());
        } catch (IOException e) {
            log.error("Search failed for index at {}", indexPath, e);
        } catch (ParseException e) {
            log.error("Could not parse query '{}'", queryStr, e);
        } catch (InvalidTokenOffsetsException e) {
            log.error("Highlighting failed for query '{}'", queryStr, e);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e) {
                    log.warn("Failed to close index reader", e);
                }
            }
        }
        return resultMap;
    }

    /**
     * Builds the search query: (title OR content matches keyword), optionally
     * AND-ed with an exact match on the TYPE field.
     *
     * @throws ParseException if {@code queryStr} cannot be parsed
     */
    private Query buildQuery(String queryStr, String srchType, Analyzer analyzer) throws ParseException {
        BooleanQuery keywordQuery = new BooleanQuery(); // keyword clauses (title/content)
        QueryParser titleParser = new QueryParser(Version.LUCENE_35, IndexField.TITLE, analyzer);
        keywordQuery.add(titleParser.parse(queryStr), BooleanClause.Occur.SHOULD);
        QueryParser contentParser = new QueryParser(Version.LUCENE_35, IndexField.CONTENT, analyzer);
        keywordQuery.add(contentParser.parse(queryStr), BooleanClause.Occur.SHOULD);

        if (StringUtils.isBlank(srchType)) {
            return keywordQuery;
        }
        // Type filter: TermQuery matches the stored value exactly, no analysis.
        BooleanQuery typeQuery = new BooleanQuery();
        typeQuery.add(new TermQuery(new Term(IndexField.TYPE, srchType)), BooleanClause.Occur.MUST);
        BooleanQuery combined = new BooleanQuery(); // final combination
        combined.add(keywordQuery, BooleanClause.Occur.MUST);
        combined.add(typeQuery, BooleanClause.Occur.MUST);
        return combined;
    }

    /**
     * Converts raw search hits into {@link Index} result objects, applying
     * highlighting to title and summary fields.
     *
     * @throws InvalidTokenOffsetsException if highlighting fails
     * @throws CorruptIndexException        if the index is corrupt
     * @throws IOException                  on index read errors
     */
    private List<Index> buildResultBySearch(IndexSearcher searcher, Analyzer analyzer, Query query, TopDocs tds)
            throws InvalidTokenOffsetsException, CorruptIndexException, IOException {
        List<Index> indexList = new ArrayList<Index>();
        for (ScoreDoc sd : tds.scoreDocs) {
            Index index = new Index();
            Document doc = searcher.doc(sd.doc);
            String objId = doc.get(IndexField.OBJ_ID);
            String type = doc.get(IndexField.TYPE);
            String title = doc.get(IndexField.TITLE);
            String summary = doc.get(IndexField.CONTENT);
            index.setObjId(Long.parseLong(objId));
            title = lighterStr(analyzer, query, title, Index.TITLE);
            if (StringUtils.equals(type, IndexUtil.TYPE_OPUS)) {
                index.setTitle(title);
                log.info("title: {}", title);
            } else if (StringUtils.equals(type, IndexUtil.TYPE_ATTACHMENT)) {
                // For attachments, the parent's title becomes the main title and
                // the attachment's own title is stored as the sub-title.
                String parentTitle = doc.get(IndexField.PARENT_TITLE);
                String parentId = doc.get(IndexField.PARENT_ID);
                parentTitle = lighterStr(analyzer, query, parentTitle, IndexField.PARENT_TITLE);
                index.setObjId(Long.parseLong(parentId));
                index.setTitle(parentTitle);
                index.setAttachId(Long.parseLong(objId));
                index.setAttachTitle(title);
            } else if (StringUtils.equals(type, IndexUtil.BUZILOG)) {
                index.setTitle(lighterStr(analyzer, query, title, Index.TITLE));
            }
            summary = lighterStr(analyzer, query, summary, Index.SUMMARY);
            index.setSummary(summary);
            indexList.add(index);
            log.info("summary: {}", summary);
        }
        return indexList;
    }

    /**
     * Highlights query terms in {@code txt} with a red HTML span. Falls back to
     * a truncated (200-char) preview of the raw text when nothing matches.
     *
     * @param a         analyzer used to tokenize the text
     * @param query     query whose terms should be highlighted
     * @param txt       stored field text (may be null for missing fields)
     * @param fieldname field the text came from
     * @throws IOException                  on token stream errors
     * @throws InvalidTokenOffsetsException if token offsets are invalid
     */
    private String lighterStr(Analyzer a, Query query, String txt, String fieldname)
            throws IOException, InvalidTokenOffsetsException {
        if (txt == null) {
            return ""; // missing stored field — nothing to highlight
        }
        QueryScorer scorer = new QueryScorer(query);
        // HTML markup wrapped around each highlighted term.
        Formatter fmt = new SimpleHTMLFormatter("<span style='color:red'>", "</span>");
        Highlighter lighter = new Highlighter(fmt, scorer);
        lighter.setTextFragmenter(new SimpleSpanFragmenter(scorer));
        String fragment = lighter.getBestFragment(a, fieldname, txt);
        if (StringUtils.isBlank(fragment)) {
            // No query term in this field: return a truncated preview instead.
            return txt.length() >= 200 ? txt.substring(0, 200) + "..." : txt;
        }
        return fragment.trim();
    }

    /**
     * Converts an {@link IndexField} into a Lucene {@link Document}.
     * Title/content are analyzed; id/type fields are stored verbatim.
     * NOTE(review): assumes id/title/content/objId/parentId/type are non-null —
     * Field rejects null values; confirm callers guarantee this.
     */
    private Document field2Doc(IndexField field) {
        Document doc = new Document();
        doc.add(new Field(IndexField.ID, field.getId(), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
        doc.add(new Field(IndexField.TITLE, field.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field(IndexField.CONTENT, field.getContent(), Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field(IndexField.OBJ_ID, field.getObjId(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field(IndexField.PARENT_ID, field.getParentId(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field(IndexField.PARENT_TITLE,
                StringUtils.isBlank(field.getParentTitle()) ? "" : field.getParentTitle(),
                Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
        doc.add(new Field(IndexField.TYPE, field.getType(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        return doc;
    }

    /**
     * Deletes every document whose OBJ_ID and TYPE both match exactly.
     *
     * @param indexPath index directory
     * @param indexFlag value of the TYPE field to match
     * @param objId     value of the OBJ_ID field to match
     */
    @Override
    public void deleteIndex(String indexPath, String indexFlag, String objId) {
        IndexWriter writer = null;
        Directory dir = null;
        try {
            // Index directory on disk.
            dir = FSDirectory.open(new File(indexPath));
            writer = openWriter(dir);

            // TermQuery matches stored values exactly, without analysis.
            BooleanQuery deleteQuery = new BooleanQuery();
            deleteQuery.add(new TermQuery(new Term(IndexField.OBJ_ID, objId)), BooleanClause.Occur.MUST);
            deleteQuery.add(new TermQuery(new Term(IndexField.TYPE, indexFlag)), BooleanClause.Occur.MUST);

            writer.deleteDocuments(deleteQuery);
            writer.commit();
        } catch (IOException e) {
            log.error("Failed to delete documents (type={}, objId={}) from index at {}",
                    new Object[] { indexFlag, objId, indexPath, e });
        } finally {
            closeWriterQuietly(writer, dir);
        }
    }

    /**
     * Updates an index entry by deleting the old document and adding a new one.
     * NOTE(review): @Transactional has no effect on Lucene index files (they are
     * not a Spring-managed transactional resource) — confirm whether it can be removed.
     */
    @Override
    @Transactional
    public void updateIndex(String indexPath, String indexFlag, IndexField indexField) {
        // Delete the stale document first…
        deleteIndex(indexPath, indexFlag, indexField.getObjId());
        // …then add the fresh one.
        addIndex(indexPath, indexField);
    }

    /**
     * Opens an {@link IndexWriter} on {@code dir} configured with the MMSeg4j
     * analyzer and the shared merge policy. Caller is responsible for closing.
     *
     * @throws IOException if the writer cannot be created
     */
    private IndexWriter openWriter(Directory dir) throws IOException {
        // Analyzer; the File argument is the MMSeg4j dictionary directory.
        Analyzer analyzer = new MMSegAnalyzer(new File(IndexUtil.MMSEG4J_PATH));
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_35, analyzer);
        config.setMergePolicy(optimizeIndex());
        // Append to an existing index, or create one if absent.
        config.setOpenMode(OpenMode.CREATE_OR_APPEND);
        return new IndexWriter(dir, config);
    }

    /**
     * Closes {@code writer} (if open) and releases the directory write lock if
     * it is still held, logging instead of propagating any failure. The unlock
     * runs even when writer creation failed, so a crashed open cannot leave the
     * index permanently locked.
     */
    private void closeWriterQuietly(IndexWriter writer, Directory dir) {
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) { // CorruptIndexException is an IOException
                log.error("Failed to close index writer", e);
            }
        }
        try {
            if (dir != null && IndexWriter.isLocked(dir)) {
                IndexWriter.unlock(dir);
            }
        } catch (IOException e) {
            log.error("Failed to unlock index directory", e);
        }
    }

    /**
     * Builds the merge policy used by all writers.
     *
     * @return a byte-size-based log merge policy
     */
    private static LogMergePolicy optimizeIndex() {
        LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();

        // Segment merge frequency while adding documents:
        // smaller values -> slower indexing; larger (>10) suits batch indexing.
        // Segments are merged once 20 of them accumulate.
        mergePolicy.setMergeFactor(20);

        // Maximum documents per merged segment:
        // smaller favors incremental indexing speed; larger favors batch
        // indexing and faster searches.
        // mergePolicy.setMaxMergeDocs(5000);

        // Use the compound file format (fewer open files per segment).
        mergePolicy.setUseCompoundFile(true);
        return mergePolicy;
    }
}
