Lucene 4.7.2 Simple Example 2

/*
 * Copyright (C) 2015 ShenZhen tianlang Co.,Ltd All Rights Reserved.
 * This software may not be used, copied, modified, or distributed by any individual or organization without the company's prior written consent.
 * Copyright ShenZhen tianlang Co.,Ltd, www.tianlang.com.
 */
package com.tg.lucene.userdemo;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

/**
 *
 * @version 2016-11-25 10:24:12
 * @author wuliu
 */
public class Demo {
    // Path of the source text file to index
    private static String filePath = "G:/lucene/a.txt";
    // Directory where the index will be stored
    private static String indexDirectory = "G:/lucene/index2";
    // Lucene version
    private static final Version VERSION = Version.LUCENE_47;
    
    public static void createIndex() throws Throwable {
        // Open the directory where the index will be written
        Directory directory = FSDirectory.open(new File(indexDirectory));
        // Create an analyzer
        Analyzer analyzer = new StandardAnalyzer(VERSION);
        // Index writer configuration
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(VERSION, analyzer);
        IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);
        indexWriter.deleteAll();
        Document document = new Document();
        // Read the content of the text file
        String fileContent = fileToStr(filePath);
        // Title: a StringField is indexed and stored, but not tokenized
        Field field1 = new StringField("title", "lucene测试", Store.YES);
        // Content: a TextField is indexed and tokenized (with Store.NO the value is not stored, so document.get() returns null)
        Field field2 = new TextField("content", fileContent, Store.YES);
        Field field3 = new DoubleField("version", 1.2, Store.YES);
        Field field4 = new IntField("score", 90, Store.YES);
        document.add(field1);
        document.add(field2);
        document.add(field3);
        document.add(field4);
        indexWriter.addDocument(document);
        indexWriter.commit();
        indexWriter.close();
    }
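
    /**
     * A minimal sketch (the helper name storeNoDemo is illustrative, not part of the
     * original example): it verifies the Store.NO note above -- a TextField indexed
     * with Store.NO is still searchable, but document.get() returns null because the
     * raw value is not stored. An in-memory RAMDirectory is used so the on-disk index
     * stays untouched; fully qualified names are used for classes not imported above.
     */
    public static void storeNoDemo() throws Throwable {
        Directory directory = new org.apache.lucene.store.RAMDirectory();
        Analyzer analyzer = new StandardAnalyzer(VERSION);
        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(VERSION, analyzer));
        Document doc = new Document();
        // Indexed and tokenized, but NOT stored
        doc.add(new TextField("body", "hello lucene", Store.NO));
        writer.addDocument(doc);
        writer.close();

        IndexReader reader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new org.apache.lucene.search.TermQuery(
                new org.apache.lucene.index.Term("body", "lucene"));
        TopDocs hits = searcher.search(query, 10);
        // The field can be matched...
        System.out.println("hits: " + hits.totalHits);
        // ...but the stored value comes back as null
        System.out.println("stored value: " + searcher.doc(hits.scoreDocs[0].doc).get("body"));
        reader.close();
    }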
    
    /**
     * Read the content of a file into a string.
     *
     * @version 2016-11-25 10:33:03
     * @author wuliu
     * @param path path of the file to read
     * @return the file content as a single string
     * @throws Throwable
     */
    private static String fileToStr(String path) throws Throwable{
        StringBuffer sb = new StringBuffer();
        InputStream is = new FileInputStream(path);
        InputStreamReader isr = new InputStreamReader(is, "GBK"); // the file is GBK-encoded; GBK (or GB2312) decodes the Chinese text correctly
        BufferedReader br = new BufferedReader(isr);
        String line = "";
        while ((line = br.readLine()) != null) {
            sb.append(line);
        }
        br.close(); // closing the outermost reader also closes isr and is
        return sb.toString();
    }
    
    /**
     * Search the "content" field of the index with the given keyword.
     *
     * @version 2016-11-25 10:42:45
     * @author wuliu
     */
    public static void search(String keyword) throws Throwable{
        IndexReader indexReader = DirectoryReader.open(FSDirectory.open(new File(indexDirectory)));
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        QueryParser queryParser = new QueryParser(VERSION, "content", new StandardAnalyzer(VERSION));
        Query query = queryParser.parse(keyword);
        TopDocs topDocs = indexSearcher.search(query, 100);
        System.out.println("查询结果总数:" + topDocs.totalHits);
        ScoreDoc scoreDoc[] = topDocs.scoreDocs;
        for(int i = 0;i < scoreDoc.length; i++){
            // internal document id of this hit
            int id = scoreDoc[i].doc;
            Document document = indexSearcher.doc(id);
            System.out.println("内容:"+document.get("content"));
            System.out.println("标题:"+document.get("title"));
            System.out.println("版本:"+document.get("version"));
            System.out.println("评分:"+document.get("score"));
        }
    }
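
    /**
     * A minimal sketch (the method name searchScoreRange is illustrative, not part of
     * the original example): the IntField/DoubleField values added in createIndex() are
     * indexed as numeric values, so range queries on them are normally built with
     * NumericRangeQuery rather than parsed from a query string.
     */
    public static void searchScoreRange() throws Throwable {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexDirectory)));
        IndexSearcher searcher = new IndexSearcher(reader);
        // Match documents whose "score" field lies in [80, 100], both ends inclusive
        Query query = org.apache.lucene.search.NumericRangeQuery.newIntRange("score", 80, 100, true, true);
        TopDocs topDocs = searcher.search(query, 10);
        System.out.println("score in [80, 100]: " + topDocs.totalHits);
        for (ScoreDoc sd : topDocs.scoreDocs) {
            Document doc = searcher.doc(sd.doc);
            System.out.println(doc.get("title") + " -> " + doc.get("score"));
        }
        reader.close();
    }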
    
    public static void test() throws Throwable{
        String str = "我是中国人";
        // StandardAnalyzer: splits Chinese text into individual characters
//        Analyzer analyzer = new StandardAnalyzer(VERSION);
        // SimpleAnalyzer: splits on non-letter characters, so the whole Chinese string stays one token
//        Analyzer analyzer = new SimpleAnalyzer(VERSION);
        // CJKAnalyzer: bigram segmentation (every two adjacent characters form a token)
//        Analyzer analyzer = new CJKAnalyzer(VERSION);
        // IKAnalyzer: dictionary-based Chinese segmentation (false = fine-grained mode, true = smart mode)
        Analyzer analyzer = new IKAnalyzer(false);
        // Build a token stream for the text
        TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(str));
        // Attribute that exposes each token's term text
        CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
        // Reset the stream before iterating
        tokenStream.reset();
        while (tokenStream.incrementToken()) { // iterate over the tokens
            System.out.println(new String(charTermAttribute.buffer(), 0, charTermAttribute.length()));
        }
        tokenStream.end();
        tokenStream.close();
    }
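
    /**
     * A minimal sketch (the method name compareAnalyzers is illustrative, not part of
     * the original example): it runs the same Chinese sentence through the analyzers
     * listed in test(), so the per-character, bigram, and dictionary-based behaviours
     * can be compared side by side. It assumes lucene-analyzers-common (which provides
     * SimpleAnalyzer and CJKAnalyzer) is on the classpath, as the commented-out lines
     * in test() imply; fully qualified names are used since they are not imported above.
     */
    public static void compareAnalyzers() throws Throwable {
        String str = "我是中国人";
        Analyzer[] analyzers = {
                new StandardAnalyzer(VERSION),
                new org.apache.lucene.analysis.core.SimpleAnalyzer(VERSION),
                new org.apache.lucene.analysis.cjk.CJKAnalyzer(VERSION),
                new IKAnalyzer(false)
        };
        for (Analyzer analyzer : analyzers) {
            System.out.print(analyzer.getClass().getSimpleName() + ": ");
            TokenStream ts = analyzer.tokenStream("content", new StringReader(str));
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.print(term.toString() + " ");
            }
            ts.end();
            ts.close();
            System.out.println();
        }
    }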
    
    public static void main(String[] args) throws Throwable {
//        createIndex();
//        search("xml");
        test();
    }
    
}

