Querying two phrases in the same sentence with Lucene

Lucene is a mature, free Java library for building full-text indexes, and a beginner can build a simple index without first studying how it works internally. The code below shows one simple way to create such an index (if anything is missing, please point it out, I'd appreciate it). If, in addition, you want to check whether two phrases occur in the same sentence, the following approach may help:

        Note that in the BooleanQuery part of the code below, each keyword must be initialized as a String[]. You can follow my initialization of the keywords key1 and key2; the points to watch are listed below and illustrated by the short sketch after the list:

① key1 and key2 should be String[] arrays;

② you can first put the keyword into a String such as key1_String, then use split to fill key1;

③ Lucene's default analyzer does not keep symbols such as "-", so strip them from the keyword with replace (here "-" is replaced by a space);

④ lowercase the key phrases before searching, because the default analyzer lowercases terms at index time, so the index only contains lowercase tokens.
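
To illustrate points ① to ④, here is a minimal, self-contained sketch; the class name KeywordSketch is just for this illustration, and the sample keyword "COVID-19" is the same one used in the full program further down:

import java.util.Arrays;
import java.util.Locale;
import org.apache.lucene.search.PhraseQuery;

public class KeywordSketch {
    public static void main(String[] args) {
        // ② start from a plain String keyword
        String key1_String = "COVID-19";
        // ③ replace "-" with a space, ④ lowercase, ①/② split into a String[]
        String[] key1 = key1_String.replace('-', ' ').toLowerCase(Locale.ROOT).split(" ");
        System.out.println(Arrays.toString(key1));   // [covid, 19] -- matches the indexed tokens
        // slop 0: the terms must appear next to each other and in this order
        PhraseQuery query1 = new PhraseQuery(0, "Sentence", key1);
        System.out.println(query1);                  // roughly: Sentence:"covid 19"
    }
}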

The code below, written while following a senior student in my lab, builds the index and then retrieves the sentences that contain both phrases. Because every sentence is indexed as its own Document, a BooleanQuery with two MUST PhraseQuery clauses can only match sentences that contain both phrases; feel free to jump straight to the BooleanQuery part, which is what this post wants to highlight:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.junit.Test;

import java.io.*;
import java.text.BreakIterator;
import java.util.LinkedHashMap;
import java.util.Locale;

public class LuceneFirst {

    @Test
    public void createIndex() throws IOException {
//        Path where the index library will be stored
        String out_path = "C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\Index";
//        Save the index to files on disk
        Directory directory = FSDirectory.open(new File(out_path).toPath());
//        Create an IndexWriter on top of the Directory
        IndexWriter indexWriter = new IndexWriter(directory,new IndexWriterConfig());
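//        the no-argument IndexWriterConfig() uses the StandardAnalyzer, which lowercases tokens
//        and splits on symbols such as "-"; the query keywords are preprocessed below to match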
//        Read the input file from disk; one Document will be created for every sentence in it
        String in_Path = "C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\test_json_to_text(100篇).txt";
        String out_Path = "C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\提取句子测试(Itetator 2.0版本).txt";
        String out_Table_Path = "C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\id表.txt";
        File file_Input = new File(in_Path);
        BufferedWriter bw = new BufferedWriter(new FileWriter(out_Path,true));
        BufferedWriter bw_table = new BufferedWriter(new FileWriter(out_Table_Path,true));
        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(file_Input), "UTF-8"));//构造一个BufferedReader类来读取文件
        long length_Sentence = 0;
//      Start sentence splitting, one input line (one paper) at a time
        String file_string = null;
        bw_table.write("        doc_id\t\t\t\t\t\tsen_id_start\t\tsen_id_end\t\tabstract_length\n");
        while((file_string = br.readLine())!=null)
        {
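            // each input line holds one paper: extract its id, abstract and body text by offset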
            String id_String = file_string.substring(file_string.indexOf("Paper_id=")+10,file_string.indexOf("Abstract")-1);
            String abstract_String = file_string.substring(file_string.indexOf("Abstract")+15,file_string.indexOf("Body_text=")-1);
            String body_String = file_string.substring(file_string.indexOf("Body_text")+11);
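            // BreakIterator splits the text into sentences using US-English sentence rules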
            BreakIterator iterator = BreakIterator.getSentenceInstance(Locale.US);
            iterator.setText(abstract_String);
            int start = iterator.first();
            long length_abstract = 0;
//            Record the paper id and the number of its first sentence in the id table:
            bw_table.write(id_String+"\t\t\t"+length_Sentence+"\t\t\t");
            for (int end = iterator.next();
                 end != BreakIterator.DONE;
                 start = end, end = iterator.next()) {
                bw.write(length_Sentence + "\t" + abstract_String.substring(start, end) + '\n');
                length_Sentence++;
                length_abstract++;
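                // each sentence becomes its own Lucene Document, so a BooleanQuery over the
                // "Sentence" field can only match phrases that occur inside the same sentence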
                org.apache.lucene.document.Field field_Sentence = new TextField("Sentence",abstract_String.substring(start, end), org.apache.lucene.document.Field.Store.YES);
                // store the same (pre-increment) sentence number that was written to the text file
                // and the id table; otherwise the mapping back to paper ids is off by one
                org.apache.lucene.document.Field field_Number = new TextField("Number",(length_Sentence-1)+"", org.apache.lucene.document.Field.Store.YES);
                Document document = new Document();
                document.add(field_Sentence);
                document.add(field_Number);
                indexWriter.addDocument(document);
            }
            iterator.setText(body_String);
            start = iterator.first();
            for (int end = iterator.next();
                 end != BreakIterator.DONE;
                 start = end, end = iterator.next()) {
                bw.write(length_Sentence + "\t" + body_String.substring(start, end) + '\n');
                length_Sentence++;
                org.apache.lucene.document.Field field_Sentence = new TextField("Sentence",body_String.substring(start, end), org.apache.lucene.document.Field.Store.YES);
                // store the pre-increment sentence number here as well, to stay consistent with the id table
                org.apache.lucene.document.Field field_Number = new TextField("Number",(length_Sentence-1)+"", org.apache.lucene.document.Field.Store.YES);
                Document document = new Document();
                document.add(field_Sentence);
                document.add(field_Number);
                indexWriter.addDocument(document);
            }
            bw_table.write((length_Sentence-1)+"\t\t\t");
            bw_table.write(length_abstract+"\n");
        }
//        Close the IndexWriter and the input/output streams
        indexWriter.close();
        br.close();
        bw_table.close();
        bw.close();

    }

    @Test
    public void searchIndex() throws IOException {
//        BufferedReader for the id table written by createIndex()
        BufferedReader br_ReadTable = new BufferedReader(new InputStreamReader(new FileInputStream("C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\id表.txt"),"UTF-8"));
//        Build a map from each paper's last sentence number (sen_id_end) to its paper id
        String s = br_ReadTable.readLine();    // skip the header row
        LinkedHashMap<Long,String> map = new LinkedHashMap<Long,String>();
        while((s = br_ReadTable.readLine())!= null){
            String[] id_String = s.split("\t\t\t");
            Long end_Number = Long.parseLong(id_String[2]);
            map.put(end_Number,id_String[0]);
        }
//        1. Open a Directory object that points at the index location
        String out_path = "C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\Index";
        Directory directory = FSDirectory.open(new File(out_path).toPath());
//        2. Create an IndexReader
        IndexReader indexReader = DirectoryReader.open(directory);
//        3. Create an IndexSearcher
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
//        4. Build the query
//        enter key1 and key2 here
        String key_String1 = "COVID-19";
        String key_String2 = "cancer";
        String[] key1 = key_String1.replace('-',' ').toLowerCase(Locale.ROOT).split(" ");
        String[] key2 = key_String2.toLowerCase(Locale.ROOT).split(" ");
//        Build a PhraseQuery from each split keyword
        PhraseQuery query1 = new PhraseQuery(0,"Sentence",key1);
        PhraseQuery query2 = new PhraseQuery(0,"Sentence",key2);
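//        a slop of 0 means the terms of each phrase must appear next to each other, in order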
//        Query query1 = new TermQuery(new Term("Sentence",key1));
//        Query query2 = new TermQuery(new Term("Sentence",key2));
//        Use a BooleanQuery to find the sentences in which both phrases occur
        BooleanQuery.Builder query = new BooleanQuery.Builder();
//        a matching document (sentence) must contain both query1 and query2
        query.add(query1,BooleanClause.Occur.MUST);
        query.add(query2,BooleanClause.Occur.MUST);
//        Create the output table that records the key1/key2 matches
        String out_Write_Table_Path = "C:\\Users\\win10\\Desktop\\21暑假\\data\\toy\\json\\"+key_String1+'+'+key_String2+".txt";
        BufferedWriter bw_table = new BufferedWriter(new FileWriter(out_Write_Table_Path,true));
        bw_table.write("key1"+"\t\t\t"+"key2"+"\t\t\t"+"doc_id" +"\t\t\t"+"sen_text"+"\n");
//        5. Run the query and get a TopDocs object (the huge n simply means "return every hit")
        TopDocs topDocs = indexSearcher.search(query.build(),999999999);
//        6. Total number of hits
        TotalHits totalHits = topDocs.totalHits;
        System.out.println("查询总记录数"+totalHits);
//        7. Get the list of matching documents
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
//        8. Print the content of each matching document
        long start_Time = System.nanoTime();
        System.out.println(start_Time);
        for(ScoreDoc scoreDoc: scoreDocs){
            // Lucene's internal document id
            int doc_Id = scoreDoc.doc;
            // fetch the stored Document by its id
            Document document = indexSearcher.doc(doc_Id);
//            System.out.println(key1+"\t"+key2+"\t"+document.get("Sentence"));
            String doc_id = null;
//            System.out.println(document.get("Number"));
            long key_Number = Long.parseLong(document.get("Number"));
            // the id-table entries are in ascending order, so the first entry whose sen_id_end is
            // >= this sentence's number tells us which paper the sentence came from
            for(Long end : map.keySet()){
                if(key_Number<=end) {
                    doc_id = map.get(end);
                    System.out.println(doc_id);
                    break;
                }
            }
            System.out.println("-------------------寂寞的分割线");
            bw_table.write(key1+"\t\t\t"+key2+"\t\t\t"+doc_id+"\t\t\t"+document.get("Sentence")+'\n');
        }

        System.out.println(System.nanoTime());
//        9. Close the IndexReader and the reader/writer streams
        indexReader.close();
        br_ReadTable.close();
        bw_table.close();
    }
}

        If you have also been struggling with how to find, in a Lucene index, the sentences in which
        two key phrases occur together, I hope this post helps. This is my first post, so please
        forgive anything that is not explained in enough detail!

        
