今天花了一个晚上的时间来调试下面这个“简单”的程序,结果还是没成功,也不知是什么原因,先记录下代码吧,可能有些知识还没学,不会用。
package
ch2.lucenedmo.delete;
import java.io.IOException;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
public class indexDelete
{
private String INDEX_STORE_PATH = " d:indexdelete " ;
public indexDelete()
{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH,
new MMAnalyzer(), true );
writer.setUseCompoundFile( false );
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Field f1 = new Field( " bookname " , " 钢铁是怎样炼成的 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f2 = new Field( " bookname " , " 英雄儿女 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f3 = new Field( " bookname " , " 篱笆女人和狗 " ,Field.Store.YES,Field.Index.TOKENIZED);
doc1.add(f1);
doc2.add(f2);
doc3.add(f3);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.close();
IndexReader reader = IndexReader.open(INDEX_STORE_PATH);
System.out.println( " 索引文档列表…… " );
for ( int i = 0 ;i < reader.numDocs();i ++ )
{
System.out.println(reader.document(i));
}
System.out.println( " 版本: " + reader.getVersion());
System.out.println( " 索引内的文档数量: " + reader.numDocs());
System.out.println();
Term term1 = new Term( " bookname " , " 女 " );
TermDocs docs = reader.termDocs(term1);
while (docs.next())
{
System.out.println( " ------------ " );
System.out.println( " 含有所查找的《 " + term1 + " 》的Document的编号为 " + docs.doc());
System.out.println( "" + docs.freq());
System.out.println( " ------------ " );
}
reader.close();
}
public static void main(String[] args)
{
new indexDelete();
}
}
import java.io.IOException;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
public class indexDelete
{
private String INDEX_STORE_PATH = " d:indexdelete " ;
public indexDelete()
{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH,
new MMAnalyzer(), true );
writer.setUseCompoundFile( false );
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Field f1 = new Field( " bookname " , " 钢铁是怎样炼成的 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f2 = new Field( " bookname " , " 英雄儿女 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f3 = new Field( " bookname " , " 篱笆女人和狗 " ,Field.Store.YES,Field.Index.TOKENIZED);
doc1.add(f1);
doc2.add(f2);
doc3.add(f3);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.close();
IndexReader reader = IndexReader.open(INDEX_STORE_PATH);
System.out.println( " 索引文档列表…… " );
for ( int i = 0 ;i < reader.numDocs();i ++ )
{
System.out.println(reader.document(i));
}
System.out.println( " 版本: " + reader.getVersion());
System.out.println( " 索引内的文档数量: " + reader.numDocs());
System.out.println();
Term term1 = new Term( " bookname " , " 女 " );
TermDocs docs = reader.termDocs(term1);
while (docs.next())
{
System.out.println( " ------------ " );
System.out.println( " 含有所查找的《 " + term1 + " 》的Document的编号为 " + docs.doc());
System.out.println( "" + docs.freq());
System.out.println( " ------------ " );
}
reader.close();
}
public static void main(String[] args)
{
new indexDelete();
}
}
终于找到病因了,首先上述的代码格式本来没有错但不好,请看下面的代码
package
ch3.lucenedemo.createindex;
import java.io.IOException;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
public class indexDelete
{
private static String INDEX_STORE_PATH = " d:/indexdelete1 " ;
public static void main(String[] args) throws IOException
{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH,
new MMAnalyzer(), true );
writer.setUseCompoundFile( false );
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Field f1 = new Field( " bookname " , " 钢铁是怎样炼成的 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f2 = new Field( " bookname " , " 英雄儿女 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f3 = new Field( " bookname " , " 篱笆女人和狗 " ,Field.Store.YES,Field.Index.TOKENIZED);
doc1.add(f1);
doc2.add(f2);
doc3.add(f3);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.close();
IndexReader reader = IndexReader.open(INDEX_STORE_PATH);
System.out.println( " 索引文档列表…… " );
for ( int i = 0 ;i < reader.numDocs();i ++ )
{
System.out.println(reader.document(i));
}
System.out.println( " 版本: " + reader.getVersion());
System.out.println( " 索引内的文档数量: " + reader.numDocs());
System.out.println();
Term term1 = new Term( " bookname " , " 女 " );
TermDocs docs = reader.termDocs(term1);
while (docs.next())
{
System.out.println( " ------------ " );
System.out.println( " 含有所查找的《 " + term1 + " 》的Document的编号为 " + docs.doc());
System.out.println( "" + docs.freq());
System.out.println( " ------------ " );
}
reader.close();
}
}
import java.io.IOException;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
public class indexDelete
{
private static String INDEX_STORE_PATH = " d:/indexdelete1 " ;
public static void main(String[] args) throws IOException
{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH,
new MMAnalyzer(), true );
writer.setUseCompoundFile( false );
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Field f1 = new Field( " bookname " , " 钢铁是怎样炼成的 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f2 = new Field( " bookname " , " 英雄儿女 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f3 = new Field( " bookname " , " 篱笆女人和狗 " ,Field.Store.YES,Field.Index.TOKENIZED);
doc1.add(f1);
doc2.add(f2);
doc3.add(f3);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.close();
IndexReader reader = IndexReader.open(INDEX_STORE_PATH);
System.out.println( " 索引文档列表…… " );
for ( int i = 0 ;i < reader.numDocs();i ++ )
{
System.out.println(reader.document(i));
}
System.out.println( " 版本: " + reader.getVersion());
System.out.println( " 索引内的文档数量: " + reader.numDocs());
System.out.println();
Term term1 = new Term( " bookname " , " 女 " );
TermDocs docs = reader.termDocs(term1);
while (docs.next())
{
System.out.println( " ------------ " );
System.out.println( " 含有所查找的《 " + term1 + " 》的Document的编号为 " + docs.doc());
System.out.println( "" + docs.freq());
System.out.println( " ------------ " );
}
reader.close();
}
}
注意的点:
1,.定义private String static INDEX_STORE_PATH中的static不能缺少!
2,public static void main(string args[])throws IOException 中的抛出异常绝不能少!
更加规范的的写法如下(我个人认为)
package
ch3.lucenedemo.createindex;
import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
public class testindex
{
private static String INDEX_STORE_PATH = " d:/indexdelete " ;
public static void main(String[] args) throws IOException
{
testindex t = new testindex();
t.createindex(INDEX_STORE_PATH);
testindex.Readerindex(INDEX_STORE_PATH);
}
public void createindex(String INDEX_STORE_PATH) throws IOException
{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH,
new StandardAnalyzer(), true );
writer.setUseCompoundFile( false );
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Field f1 = new Field( " bookname " , " 钢铁是怎样炼成的 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f2 = new Field( " bookname " , " 英雄儿女 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f3 = new Field( " bookname " , " 篱笆女人和狗 " ,Field.Store.YES,Field.Index.TOKENIZED);
doc1.add(f1);
doc2.add(f2);
doc3.add(f3);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.close();
}
public static void Readerindex(String path) throws IOException
{
IndexReader reader = IndexReader.open(INDEX_STORE_PATH);
System.out.println( " 索引文档列表…… " );
for ( int i = 0 ;i < reader.numDocs();i ++ )
{
System.out.println(reader.document(i));
}
System.out.println( " 版本: " + reader.getVersion());
System.out.println( " 索引内的文档数量: " + reader.numDocs());
System.out.println();
Term term1 = new Term( " bookname " , " 女 " );
TermDocs docs = reader.termDocs(term1);
while (docs.next())
{
System.out.println( " ------------ " );
System.out.println( " 含有所查找的《 " + term1 + " 》的Document的编号为 " + docs.doc());
System.out.println( "" + docs.freq());
System.out.println( " ------------ " );
}
reader.close();
}
}
import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
public class testindex
{
private static String INDEX_STORE_PATH = " d:/indexdelete " ;
public static void main(String[] args) throws IOException
{
testindex t = new testindex();
t.createindex(INDEX_STORE_PATH);
testindex.Readerindex(INDEX_STORE_PATH);
}
public void createindex(String INDEX_STORE_PATH) throws IOException
{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH,
new StandardAnalyzer(), true );
writer.setUseCompoundFile( false );
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Field f1 = new Field( " bookname " , " 钢铁是怎样炼成的 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f2 = new Field( " bookname " , " 英雄儿女 " ,Field.Store.YES,Field.Index.TOKENIZED);
Field f3 = new Field( " bookname " , " 篱笆女人和狗 " ,Field.Store.YES,Field.Index.TOKENIZED);
doc1.add(f1);
doc2.add(f2);
doc3.add(f3);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.close();
}
public static void Readerindex(String path) throws IOException
{
IndexReader reader = IndexReader.open(INDEX_STORE_PATH);
System.out.println( " 索引文档列表…… " );
for ( int i = 0 ;i < reader.numDocs();i ++ )
{
System.out.println(reader.document(i));
}
System.out.println( " 版本: " + reader.getVersion());
System.out.println( " 索引内的文档数量: " + reader.numDocs());
System.out.println();
Term term1 = new Term( " bookname " , " 女 " );
TermDocs docs = reader.termDocs(term1);
while (docs.next())
{
System.out.println( " ------------ " );
System.out.println( " 含有所查找的《 " + term1 + " 》的Document的编号为 " + docs.doc());
System.out.println( "" + docs.freq());
System.out.println( " ------------ " );
}
reader.close();
}
}
就这个小小的问题搞了我这么长时间,不过有点收获哦!