SOAPBody.addDocument(doc) 出现 NAMESPACE_ERR 错误

org.w3c.dom.DOMException: NAMESPACE_ERR: An attempt is made to create or change an object in a way which is incorrect with regard to namespaces.



// Fix for: org.w3c.dom.DOMException: NAMESPACE_ERR raised by SOAPBody.addDocument(doc).
// DocumentBuilderFactory is NOT namespace-aware by default, so the parsed DOM carries no
// namespace information; SAAJ then rejects the document when it is attached to the SOAP body.
DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
                      

 docFactory.setNamespaceAware(true);   // adding this single line makes the error go away
                      

 DocumentBuilder builder = docFactory .newDocumentBuilder();
                       

// NOTE(review): `out` is presumably a ByteArrayOutputStream holding the XML payload — defined elsewhere; confirm.
Document doc = builder.parse(new ByteArrayInputStream(out.toByteArray()));
                      

// NOTE(review): `body` is presumably the javax.xml.soap.SOAPBody of the outgoing message — defined elsewhere.
body.addDocument(doc);






  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
说明:依赖jar包:lucene-core-2.3.2.jar、IKAnalyzer3.2.8.jar。

一、LuceneUtil 工具类代码:

package com.zcm.lucene;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

/**
 * Full-text indexing/search helper built on Apache Lucene 2.3.2 with the
 * IKAnalyzer Chinese tokenizer.
 *
 * Company: 91注册码
 * time:2014-04-22
 * @author www.91zcm.com
 * @version 1.1
 */
public class LuceneUtil {

    /** Filesystem path where the index is stored. */
    private static String LucenePath = "d://index";

    /**
     * Creates the index from scratch (any existing index at the path is overwritten)
     * and adds every LuceneVO in the list as a document.
     *
     * @param list list of LuceneVO items to index
     * @return number of documents in the writer after indexing
     */
    public static int createIndex(List list) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        // Last boolean arg: true = create a new index (overwrites an existing one).
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, true);
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Document doc = new Document();
            // "aid" is stored but not indexed; title/remark are tokenized with term vectors.
            Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
            Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(FieldId);
            doc.add(FieldTitle);
            doc.add(FieldRemark);
            indexWriter.addDocument(doc);
        }
        // Report how many documents the IndexWriter now holds.
        int num = indexWriter.docCount();
        System.out.println("总共------》" + num);
        indexWriter.optimize();
        indexWriter.close();
        return num;
    }

    /**
     * Tokenizes a string with IKAnalyzer.
     *
     * @param word text to segment
     * @return list of token strings
     */
    public static List tokenWord(String word) throws IOException {
        List tokenArr = new ArrayList();
        StringReader reader = new StringReader(word);
        // Second arg true: use maximum-word-length segmentation.
        IKSegmentation ik = new IKSegmentation(reader, true);
        Lexeme lexeme = null;
        while ((lexeme = ik.next()) != null) {
            tokenArr.add(lexeme.getLexemeText());
        }
        return tokenArr;
    }

    /**
     * Appends a single document to the existing index.
     *
     * @param vo item to index
     */
    public static void addIndex(LuceneVO vo) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        // false = append to the existing index.
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        Document doc = new Document();
        Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
        Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        doc.add(FieldId);
        doc.add(FieldTitle);
        doc.add(FieldRemark);
        indexWriter.addDocument(doc);
        // optimize() compacts the index segments.
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Appends multiple documents to the existing index.
     *
     * @param list items to index
     */
    public static void addIndexs(List list) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Document doc = new Document();
            Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
            Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(FieldId);
            doc.add(FieldTitle);
            doc.add(FieldRemark);
            indexWriter.addDocument(doc);
        }
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Replaces the indexed document whose "aid" term matches the given item.
     *
     * @param vo item whose document should be updated
     */
    public static void updateIndex(LuceneVO vo) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        Document doc = new Document();
        Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
        Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        doc.add(FieldId);
        doc.add(FieldTitle);
        doc.add(FieldRemark);
        Term term = new Term("aid", String.valueOf(vo.getAid()));
        indexWriter.updateDocument(term, doc);
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Replaces the indexed documents for every item in the list, matched by "aid".
     *
     * @param list items whose documents should be updated
     */
    public static void updateIndexs(List list) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Document doc = new Document();
            Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
            Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(FieldId);
            doc.add(FieldTitle);
            doc.add(FieldRemark);
            Term term = new Term("aid", String.valueOf(vo.getAid()));
            indexWriter.updateDocument(term, doc);
        }
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Deletes the indexed document whose "aid" term matches the given item.
     *
     * @param vo item whose document should be removed
     */
    public static void deleteIndex(LuceneVO vo) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        Term term = new Term("aid", String.valueOf(vo.getAid()));
        indexWriter.deleteDocuments(term);
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Deletes the indexed documents for every item in the list, matched by "aid".
     *
     * @param list items whose documents should be removed
     */
    public static void deleteIndexs(List list) throws Exception {
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Term term = new Term("aid", String.valueOf(vo.getAid()));
            indexWriter.deleteDocuments(term);
        }
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Searches the "title" and "remark" fields for the given word.
     *
     * @param word query text
     * @return list of matching LuceneVO items (empty on error)
     */
    public static List search(String word) {
        List list = new ArrayList();
        Hits hits = null;
        try {
            IndexSearcher searcher = new IndexSearcher(LucenePath);
            // The same word is matched against both fields, OR-combined (SHOULD).
            String[] queries = {word, word};
            String[] fields = {"title", "remark"};
            BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD};
            Query query = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
            if (searcher != null) {
                hits = searcher.search(query);
                LuceneVO vo = null;
                for (int i = 0; i < hits.length(); i++) {
                    Document doc = hits.doc(i);
                    vo = new LuceneVO();
                    vo.setAid(Integer.parseInt(doc.get("aid")));
                    vo.setRemark(doc.get("remark"));
                    vo.setTitle(doc.get("title"));
                    list.add(vo);
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
        return list;
    }
}

二、Lucene用到的JavaBean代码:

package com.zcm.lucene;

/**
 * Value object carried through the Lucene index: one article.
 *
 * Company: 91注册码
 * time:2014-04-22
 * @author www.91zcm.com
 * @version 1.1
 */
public class LuceneVO {

    private Integer aid;    // article id
    private String title;   // article title
    private String remark;  // article summary

    public Integer getAid() {
        return aid;
    }

    public void setAid(Integer aid) {
        this.aid = aid;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public String getRemark() {
        return remark;
    }

    public void setRemark(String remark) {
        this.remark = remark;
    }
}

备注:源码来源于www.91zcm.com 开源博客中的全文检索代码。(http://www.91zcm.com/)
示例代码: //src要创建索引的文件,destDir索引存放的目录 public static void createIndex(File src, File destDir){ Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); //创建一个语法分析器 IndexWriter iwriter = null; Directory directory = null; try { directory = FSDirectory.open(destDir); //把索引文件存储到磁盘目录 //创建一个IndexWriter(存放索引文件的目录,分析器,Field的最大长度) iwriter = new IndexWriter(directory, analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED); //iwriter.setUseCompoundFile(true);//使用复合文件 Document doc = new Document(); //创建一个Document对象 //把文件路径作为"path"域:不分词,索引,保存 doc.add(new Field("path", src.getCanonicalPath(), Field.Store.YES, Field.Index.NOT_ANALYZED)); StringBuilder sb = new StringBuilder(); BufferedReader br = new BufferedReader(new FileReader(src)); for(String str = null; (str = br.readLine())!=null;){ sb.append(str).append(System.getProperty("line.separator")); } //文件内容作为"content"域:分词,索引,保存 doc.add(new Field("contents", sb.toString(), Field.Store.YES, Field.Index.ANALYZED)); iwriter.addDocument(doc); //把Document存放到IndexWriter中 iwriter.optimize(); //对索引进行优化 } catch (IOException e) { e.printStackTrace(); } finally { if (iwriter != null) { try { iwriter.close(); //关闭IndexWriter时,才把内存中的数据写到文件 } catch (IOException e) { e.printStackTrace(); } } if (directory != null) { try { directory.close(); //关闭索引存放目录 } catch (IOException e) { e.printStackTrace(); } } } } //src要创建索引的文件,destDir索引存放的目录 public static void createIndex(File src, File destDir){ Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); //创建一个语法分析器 IndexWriter iwriter = null; Directory directory = null; try { directory = FSDirectory.open(destDir); //把索引文件存储到磁盘目录 //创建一个IndexWriter(存放索引文件的目录,分析器,Field的最大长度) iwriter = new IndexWriter(directory, analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED); //iwriter.setUseCompoundFile(true);//使用复合文件 Document doc = new Document(); //创建一个Document对象 //把文件路径作为"path"域:不分词,索引,保存 doc.add(new Field("path", src.getCanonicalPath(), Field.Store.YES, Field.Index.NOT_ANALYZED)); StringBuilder sb = 
new StringBuilder(); BufferedReader br = new BufferedReader(new FileReader(src)); for(String str = null; (str = br.readLine())!=null;){ sb.append(str).append(System.getProperty("line.separator")); } //文件内容作为"content"域:分词,索引,保存 doc.add(new Field("contents", sb.toString(), Field.Store.YES, Field.Index.ANALYZED)); iwriter.addDocument(doc); //把Document存放到IndexWriter中 iwriter.optimize(); //对索引进行优化 } catch (IOException e) { e.printStackTrace(); } finally { if (iwriter != null) { try { iwriter.close(); //关闭IndexWriter时,才把内存中的数据写到文件 } catch (IOException e) { e.printStackTrace(); } } if (directory != null) { try { directory.close(); //关闭索引存放目录 } catch (IOException e) { e.printStackTrace(); } } } } 6. 查询索引 1) IndexSearcher: 索引查询器 a) 构造器: IndexSearcher(Directory path, boolean readOnly) b) 常用方法: TopDocs search(Query query, Filter filter, int n); //执行查询。n指的是最多返回的Document的数量。 Document doc(int 文件内部编号); //根据文档的内部编号获取到该Document void close(); //关闭查询器 2) Query: 查询对象。把用户输入的查询字符串封装成Lucene能够识别的Query对象。 3) Filter: 用来过虑搜索结果的对象。 4) TopDocs: 代表查询结果集信息对象。它有两个属性: a) totalHits: 查询命中数。 b) scoreDocs: 查询结果信息。它包含符合条件的Document的内部编号(doc)及评分(score)。 5) 示例代码: //keyword要搜索的关键字。indexDir索引存放的目录 public static void searcher(String keyword, File indexDir){ IndexSearcher isearcher = null; Directory directory = null; try{ Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); directory = FSDirectory.open(indexDir); //创建解析器 QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer); Query query = parser.parse(keyword);//获取查询对象 // Query query1 = new TermQuery(new Term("contents", keyword)); // Query query2 = new TermQuery(new Term("contents", keyword2)); // BooleanQuery query = new BooleanQuery(); // query.add(query1, Occur.SHOULD); // query.add(query2, Occur.SHOULD); // QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[]{"path", "contents"}, analyzer); // Query query = parser.parse(keyword); isearcher = new IndexSearcher(directory, true); 
//创建索引搜索器 TopDocs ts = isearcher.search(query, null, 100); //执行搜索,获取查询结果集对象 int totalHits = ts.totalHits; //获取命中数 System.out.println("命中数:" + totalHits); ScoreDoc[] hits = ts.scoreDocs; //获取命中的文档信息对象 for (int i = 0; i < hits.length; i++) { Document hitDoc = isearcher.doc(hits[i].doc); //根据命中的文档的内部编号获取该文档 System.out.println(hitDoc.getField("contents").stringValue()); //输出该文档指定域的值 } } catch (IOException e) { e.printStackTrace(); } catch (ParseException e) { e.printStackTrace(); } finally { if (isearcher != null) { try { isearcher.close(); //关闭搜索器 } catch (IOException e) { e.printStackTrace(); } } if (directory != null) { try { directory.close(); //关闭索引存放目录 } catch (IOException e) { e.printStackTrace(); } } } } //keyword要搜索的关键字。indexDir索引存放的目录 public static void searcher(String keyword, File indexDir){ IndexSearcher isearcher = null; Directory directory = null; try{ Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); directory = FSDirectory.open(indexDir); //创建解析器 QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer); Query query = parser.parse(keyword);//获取查询对象 // Query query1 = new TermQuery(new Term("contents", keyword)); // Query query2 = new TermQuery(new Term("contents", keyword2)); // BooleanQuery query = new BooleanQuery(); // query.add(query1, Occur.SHOULD); // query.add(query2, Occur.SHOULD); // QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[]{"path", "contents"}, analyzer); // Query query = parser.parse(keyword); isearcher = new IndexSearcher(directory, true); //创建索引搜索器 TopDocs ts = isearcher.search(query, null, 100); //执行搜索,获取查询结果集对象 int totalHits = ts.totalHits; //获取命中数 System.out.println("命中数:" + totalHits); ScoreDoc[] hits = ts.scoreDocs; //获取命中的文档信息对象 for (int i = 0; i < hits.length; i++) { Document hitDoc = isearcher.doc(hits[i].doc); //根据命中的文档的内部编号获取该文档 System.out.println(hitDoc.getField("contents").stringValue()); //输出该文档指定域的值 } } catch (IOException e) { 
e.printStackTrace(); } catch (ParseException e) { e.printStackTrace(); } finally { if (isearcher != null) { try { isearcher.close(); //关闭搜索器 } catch (IOException e) { e.printStackTrace(); } } if (directory != null) { try { directory.close(); //关闭索引存放目录 } catch (IOException e) { e.printStackTrace(); } } } } 7. 删除索引 IndexWriter提供deleteDocuments(Term term); //会删除索引文件里含有指定Term的所有Document。 IndexReader也提供了deleteDocuments(Term term);

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值