Fixing duplicate highlighting when a search term mixes digits and English letters

The problem:

When a search keyword combines digits and English letters, the highlighted result duplicates part of the match.

For example, searching for "220" gives:

    关于同意220kV佛山变电站#1、#2主变报废的批复 .txt

but searching for "220kv" gives:

    关于同意220220kV佛山变电站#1、#2主变报废的批复 .txt

Highlighting relies on the term positions recorded at index time; they are read back with TermPositionVector termFreqVector = (TermPositionVector) ireader.getTermFreqVector(doc, fieldname);. The main code is:

     QueryParser queryParser = new QueryParser(Version.LUCENE_30, fieldname, queryAnalyzer);
     Query query = queryParser.parse(keyWordLc);
     Highlighter highlighter = new Highlighter(
         new SimpleHTMLFormatter("<font color=\"red\">", "</font>"),
         new QueryScorer(query));
     highlighter.setTextFragmenter(new SimpleFragmenter(50));
      
     TermPositionVector termFreqVector = (TermPositionVector) ireader.getTermFreqVector(doc, fieldname);
     /**
      * Note: it is best to pass true here. It costs some performance, but it avoids
      * results like the following. For a document titled 索引测试新建文档1.txt the
      * token dump is:
      * [(1,8,9), (1.txt,8,13), (文档,6,8), (新建,4,6), (测试,2,4), (索引,0,2), (txt,10,13)]
      * and the highlighted output becomes <font color="red">索引测试新建文档</font>1.txt.
      * The highlighting method works off position information: a term is only marked
      * up while the current matched term comes before the largest end position seen
      * so far; with unsorted tokens it ends up highlighting the whole string from the
      * first matched term's start offset to the last matched term's end offset.
      */
     TokenStream tokenStream = TokenSources.getTokenStream(termFreqVector, true);

     String content = hitDoc.get(fieldname);
     String result = highlighter.getBestFragments(tokenStream, content, 5, "...");

Stepping through with a debugger shows that the paoding analyzer tokenizes "220kv" into "220", "kv", and "220kv", while the highlighting itself goes through Lucene's lucene-highlighter-3.0.2.jar and lucene-memory-3.0.2.jar.
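To see the overlap for yourself, you can dump the tokens the analyzer emits. The following is a minimal sketch (not from the original post) that prints each token with its offsets; it assumes paoding-analysis is on the classpath and that the analyzer class is net.paoding.analysis.analyzer.PaodingAnalyzer with a no-argument constructor, which may differ between paoding versions:

     import java.io.StringReader;

     import net.paoding.analysis.analyzer.PaodingAnalyzer; // assumed class/package; check your paoding version
     import org.apache.lucene.analysis.Analyzer;
     import org.apache.lucene.analysis.TokenStream;
     import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
     import org.apache.lucene.analysis.tokenattributes.TermAttribute;

     public class TokenDump {
         public static void main(String[] args) throws Exception {
             Analyzer analyzer = new PaodingAnalyzer();
             TokenStream ts = analyzer.tokenStream("title", new StringReader("220kv"));
             TermAttribute term = ts.addAttribute(TermAttribute.class);
             OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
             while (ts.incrementToken()) {
                 // expected to print the three overlapping tokens reported above,
                 // e.g. (offsets assumed): 220 [0,3]  kv [3,5]  220kv [0,5]
                 System.out.println(term.term() + " [" + offset.startOffset()
                         + "," + offset.endOffset() + "]");
             }
             ts.close();
         }
     }

Two of the three tokens cover the same characters; that overlap is what trips up the highlighter.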

The fix is to modify the Highlighter class in lucene-highlighter-3.0.2.jar. The patched getBestTextFragments method, with the two-line change marked inline, is:

 public final TextFragment[] getBestTextFragments(
     TokenStream tokenStream,
     String text,
     boolean mergeContiguousFragments,
     int maxNumFragments)
     throws IOException, InvalidTokenOffsetsException
 {
   ArrayList<TextFragment> docFrags = new ArrayList<TextFragment>();
   StringBuilder newText = new StringBuilder();

   TermAttribute termAtt = tokenStream.addAttribute(TermAttribute.class);
   OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
   tokenStream.addAttribute(PositionIncrementAttribute.class);
   tokenStream.reset();

   TextFragment currentFrag = new TextFragment(newText, newText.length(), docFrags.size());
   TokenStream newStream = fragmentScorer.init(tokenStream);
   if (newStream != null) {
     tokenStream = newStream;
   }
   fragmentScorer.startFragment(currentFrag);
   docFrags.add(currentFrag);

   FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);

   try
   {
     String tokenText;
     int startOffset;
     int endOffset;
     int lastEndOffset = 0;
     int lastStartOffset = 0; // records the start offset of the substring taken for the current group

     textFragmenter.start(text, tokenStream);

     TokenGroup tokenGroup = new TokenGroup(tokenStream);

     for (boolean next = tokenStream.incrementToken();
          next && (offsetAtt.startOffset() < maxDocCharsToAnalyze);
          next = tokenStream.incrementToken())
     {
       if ((offsetAtt.endOffset() > text.length())
           || (offsetAtt.startOffset() > text.length()))
       {
         throw new InvalidTokenOffsetsException("Token " + termAtt.term()
             + " exceeds length of provided text sized " + text.length());
       }

       if ((tokenGroup.numTokens > 0) && (tokenGroup.isDistinct()))
       {
         // the current token is distinct from previous tokens -
         // markup the cached token group info
         startOffset = tokenGroup.matchStartOffset;
         endOffset = tokenGroup.matchEndOffset;

         // The two lines below replace the original
         //   tokenText = text.substring(startOffset, endOffset);
         // They fix the duplicate highlighting of "digits+letters" /
         // "letters+digits" keywords (e.g. searching "220KV" highlighting
         // "220220KV"): the start of the highlighted substring is clamped so
         // it never re-covers text an earlier overlapping group already emitted.
         lastStartOffset = Math.max(startOffset, lastEndOffset);
         tokenText = text.substring(lastStartOffset, endOffset);

         String markedUpText = formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
         // store any whitespace etc from between this and last group
         if (startOffset > lastEndOffset)
           newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
         newText.append(markedUpText);
         lastEndOffset = Math.max(endOffset, lastEndOffset);

         tokenGroup.clear();

         // check if current token marks the start of a new fragment
         if (textFragmenter.isNewFragment())
         {
           currentFrag.setScore(fragmentScorer.getFragmentScore());
           // record stats for a new fragment
           currentFrag.textEndPos = newText.length();
           currentFrag = new TextFragment(newText, newText.length(), docFrags.size());
           fragmentScorer.startFragment(currentFrag);
           docFrags.add(currentFrag);
         }
       }

       tokenGroup.addToken(fragmentScorer.getTokenScore());

 //      if (lastEndOffset > maxDocBytesToAnalyze)
 //      {
 //        break;
 //      }
     }
     currentFrag.setScore(fragmentScorer.getFragmentScore());

     if (tokenGroup.numTokens > 0)
     {
       // flush the accumulated text (same code as in above loop)
       startOffset = tokenGroup.matchStartOffset;
       endOffset = tokenGroup.matchEndOffset;
       tokenText = text.substring(startOffset, endOffset);
       String markedUpText = formatter.highlightTerm(encoder.encodeText(tokenText), tokenGroup);
       // store any whitespace etc from between this and last group
       if (startOffset > lastEndOffset)
         newText.append(encoder.encodeText(text.substring(lastEndOffset, startOffset)));
       newText.append(markedUpText);
       lastEndOffset = Math.max(lastEndOffset, endOffset);
     }

     // Test what remains of the original text beyond the point where we stopped analyzing
     if (
 //      if there is text beyond the last token considered..
         (lastEndOffset < text.length())
         &&
 //      and that text is not too large...
         (text.length() <= maxDocCharsToAnalyze)
        )
     {
       // append it to the last fragment
       newText.append(encoder.encodeText(text.substring(lastEndOffset)));
     }

     currentFrag.textEndPos = newText.length();

     // sort the most relevant sections of the text
     for (Iterator<TextFragment> i = docFrags.iterator(); i.hasNext();)
     {
       currentFrag = i.next();

       // If you are running with a version of Lucene before 11th Sept 03
       // you do not have PriorityQueue.insert() - so uncomment the code below
       /*
       if (currentFrag.getScore() >= minScore)
       {
         fragQueue.put(currentFrag);
         if (fragQueue.size() > maxNumFragments)
         { // if hit queue overfull
           fragQueue.pop(); // remove lowest in hit queue
           minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
         }
       }
       */
       // The above code caused a problem as a result of Christoph Goller's 11th Sept 03
       // fix to PriorityQueue. The correct method to use here is the new "insert" method
       // USE ABOVE CODE IF THIS DOES NOT COMPILE!
       fragQueue.insertWithOverflow(currentFrag);
     }

     // return the most relevant fragments
     TextFragment frag[] = new TextFragment[fragQueue.size()];
     for (int i = frag.length - 1; i >= 0; i--)
     {
       frag[i] = fragQueue.pop();
     }

     // merge any contiguous fragments to improve readability
     if (mergeContiguousFragments)
     {
       mergeContiguousFragments(frag);
       ArrayList<TextFragment> fragTexts = new ArrayList<TextFragment>();
       for (int i = 0; i < frag.length; i++)
       {
         if ((frag[i] != null) && (frag[i].getScore() > 0))
         {
           fragTexts.add(frag[i]);
         }
       }
       frag = fragTexts.toArray(new TextFragment[0]);
     }
     return frag;
   }
   finally
   {
     if (tokenStream != null)
     {
       try
       {
         tokenStream.close();
       }
       catch (Exception e)
       {
       }
     }
   }
 }
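To see why the clamp removes the duplicate, walk through the overlapping tokens (the offsets here are the assumed ones from the token-dump sketch above): "220" covers [0,3) and "220kv" covers [0,5). The first token group is flushed as text.substring(0, 3) = "220", wrapped in the highlight tags, and lastEndOffset becomes 3. When the overlapping "220kv" group is flushed, the stock code took text.substring(0, 5) = "220kV" and appended it after the "220" already written out, producing "220220kV". The patched code computes lastStartOffset = Math.max(0, 3) = 3 and takes text.substring(3, 5) = "kV" instead, so the output reads <font color="red">220</font><font color="red">kV</font> and the original characters appear exactly once.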