Resolving the HanLP tokenizer case-sensitivity problem in Elasticsearch

1. First, create a tokenizer my_hanlp and an analyzer my_hanlp_analyzer, and attach the lowercase token filter;

// Custom analyzer built on top of the hanlp tokenizer
PUT demo1/
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_hanlp_analyzer": {
          "tokenizer": "my_hanlp",
           "filter":"lowercase"
        }
      },
      "tokenizer": {
        "my_hanlp": {
          "type": "hanlp_index",
          "enable_stop_dictionary": true,
          "enable_custom_config": true
        }
      }
    }
  },
   "mappings": {
    "properties": {
      "content": {
        "type": "text",
        "analyzer": "my_hanlp_analyzer"
      }
    }
  }
}
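
As an optional sanity check (not part of the original walkthrough), the analysis settings can be read back with the stock _settings endpoint to confirm that the tokenizer and analyzer were registered:

// Optional: verify that my_hanlp and my_hanlp_analyzer are registered
GET demo1/_settings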

2. Index two test documents

POST /demo1/_doc/1
{
  "title":"我们 us wELCOME To Beijing.",
  "content":"1 this is a test CONTENT。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前的可口可乐瓶子移开。"
}

POST /demo1/_doc/2
{
  "title":"2 我们 us welcome To Beijing. ",
  "content":"2 This is a test content 2 Welcome TO BEIJING。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前的可口可乐瓶子移开"
}
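
Newly indexed documents only become visible to search after a refresh. If the query in the next step is run immediately and comes back empty, forcing a refresh first (a standard, optional API call) rules out refresh timing:

// Optional: force a refresh so both test documents are searchable right away
POST /demo1/_refresh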

3. Test a search with a mixed-case query string

GET /demo1/_search
{
  "query": {
    "multi_match": {
      "query": "BEIjing",
      "fields": [
        "title",
        "content"
      ]
    }
  },
  "highlight": {
    "fields": {
      "title": {},
      "content": {}
    }
  }
}

Result:

{
  "took" : 1,
  "timed_out" : false,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  },
  "hits" : {
    "total" : {
      "value" : 2,
      "relation" : "eq"
    },
    "max_score" : 0.6602099,
    "hits" : [
      {
        "_index" : "demo1",
        "_type" : "_doc",
        "_id" : "2",
        "_score" : 0.6602099,
        "_source" : {
          "title" : "2 我们 us welcome To Beijing. ",
          "content" : "2 This is a test content 2 Welcome TO BEIJING。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前的可口可乐瓶子移开"
        },
        "highlight" : {
          "title" : [
            "2 我们 us welcome To <em>Beijing</em>."
          ],
          "content" : [
            "2 This is a test content 2 Welcome TO <em>BEIJING</em>。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前的可口可乐瓶子移开"
          ]
        }
      },
      {
        "_index" : "demo1",
        "_type" : "_doc",
        "_id" : "1",
        "_score" : 0.18824537,
        "_source" : {
          "title" : "我们 us wELCOME To Beijing.",
          "content" : "1 this is a test CONTENT。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前的可口可乐瓶子移开。"
        },
        "highlight" : {
          "title" : [
            "我们 us wELCOME To <em>Beijing</em>."
          ]
        }
      }
    ]
  }
}
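
Both documents match even though the query string "BEIjing" mixes case: multi_match runs the query text through each field's analyzer, so it is lowercased to beijing before it is looked up in the inverted index. A term query skips analysis entirely, which makes the effect easy to see (a minimal sketch against the same index):

// term queries are NOT analyzed: "BEIjing" is compared verbatim against
// the lowercased index tokens, so this request returns no hits
GET /demo1/_search
{
  "query": {
    "term": {
      "content": "BEIjing"
    }
  }
}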

4. Tokenizer tests

First, the native hanlp_index tokenizer:

// The native hanlp_index tokenizer
POST demo1/_analyze
{
  "tokenizer": "hanlp_index",
  "text": [
    "This is a test content 2 Welcome TO BEIJING。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前"
  ]
}

// Tokenization result
{
  "tokens" : [
    {
      "token" : "This",
      "start_offset" : 0,
      "end_offset" : 4,
      "type" : "nx",
      "position" : 0
    },
    {
      "token" : "is",
      "start_offset" : 5,
      "end_offset" : 7,
      "type" : "nx",
      "position" : 1
    },
    {
      "token" : "a",
      "start_offset" : 8,
      "end_offset" : 9,
      "type" : "nx",
      "position" : 2
    },
    {
      "token" : "test",
      "start_offset" : 10,
      "end_offset" : 14,
      "type" : "nx",
      "position" : 3
    },
    {
      "token" : "content",
      "start_offset" : 15,
      "end_offset" : 22,
      "type" : "nx",
      "position" : 4
    },
    {
      "token" : "2",
      "start_offset" : 23,
      "end_offset" : 24,
      "type" : "m",
      "position" : 5
    },
    {
      "token" : "Welcome",
      "start_offset" : 25,
      "end_offset" : 32,
      "type" : "nx",
      "position" : 6
    },
    {
      "token" : "TO",
      "start_offset" : 33,
      "end_offset" : 35,
      "type" : "nx",
      "position" : 7
    },
    {
      "token" : "BEIJING",
      "start_offset" : 36,
      "end_offset" : 43,
      "type" : "nx",
      "position" : 8
    },
    {
      "token" : "。",
      "start_offset" : 43,
      "end_offset" : 44,
      "type" : "w",
      "position" : 9
    },
    {
      "token" : "在",
      "start_offset" : 45,
      "end_offset" : 46,
      "type" : "p",
      "position" : 10
    },
    {
      "token" : "对阵",
      "start_offset" : 46,
      "end_offset" : 48,
      "type" : "vn",
      "position" : 11
    },
    {
      "token" : "匈牙利",
      "start_offset" : 48,
      "end_offset" : 51,
      "type" : "nsf",
      "position" : 12
    },
    {
      "token" : "的",
      "start_offset" : 51,
      "end_offset" : 52,
      "type" : "ude1",
      "position" : 13
    },
    {
      "token" : "赛前",
      "start_offset" : 52,
      "end_offset" : 54,
      "type" : "t",
      "position" : 14
    },
    {
      "token" : "新闻",
      "start_offset" : 54,
      "end_offset" : 56,
      "type" : "n",
      "position" : 15
    },
    {
      "token" : "发布会",
      "start_offset" : 56,
      "end_offset" : 59,
      "type" : "n",
      "position" : 16
    },
    {
      "token" : "发布",
      "start_offset" : 56,
      "end_offset" : 58,
      "type" : "v",
      "position" : 17
    },
    {
      "token" : "上",
      "start_offset" : 59,
      "end_offset" : 60,
      "type" : "f",
      "position" : 18
    },
    {
      "token" : ",",
      "start_offset" : 60,
      "end_offset" : 61,
      "type" : "w",
      "position" : 19
    },
    {
      "token" : "C",
      "start_offset" : 61,
      "end_offset" : 62,
      "type" : "nx",
      "position" : 20
    },
    {
      "token" : "罗",
      "start_offset" : 62,
      "end_offset" : 63,
      "type" : "b",
      "position" : 21
    },
    {
      "token" : "将",
      "start_offset" : 63,
      "end_offset" : 64,
      "type" : "d",
      "position" : 22
    },
    {
      "token" : "自己",
      "start_offset" : 64,
      "end_offset" : 66,
      "type" : "rr",
      "position" : 23
    },
    {
      "token" : "面前",
      "start_offset" : 66,
      "end_offset" : 68,
      "type" : "f",
      "position" : 24
    }
  ]
}

Next, the my_hanlp tokenizer, which builds on hanlp_index with stop-word removal and the custom dictionary enabled; note that stop words such as "is", "在", and "的" disappear from the output:

// The my_hanlp tokenizer: hanlp_index plus stop-word removal and a custom dictionary
POST demo1/_analyze
{
  "tokenizer": "my_hanlp",
  "text": [
    "This is a test content 2 Welcome TO BEIJING。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前"
  ]
}

// Tokenization result
{
  "tokens" : [
    {
      "token" : "This",
      "start_offset" : 0,
      "end_offset" : 4,
      "type" : "nx",
      "position" : 0
    },
    {
      "token" : "a",
      "start_offset" : 8,
      "end_offset" : 9,
      "type" : "nx",
      "position" : 1
    },
    {
      "token" : "test",
      "start_offset" : 10,
      "end_offset" : 14,
      "type" : "nx",
      "position" : 2
    },
    {
      "token" : "content",
      "start_offset" : 15,
      "end_offset" : 22,
      "type" : "nx",
      "position" : 3
    },
    {
      "token" : "2",
      "start_offset" : 23,
      "end_offset" : 24,
      "type" : "m",
      "position" : 4
    },
    {
      "token" : "Welcome",
      "start_offset" : 25,
      "end_offset" : 32,
      "type" : "nx",
      "position" : 5
    },
    {
      "token" : "TO",
      "start_offset" : 33,
      "end_offset" : 35,
      "type" : "nx",
      "position" : 6
    },
    {
      "token" : "BEIJING",
      "start_offset" : 36,
      "end_offset" : 43,
      "type" : "nx",
      "position" : 7
    },
    {
      "token" : "对阵",
      "start_offset" : 46,
      "end_offset" : 48,
      "type" : "vn",
      "position" : 8
    },
    {
      "token" : "匈牙利",
      "start_offset" : 48,
      "end_offset" : 51,
      "type" : "nsf",
      "position" : 9
    },
    {
      "token" : "赛前",
      "start_offset" : 52,
      "end_offset" : 54,
      "type" : "t",
      "position" : 10
    },
    {
      "token" : "新闻",
      "start_offset" : 54,
      "end_offset" : 56,
      "type" : "n",
      "position" : 11
    },
    {
      "token" : "发布会",
      "start_offset" : 56,
      "end_offset" : 59,
      "type" : "n",
      "position" : 12
    },
    {
      "token" : "发布",
      "start_offset" : 56,
      "end_offset" : 58,
      "type" : "v",
      "position" : 13
    },
    {
      "token" : "C",
      "start_offset" : 61,
      "end_offset" : 62,
      "type" : "nx",
      "position" : 14
    },
    {
      "token" : "罗",
      "start_offset" : 62,
      "end_offset" : 63,
      "type" : "b",
      "position" : 15
    },
    {
      "token" : "面前",
      "start_offset" : 66,
      "end_offset" : 68,
      "type" : "f",
      "position" : 16
    }
  ]
}

5. Analysis with my_hanlp_analyzer, which adds lowercasing on top of the my_hanlp tokenizer:

POST demo1/_analyze
{
  "analyzer": "my_hanlp_analyzer",
  "text": [
    "This is a test content 2 Welcome TO BEIJING。 在对阵匈牙利的赛前新闻发布会上,C罗将自己面前"
  ]
}

Tokenization result:

{
  "tokens" : [
    {
      "token" : "this",
      "start_offset" : 0,
      "end_offset" : 4,
      "type" : "nx",
      "position" : 0
    },
    {
      "token" : "a",
      "start_offset" : 8,
      "end_offset" : 9,
      "type" : "nx",
      "position" : 1
    },
    {
      "token" : "test",
      "start_offset" : 10,
      "end_offset" : 14,
      "type" : "nx",
      "position" : 2
    },
    {
      "token" : "content",
      "start_offset" : 15,
      "end_offset" : 22,
      "type" : "nx",
      "position" : 3
    },
    {
      "token" : "2",
      "start_offset" : 23,
      "end_offset" : 24,
      "type" : "m",
      "position" : 4
    },
    {
      "token" : "welcome",
      "start_offset" : 25,
      "end_offset" : 32,
      "type" : "nx",
      "position" : 5
    },
    {
      "token" : "to",
      "start_offset" : 33,
      "end_offset" : 35,
      "type" : "nx",
      "position" : 6
    },
    {
      "token" : "beijing",
      "start_offset" : 36,
      "end_offset" : 43,
      "type" : "nx",
      "position" : 7
    },
    {
      "token" : "对阵",
      "start_offset" : 46,
      "end_offset" : 48,
      "type" : "vn",
      "position" : 8
    },
    {
      "token" : "匈牙利",
      "start_offset" : 48,
      "end_offset" : 51,
      "type" : "nsf",
      "position" : 9
    },
    {
      "token" : "赛前",
      "start_offset" : 52,
      "end_offset" : 54,
      "type" : "t",
      "position" : 10
    },
    {
      "token" : "新闻",
      "start_offset" : 54,
      "end_offset" : 56,
      "type" : "n",
      "position" : 11
    },
    {
      "token" : "发布会",
      "start_offset" : 56,
      "end_offset" : 59,
      "type" : "n",
      "position" : 12
    },
    {
      "token" : "发布",
      "start_offset" : 56,
      "end_offset" : 58,
      "type" : "v",
      "position" : 13
    },
    {
      "token" : "c",
      "start_offset" : 61,
      "end_offset" : 62,
      "type" : "nx",
      "position" : 14
    },
    {
      "token" : "罗",
      "start_offset" : 62,
      "end_offset" : 63,
      "type" : "b",
      "position" : 15
    },
    {
      "token" : "面前",
      "start_offset" : 66,
      "end_offset" : 68,
      "type" : "f",
      "position" : 16
    }
  ]
}

6. Summary

  • If the analyzer does not include filter: lowercase, the index is case-sensitive: a search term must match the case of the original text exactly to be found (a counter-example sketch follows this list);
  • The title field is not mapped with my_hanlp_analyzer; it is a plain text field using the default standard analyzer, which lowercases tokens, so match queries against it are case-insensitive.
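
A minimal counter-example for the first point, assuming a throwaway index (here named demo2, hypothetical and not part of the walkthrough above): the same hanlp_index tokenizer, but with no lowercase filter. Only the case-exact query string matches:

// Hypothetical comparison index: same tokenizer, no lowercase filter
PUT demo2
{
  "settings": {
    "analysis": {
      "analyzer": {
        "no_lowercase_analyzer": {
          "tokenizer": "my_hanlp"
        }
      },
      "tokenizer": {
        "my_hanlp": {
          "type": "hanlp_index",
          "enable_stop_dictionary": true,
          "enable_custom_config": true
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "content": {
        "type": "text",
        "analyzer": "no_lowercase_analyzer"
      }
    }
  }
}

POST demo2/_doc/1?refresh
{
  "content": "Welcome TO BEIJING"
}

// The indexed token keeps its original case ("BEIJING") and the query
// string is analyzed without lowercasing, so this request finds 0 hits:
GET demo2/_search
{
  "query": {
    "match": { "content": "beijing" }
  }
}

// ...while the case-exact form finds the document:
GET demo2/_search
{
  "query": {
    "match": { "content": "BEIJING" }
  }
}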

Other notes:

For the concepts of tokenizer, analyzer, and filter, see this article: https://cloud.tencent.com/developer/article/1706529

 
