Using the Chinese Tokenizer jieba with LangChain
1. The Chinese tokenizer jieba

LangChain's built-in splitters target English, so Chinese text needs a word-aware tokenizer. Wrapping the jieba library in a TextSplitter subclass lets it plug into the rest of LangChain:
from __future__ import annotations

from typing import Any, List

import jieba

from langchain_text_splitters.base import TextSplitter


class JiebaTextSplitter(TextSplitter):
    """Text splitter that segments Chinese text with the jieba library."""

    def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None:
        """Initialize the jieba splitter."""
        super().__init__(**kwargs)
        self._separator = separator

    def split_text(self, text: str) -> List[str]:
        """Split the incoming text and return the chunks."""
        # jieba.lcut segments the text into a list of Chinese words.
        splits = jieba.lcut(text)
        # _merge_splits (inherited from TextSplitter) recombines the words
        # into chunks bounded by the configured chunk size.
        return self._merge_splits(splits, self._separator)
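A minimal usage sketch follows. The sample sentence, the single-space separator, and the chunk_size/chunk_overlap values are illustrative choices, not part of the original class:

# Assumes the JiebaTextSplitter class defined above.
splitter = JiebaTextSplitter(separator=" ", chunk_size=50, chunk_overlap=0)
chunks = splitter.split_text("LangChain是一个构建大语言模型应用的框架，jieba负责把中文切分成词语。")
for chunk in chunks:
    print(chunk)

Joining words with a space keeps each chunk readable; the default "\n\n" separator would put a blank line between every pair of words.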
2. LangChain supports English well out of the box: it ships with an NLTKTextSplitter built on the NLTK package. Download the Punkt sentence models first:
import nltk
nltk.download('punkt')
After that, simply importing the splitter is enough. For reference, its implementation:
from __future__ import annotations

from typing import Any, List

from langchain_text_splitters.base import TextSplitter


class NLTKTextSplitter(TextSplitter):
    """Splitting text using NLTK package."""

    def __init__(
        self, separator: str = "\n\n", language: str = "english", **kwargs: Any
    ) -> None:
        """Initialize the NLTK splitter."""
        super().__init__(**kwargs)
        try:
            from nltk.tokenize import sent_tokenize

            self._tokenizer = sent_tokenize
        except ImportError:
            raise ImportError(
                "NLTK is not installed, please install it with `pip install nltk`."
            )
        self._separator = separator
        self._language = language

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = self._tokenizer(text, language=self._language)
        return self._merge_splits(splits, self._separator)
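Usage mirrors the jieba splitter. This sketch assumes nltk.download('punkt') has already been run; the sample text and chunk_size are illustrative:

from langchain_text_splitters import NLTKTextSplitter

text = (
    "LangChain is a framework for building LLM applications. "
    "NLTK's sent_tokenize splits English prose into sentences, "
    "which are then merged back into size-bounded chunks."
)
splitter = NLTKTextSplitter(chunk_size=100, chunk_overlap=0)
for chunk in splitter.split_text(text):
    print(chunk)

Because the splits here are whole sentences rather than single words, the default "\n\n" separator is a reasonable fit and is left unchanged.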