Python Span.set_extension — code example (negspacy Negex component)

# Required import: from spacy.tokens import Span

# Alternatively: from spacy.tokens.Span import set_extension

def __init__(
    self,
    nlp,
    language="en_clinical",
    ent_types=None,
    pseudo_negations=None,
    preceding_negations=None,
    following_negations=None,
    termination=None,
    chunk_prefix=None,
):
    """Initialize the Negex negation-detection component.

    Registers the ``negex`` extension on ``Span`` and builds the
    PhraseMatcher patterns for the four negation termsets, falling back to
    the built-in termsets for *language* wherever a caller does not supply
    an override.

    Args:
        nlp: A loaded spaCy ``Language`` pipeline (used for its tokenizer
            and vocab).
        language: Key into the module-level ``LANGUAGES`` termset table.
        ent_types: Optional list of entity labels to restrict negation to.
        pseudo_negations: Optional override list of pseudo-negation phrases.
        preceding_negations: Optional override list of phrases negating
            entities that follow them.
        following_negations: Optional override list of phrases negating
            entities that precede them.
        termination: Optional override list of termination phrases.
        chunk_prefix: Optional list of prefix phrases for noun chunks.

    Raises:
        KeyError: If *language* is unknown, or a required termset is
            missing for the language and no override was provided.

    Note:
        Defaults are ``None`` rather than ``list()`` — a mutable default
        argument is evaluated once and shared across every call, which is
        a latent aliasing bug; ``None`` sentinels avoid it while keeping
        the same observable behavior for all callers.
    """
    if language not in LANGUAGES:
        raise KeyError(
            f"{language} not found in languages termset. "
            "Ensure this is a supported language or specify "
            "your own termsets when initializing Negex."
        )
    termsets = LANGUAGES[language]

    # force=True keeps re-instantiation (e.g. in tests) from raising even
    # though has_extension() should normally make this a fresh register.
    if not Span.has_extension("negex"):
        Span.set_extension("negex", default=False, force=True)

    def _resolve(key, override):
        # Prefer an explicit caller override; otherwise require the
        # termset for this language to supply the phrase list.
        if override:
            return override
        if key not in termsets:
            raise KeyError(f"{key} not specified for this language.")
        return termsets[key]

    pseudo_negations = _resolve("pseudo_negations", pseudo_negations)
    preceding_negations = _resolve("preceding_negations", preceding_negations)
    following_negations = _resolve("following_negations", following_negations)
    termination = _resolve("termination", termination)

    # Efficiently build spaCy matcher patterns: tokenizer.pipe tokenizes
    # the phrase lists without running the full pipeline.
    self.pseudo_patterns = list(nlp.tokenizer.pipe(pseudo_negations))
    self.preceding_patterns = list(nlp.tokenizer.pipe(preceding_negations))
    self.following_patterns = list(nlp.tokenizer.pipe(following_negations))
    self.termination_patterns = list(nlp.tokenizer.pipe(termination))

    # NOTE(review): the match keys mix cases ("pseudo" vs "Preceding");
    # they are preserved verbatim because downstream matcher-result
    # handling presumably keys on these exact strings.
    self.matcher = PhraseMatcher(nlp.vocab, attr="LOWER")
    self.matcher.add("pseudo", None, *self.pseudo_patterns)
    self.matcher.add("Preceding", None, *self.preceding_patterns)
    self.matcher.add("Following", None, *self.following_patterns)
    self.matcher.add("Termination", None, *self.termination_patterns)

    self.nlp = nlp
    # Preserve the original contract: ent_types is stored as a list.
    self.ent_types = [] if ent_types is None else ent_types
    self.chunk_prefix = list(nlp.tokenizer.pipe(chunk_prefix or []))

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值