其实没啥技术含量啦(麻烦在于配置java与Stanford环境)
这是只用StanfordParser的:
import sys
import nltk,os
from nltk.parse.stanford import StanfordParser

# Point both NLTK's JVM bridge and JAVAHOME at the local JDK; needed on
# Windows when java.exe is not on PATH. Define the path once to keep the
# two settings in sync.
java_path = "D:/Program Files/Java/jdk1.8.0_111/bin/java.exe"
nltk.internals.config_java(java_path)
os.environ['JAVAHOME'] = java_path

# Keyword arguments make it impossible to silently swap the jar, the
# models jar, and the serialized grammar (they were positional before).
eng_parser = StanfordParser(
    path_to_jar="stanford-parser.jar",
    path_to_models_jar="stanford-parser-3.9.1-models.jar",
    model_path="englishPCFG.ser.gz",
)

sentence = 'I love u'
# parse() expects an already-tokenized list of words and yields Tree objects.
res = list(eng_parser.parse(sentence.split()))
# A bare `res` expression only echoes inside a REPL; print() also works
# when this is run as a script.
print(res)
结果:
[Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ['love']), Tree('NP', [Tree('NN', ['u'])])])])])]
后来发现有个StanfordCoreNLP，功能就很强大
from stanfordcorenlp import StanfordCoreNLP
from nltk.tree import Tree

# Start a local CoreNLP backend from the unpacked distribution directory.
nlp = StanfordCoreNLP('stanford-corenlp-full-2018-10-05')
try:
    s = 'At the end of the day, successfully launching a new product means reaching the right audience and consistently delivering a very convincing message. To avoid spending money recklessly because of disjointed strategies, we have developed several recommendations.'
    # print ('Tokenize:', nlp.word_tokenize(s))
    # print ('Part of Speech:', nlp.pos_tag(s))
    # print ('Named Entities:', nlp.ner(s))
    print ('Constituency Parsing:', nlp.parse(s))  # constituency (phrase-structure) tree
    # print ('Dependency Parsing:', nlp.dependency_parse(s))  # dependency parse
    tree=Tree.fromstring(nlp.parse(s))
    tree.draw()
finally:
    # Always release the server, even if parsing or drawing raises;
    # otherwise the backend Java process keeps consuming a lot of memory.
    nlp.close()
结果:
Constituency Parsing: (ROOT
(S
(PP (IN At)
(NP
(NP (DT the) (NN end))
(PP (IN of)
(NP (DT the) (NN day)))))
(, ,)
(S
(ADVP (RB successfully))
(VP (VBG launching)
(NP (DT a) (JJ new) (NN product))))
(VP (VBZ means)
(S
(VP
(VP (VBG reaching)
(NP (DT the) (JJ right) (NN audience)))
(CC and)
(VP
(ADVP (RB consistently))
(VBG delivering)
(NP (DT a)
(ADJP (RB very) (JJ convincing))
(NN message))))))
(. .)))