Dependency parsing of a single sentence with the Stanford Parser

package com.parser;

import java.io.StringReader;
import java.util.List;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.process.Tokenizer;
import edu.stanford.nlp.process.TokenizerFactory;
import edu.stanford.nlp.trees.*;

public class StanfordParserDemo1Copy {
  private StanfordParserDemo1Copy() {} // static methods only
  public static void main(String[] args) {
    // Load the English PCFG model from the stanford-parser models jar on the classpath
    String parserModel = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz";
    LexicalizedParser lp = LexicalizedParser.loadModel(parserModel);
    String sent2 = "This is another sentence.";
    demoAPI(lp, sent2);
  }

  public static void demoAPI(LexicalizedParser lp, String str) {
    // Tokenize the raw string with the Penn Treebank tokenizer
    TokenizerFactory<CoreLabel> tokenizerFactory =
        PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
    Tokenizer<CoreLabel> tok = tokenizerFactory.getTokenizer(new StringReader(str));
    List<CoreLabel> rawWords2 = tok.tokenize();

    // Constituency parse over the token list
    Tree parse = lp.apply(rawWords2);

    // Convert the constituency tree into typed dependencies
    TreebankLanguagePack tlp = lp.treebankLanguagePack(); // PennTreebankLanguagePack for English
    GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
    GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
    List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();

    // Print one typed dependency per line, in the form relation(governor-index, dependent-index)
    for (TypedDependency td : tdl) {
      System.out.println(td);
    }

    // You can also use a TreePrint object to print trees and dependencies
    // TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
    // tp.printTree(parse);
  }

}
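The commented-out TreePrint lines above give the same information in a different form: the Penn-style constituency tree followed by the collapsed typed dependencies. Below is a minimal self-contained sketch of that variant; the class name TreePrintDemo is made up for illustration, and it assumes the stanford-parser jar and the English models jar are on the classpath, just like the demo above.

package com.parser;

import java.io.StringReader;
import java.util.List;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.CoreLabelTokenFactory;
import edu.stanford.nlp.process.PTBTokenizer;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreePrint;

public class TreePrintDemo { // hypothetical class name, for illustration only

  public static void main(String[] args) {
    LexicalizedParser lp =
        LexicalizedParser.loadModel("edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz");

    // Tokenize and parse exactly as in demoAPI above
    List<CoreLabel> tokens = PTBTokenizer.factory(new CoreLabelTokenFactory(), "")
        .getTokenizer(new StringReader("This is another sentence."))
        .tokenize();
    Tree parse = lp.apply(tokens);

    // "penn" prints the constituency tree; "typedDependenciesCollapsed"
    // prints the collapsed Stanford dependencies for the same parse
    TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
    tp.printTree(parse);
  }
}

The format string passed to TreePrint is a comma-separated list, so you can add or drop output styles (for example, keep only "typedDependenciesCollapsed") without changing the rest of the code.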

