Using HtmlCleaner, Saxon, and XPath (XPathEvaluator) to Search and Parse HTML in Java

This article shows how to combine the HtmlCleaner and Saxon libraries in Java to parse HTML with XPath: adding the Maven dependencies, compiling an XPath expression, and handling the results of an XPath query. The example code demonstrates how to select and extract specific content from an HTML document.

1. Add the HtmlCleaner and Saxon dependencies

Add the following dependencies to your Maven pom.xml:

<dependency>
    <groupId>net.sourceforge.htmlcleaner</groupId>
    <artifactId>htmlcleaner</artifactId>
</dependency>
<dependency>
    <groupId>net.sf.saxon</groupId>
    <artifactId>Saxon-HE</artifactId>
</dependency>
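Note that no <version> element is given above; you will need to pin one. Both artifacts are published on Maven Central, so a complete dependency block might look like the following (the version numbers are only examples, pick whatever current releases suit your project):

<dependency>
    <groupId>net.sourceforge.htmlcleaner</groupId>
    <artifactId>htmlcleaner</artifactId>
    <version>2.22</version> <!-- example version, use a current release -->
</dependency>
<dependency>
    <groupId>net.sf.saxon</groupId>
    <artifactId>Saxon-HE</artifactId>
    <version>9.9.1-7</version> <!-- example version, use a current release -->
</dependency>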

2. Example code using HtmlCleaner, Saxon, and XPath (XPathEvaluator)

package us.codecraft.webmagic.selector;

import net.sf.saxon.lib.NamespaceConstant;
import net.sf.saxon.xpath.XPathEvaluator;
import org.htmlcleaner.CleanerProperties;
import org.htmlcleaner.DomSerializer;
import org.htmlcleaner.HtmlCleaner;
import org.htmlcleaner.TagNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

import javax.xml.namespace.NamespaceContext;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpression;
import javax.xml.xpath.XPathExpressionException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Selector that supports XPath 2.0, wrapping HtmlCleaner and Saxon HE.
 */
public class Xpath2Selector implements Selector {

    private String xpathStr;

    private XPathExpression xPathExpression;

    private Logger logger = LoggerFactory.getLogger(getClass());

    // Pass in the XPath expression to compile
    public Xpath2Selector(String xpathStr) {
        this.xpathStr = xpathStr;
        try {
            init();
        } catch (XPathExpressionException e) {
            throw new IllegalArgumentException("XPath error!", e);
        }
    }

    enum XPath2NamespaceContext implements NamespaceContext {

        INSTANCE;

        private final Map<String, String> prefix2NamespaceMap = new ConcurrentHashMap<String, String>();

        private final Map<String, List<String>> namespace2PrefixMap = new ConcurrentHashMap<String, List<String>>();

        private void put(String prefix, String namespaceURI) {
            prefix2NamespaceMap.put(prefix, namespaceURI);
            List<String> prefixes = namespace2PrefixMap.get(namespaceURI);
            if (prefixes == null) {
                prefixes = new ArrayList<String>();
                namespace2PrefixMap.put(namespaceURI, prefixes);
            }
            prefixes.add(prefix);
        }

        private XPath2NamespaceContext() {
            put("fn", NamespaceConstant.FN);
            put("xslt", NamespaceConstant.XSLT);
        }

        @Override
        public String getNamespaceURI(String prefix) {
            return prefix2NamespaceMap.get(prefix);
        }

        @Override
        public String getPrefix(String namespaceURI) {
            List<String> prefixes = namespace2PrefixMap.get(namespaceURI);
            if (prefixes == null || prefixes.size() < 1) {
                return null;
            }
            return prefixes.get(0);
        }

        @Override
        public Iterator<String> getPrefixes(String namespaceURI) {
            List<String> prefixes = namespace2PrefixMap.get(namespaceURI);
            if (prefixes == null || prefixes.size() < 1) {
                return null;
            }
            return prefixes.iterator();
        }
    }

    private void init() throws XPathExpressionException {
        XPathEvaluator xPathEvaluator = new XPathEvaluator();
        xPathEvaluator.setNamespaceContext(XPath2NamespaceContext.INSTANCE);
        xPathExpression = xPathEvaluator.compile(xpathStr);
    }

    @Override
    public String select(String text) {
        try {
            HtmlCleaner htmlCleaner = new HtmlCleaner();
            TagNode tagNode = htmlCleaner.clean(text);
            Document document = new DomSerializer(new CleanerProperties()).createDOM(tagNode);
            Object result;
            try {
                result = xPathExpression.evaluate(document, XPathConstants.NODESET);
            } catch (XPathExpressionException e) {
                // The expression does not yield a node-set (e.g. it returns a string), so fall back to STRING
                result = xPathExpression.evaluate(document, XPathConstants.STRING);
            }
            if (result instanceof NodeList) {
                NodeList nodeList = (NodeList) result;
                if (nodeList.getLength() == 0) {
                    return null;
                }
                Node item = nodeList.item(0);
                if (item.getNodeType() == Node.ATTRIBUTE_NODE || item.getNodeType() == Node.TEXT_NODE) {
                    return item.getTextContent();
                } else {
                    // Serialize element nodes back to a markup string
                    StreamResult xmlOutput = new StreamResult(new StringWriter());
                    Transformer transformer = TransformerFactory.newInstance().newTransformer();
                    transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
                    transformer.transform(new DOMSource(item), xmlOutput);
                    return xmlOutput.getWriter().toString();
                }
            }
            return result.toString();
        } catch (Exception e) {
            logger.error("select text error! " + xpathStr, e);
        }
        return null;
    }

    @Override
    public List<String> selectList(String text) {
        List<String> results = new ArrayList<String>();
        try {
            HtmlCleaner htmlCleaner = new HtmlCleaner();
            TagNode tagNode = htmlCleaner.clean(text);
            Document document = new DomSerializer(new CleanerProperties()).createDOM(tagNode);
            Object result;
            try {
                result = xPathExpression.evaluate(document, XPathConstants.NODESET);
            } catch (XPathExpressionException e) {
                result = xPathExpression.evaluate(document, XPathConstants.STRING);
            }
            if (result instanceof NodeList) {
                NodeList nodeList = (NodeList) result;
                Transformer transformer = TransformerFactory.newInstance().newTransformer();
                StreamResult xmlOutput = new StreamResult();
                transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
                for (int i = 0; i < nodeList.getLength(); i++) {
                    Node item = nodeList.item(i);
                    if (item.getNodeType() == Node.ATTRIBUTE_NODE || item.getNodeType() == Node.TEXT_NODE) {
                        results.add(item.getTextContent());
                    } else {
                        xmlOutput.setWriter(new StringWriter());
                        transformer.transform(new DOMSource(item), xmlOutput);
                        results.add(xmlOutput.getWriter().toString());
                    }
                }
            } else {
                results.add(result.toString());
            }
        } catch (Exception e) {
            logger.error("select text error! " + xpathStr, e);
        }
        return results;
    }
}
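Xpath2Selector implements the Selector interface from WebMagic (the us.codecraft.webmagic.selector package). If you want to try the class outside of WebMagic, a minimal stand-in interface along the lines of the sketch below is enough to make it compile; this is only a sketch under that assumption, and the real WebMagic interface may declare additional methods.

package us.codecraft.webmagic.selector;

import java.util.List;

/**
 * Minimal stand-in for WebMagic's Selector interface,
 * just enough for Xpath2Selector to compile outside of WebMagic.
 */
public interface Selector {

    // Extract a single result from the given HTML text
    String select(String text);

    // Extract all matching results from the given HTML text
    List<String> selectList(String text);
}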

Usage:

Xpath2Selector xpath2Selector = new Xpath2Selector("/html/body");
System.out.println(xpath2Selector.select("<html><body>test data</body></html>"));
