Retrieving DOM elements with Jsoup


Jsoup provides a rich API for finding the DOM elements we need.


The idea is to fetch the page with HttpClient, then use Jsoup to extract the elements we want.


Some commonly used lookup methods (a minimal, self-contained sketch follows the list):

getElementById(String id)  look up an element by its id

getElementsByTag(String tagName)  look up elements by tag name

getElementsByClass(String className)  look up elements by class name

getElementsByAttribute(String key)  look up elements by attribute name

getElementsByAttributeValue(String key, String value)  look up elements by attribute name and value
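
Before the full crawler example, here is a minimal, self-contained sketch of these five methods run against a hard-coded HTML snippet, so they can be tried without any network access (the snippet and the class name DomLookupDemo are made up for illustration):

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class DomLookupDemo {
    public static void main(String[] args) {
        String html = "<div id='nav' class='menu'>"
                + "<a href='/a' target='_blank' width='100'>first</a>"
                + "<a href='/b'>second</a></div>";
        Document doc = Jsoup.parse(html); // parse the snippet into a Document

        Element byId = doc.getElementById("nav");               // by id
        Elements byTag = doc.getElementsByTag("a");              // by tag name
        Elements byClass = doc.getElementsByClass("menu");       // by class name
        Elements byAttr = doc.getElementsByAttribute("width");   // by attribute name
        Elements byAttrValue = doc.getElementsByAttributeValue("target", "_blank"); // by attribute name and value

        System.out.println(byId.text());                       // first second
        System.out.println(byTag.size());                       // 2
        System.out.println(byClass.size());                     // 1
        System.out.println(byAttr.size());                      // 1
        System.out.println(byAttrValue.first().attr("href"));   // /a
    }
}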


Example:

package com.gcx.test;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * Hello world!
 *
 */
public class App 
{
    public static void main(String[] args) throws Exception{
        CloseableHttpClient httpclient = HttpClients.createDefault(); // create an HttpClient instance
        HttpGet httpget = new HttpGet("http://www.cnblogs.com/"); // create an HttpGet for the target page

        CloseableHttpResponse response = httpclient.execute(httpget); // execute the GET request
        HttpEntity entity=response.getEntity(); // get the response entity
        String content=EntityUtils.toString(entity, "utf-8"); // read the response body as a UTF-8 string
        response.close(); // close the response and release system resources

        Document doc=Jsoup.parse(content); // parse the page into a Document

        Element navTopElement=doc.getElementById("site_nav_top"); // look up a DOM element by id
        String navTop=navTopElement.text(); // get the element's text
        System.out.println("Slogan: "+navTop);

        Elements titleElements=doc.getElementsByTag("title"); // look up DOM elements by tag name
        Element titleElement=titleElements.get(0); // take the first element
        String title=titleElement.text(); // get the element's text
        System.out.println("Page title: "+title);

        Elements postItemElements=doc.getElementsByClass("post_item"); // look up DOM elements by class name
        for(Element e:postItemElements){
            System.out.println(e.toString());
            System.out.println("1111111111111111111111111111111");
        }
        System.out.println("***********************");
        Elements widthElements=doc.getElementsByAttribute("width"); // look up DOM elements by attribute name
        for(Element e:widthElements){
            System.out.println(e.toString());
        }

        System.out.println("----------------------------");
        Elements targetElements=doc.getElementsByAttributeValue("target", "_blank");  // look up DOM elements by attribute name and value
        for(Element e:targetElements){
            System.out.println(e.toString());
        }
    }
}


Output:



Searching by tag name, id, or class, as above, often isn't enough for real-world needs: much of the time we want to pull out a whole group of DOM elements that share a regular, nested structure. This is where selectors come in. Jsoup supports a selector syntax similar to CSS and jQuery.
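
For quick reference, a few commonly used selector forms, shown as a short sketch against a made-up HTML string (the real query strings in the example below come from cnblogs' markup):

        Document demo = Jsoup.parse("<div id='list'><p class='item'><a href='/x.png'>x</a></p></div>");
        demo.select("#list");          // by id
        demo.select(".item");          // by class
        demo.select("p a");            // descendant: a inside p
        demo.select("a[href]");        // elements that have an href attribute
        demo.select("a[href$=.png]");  // href ending with .png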



package com.gcx.test;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * Hello world!
 *
 */
public class App 
{
    public static void main(String[] args) throws Exception{
        CloseableHttpClient httpclient = HttpClients.createDefault(); // create an HttpClient instance
        HttpGet httpget = new HttpGet("http://www.cnblogs.com/"); // create an HttpGet for the target page

        CloseableHttpResponse response = httpclient.execute(httpget); // execute the GET request
        HttpEntity entity=response.getEntity(); // get the response entity
        String content=EntityUtils.toString(entity, "utf-8"); // read the response body as a UTF-8 string
        response.close(); // close the response and release system resources

        Document doc=Jsoup.parse(content); // parse the page into a Document

        Elements linkElements=doc.select(".post_item .post_item_body h3 a"); // select all post link elements
        for(Element e:linkElements){
            System.out.println("Post title: "+e.text());
            System.out.println("-------------");
        }

        Elements hrefElements=doc.select("a[href]"); // a elements that have an href attribute
        for(Element e:hrefElements){
            System.out.println(e.toString());
            System.out.println("-------------");
        }

        Elements imgElements=doc.select("img[src$=.png]"); // img elements whose src ends with .png
        for(Element e:imgElements){
            System.out.println(e.toString());
            System.out.println("-------------");
        }

        Element element=doc.getElementsByTag("title").first(); // take the first element whose tag is title
        String title=element.text(); // get the element's text
        System.out.println("Page title: "+title);
    }
}
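
One detail worth noting: select never returns null. When nothing matches, it returns an empty Elements collection, so the for-each loops above simply run zero times; first() on an empty result, however, returns null.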



Getting attribute values


package com.gcx.test;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * Hello world!
 *
 */
public class App 
{
    public static void main(String[] args) throws Exception{
        CloseableHttpClient httpclient = HttpClients.createDefault(); // create an HttpClient instance
        HttpGet httpget = new HttpGet("http://www.cnblogs.com/"); // create an HttpGet for the target page

        CloseableHttpResponse response = httpclient.execute(httpget); // execute the GET request
        HttpEntity entity=response.getEntity(); // get the response entity
        String content=EntityUtils.toString(entity, "utf-8"); // read the response body as a UTF-8 string
        response.close(); // close the response and release system resources

        Document doc=Jsoup.parse(content); // parse the page into a Document

        Elements linkElements=doc.select("#post_list .post_item .post_item_body h3 a"); // select all post link elements with a CSS selector
        for(Element e:linkElements){
            System.out.println("Post title: "+e.text());
            System.out.println("Post URL: "+e.attr("href"));
            System.out.println("target: "+e.attr("target"));
        }

        Element linkElement=doc.select("#friend_link").first(); // first element matching the #friend_link selector
        System.out.println("Plain text: "+linkElement.text());
        System.out.println("html: "+linkElement.html());


    }
}
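
As a closing note, for simple cases Jsoup can also fetch the page by itself, without HttpClient; and because the connection records the page's base URI, attr("abs:href") can then resolve relative links into absolute URLs. A minimal sketch under those assumptions (the class name ConnectDemo, the user-agent string, and the timeout are arbitrary example values):

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class ConnectDemo {
    public static void main(String[] args) throws Exception {
        Document doc = Jsoup.connect("http://www.cnblogs.com/")
                .userAgent("Mozilla/5.0") // present ourselves as a regular browser
                .timeout(5000)            // 5-second timeout
                .get();                   // execute the GET request and parse the response

        System.out.println(doc.title());
        for (Element a : doc.select(".post_item .post_item_body h3 a")) {
            // abs:href resolves each link against the page's base URI
            System.out.println(a.attr("abs:href"));
        }
    }
}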


