A Small Java Crawler (No Regular Expressions)

I was going through some things I played with a long time ago — I forget exactly when I wrote this — and decided to tidy one of them up. As originally written, it crawled cosplay images straight off a certain site.

Straight to the code:

import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

public class ReptileTest {
    public static void main(String[] args) throws Exception {
        String URL = "http://some-cosplay-site";
        List<String> list = getPageURL(URL);   // collect the links of all posts
        List<String> imgURL = getImgURL(list); // collect every image link from those posts
        downImg(imgURL);                       // download the images
    }

    /* Collect the links of all posts */
    public static List<String> getPageURL(String URL) throws Exception {

        List<String> list = new ArrayList<String>();
        Document document = Jsoup.connect(URL).get();
        // grab the post links on page 1
        Elements elements = document.select(".pic_kuang a");
        // walk the pagination widget to collect the page numbers
        Elements gpage = document.select(".gpage a");
        for (Element element : gpage) {
            list.add(element.text());
        }
        // the second-to-last pagination link holds the last page number
        int lastPage = Integer.parseInt(list.get(list.size() - 2));
        list.clear();
        // loop over every page and collect the post links on it
        for (int i = 1; i <= lastPage; i++) {
            if (i > 1) {
                document = Jsoup.connect("http://xxxxx/picture/?page=" + i).get();
                elements = document.select(".pic_kuang a");
            }
            for (Element element : elements) {
                String href = element.attr("href");
                list.add("http://www.xxxxx.com" + href);
            }
            System.out.println("Added page " + i);
        }
        return list;
    }

    /* Collect every image link from every post */
    public static List<String> getImgURL(List<String> list) throws Exception {
        List<String> listImg = new ArrayList<String>();
        for (String s : list) {
            Document document = Jsoup.connect(s).get();
            Elements select = document.select(".p a");
            for (Element element : select) {
                Elements imgs = element.select("a img");
                String src = imgs.attr("src");
                // skip empty or junk src values
                if (src.length() > 10) {
                    listImg.add(src);
                    System.out.println("Got image src: " + src);
                }
            }
        }
        return listImg;
    }

    /* Download the images to local disk */
    public static void downImg(List<String> list) throws Exception {
        // connect / request / socket timeouts (same values as before)
        RequestConfig requestConfig = RequestConfig.custom()
                .setConnectTimeout(5000)
                .setConnectionRequestTimeout(1000)
                .setSocketTimeout(5000)
                .build();
        // reuse one client for every download; try-with-resources guarantees
        // the client, response and streams are closed even when a request fails
        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
            for (String imgSrc : list) {
                // derive the file name from the last path segment of the URL
                String imgName = imgSrc.substring(imgSrc.lastIndexOf("/") + 1);
                HttpGet httpGet = new HttpGet("http://www.xxx.com" + imgSrc);
                httpGet.setConfig(requestConfig);
                try (CloseableHttpResponse response = httpClient.execute(httpGet)) {
                    if (response.getStatusLine().getStatusCode() == 200) {
                        HttpEntity entity = response.getEntity();
                        try (InputStream inputStream = entity.getContent();
                             OutputStream outputStream = new FileOutputStream(new File("D:\\code\\reptile\\" + imgName))) {
                            if (IOUtils.copy(inputStream, outputStream) > 0) {
                                System.out.println("Saved " + imgName);
                            }
                        }
                    }
                }
            }
        }
    }
}
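
A quick note on the fetching calls above: Jsoup.connect(url).get() goes out with Jsoup's default user agent and a fairly generous default timeout, and some sites reject the default agent outright. Below is a minimal sketch of a hardened fetch helper; the user-agent string, the 5-second timeout, and the single retry are illustrative choices of mine, not something the original code does.

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

import java.io.IOException;

public class FetchHelper {
    /* Fetch a page with an explicit user agent and timeout, retrying once on
       failure. All concrete values here are illustrative assumptions. */
    public static Document fetch(String url) throws IOException {
        for (int attempt = 1; ; attempt++) {
            try {
                return Jsoup.connect(url)
                        .userAgent("Mozilla/5.0")   // some sites block Jsoup's default UA
                        .timeout(5000)              // 5 s instead of Jsoup's default
                        .get();
            } catch (IOException e) {
                if (attempt >= 2) throw e;          // give up after one retry
                System.out.println("Retrying " + url);
            }
        }
    }
}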

So that's the whole crawler. No regular expressions are used anywhere; everything is picked out directly with CSS selectors, in the three steps called from main(). The core is the downImg() method, and page parsing is handled by the Jsoup framework. The Maven dependencies are below, followed by a quick way to test the selectors offline.

<!-- https://mvnrepository.com/artifact/org.jsoup/jsoup -->
<dependency>
    <groupId>org.jsoup</groupId>
    <artifactId>jsoup</artifactId>
    <version>1.11.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient -->
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.6</version>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-io/commons-io -->
<dependency>
    <groupId>commons-io</groupId>
    <artifactId>commons-io</artifactId>
    <version>2.4</version>
</dependency>
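
To sanity-check the CSS selectors without hammering the live site, Jsoup can parse an HTML string directly. A quick sketch — the markup below is made up to mirror the .pic_kuang structure the crawler assumes:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class SelectorTest {
    public static void main(String[] args) {
        // invented markup mimicking the listing page's assumed structure
        String html = "<div class='pic_kuang'><a href='/post/1.html'>post 1</a></div>"
                + "<div class='pic_kuang'><a href='/post/2.html'>post 2</a></div>";
        Document document = Jsoup.parse(html);
        // same selector the crawler uses for post links
        for (Element a : document.select(".pic_kuang a")) {
            System.out.println(a.attr("href"));   // prints /post/1.html then /post/2.html
        }
    }
}

The same trick works for checking the .gpage and .p a selectors against a saved copy of a real page.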

The run output (the original post included a screenshot here):

All in all, it turned out pretty well.
