A simple Java crawler that prints novel chapters to the console

 

Things have been slow at work lately, so I decided to dig into web crawlers. Since I like reading novels now and then, novels were the obvious target. Without further ado, the source code is below. The only third-party dependency is Jsoup (org.jsoup:jsoup on Maven Central).

Directory structure:

src/
├── controller/test.java
├── util/Browser.java
└── util/StringUtils.java
resources/
└── url.properties

Main handler:

package controller;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.util.*;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import util.Browser;
import util.StringUtils;

/**
 * Created by lenovo on 2018/11/29.
 */
public class test {

    /**
     * @param args
     * @throws Exception
     */
    public static void main(String[] args) throws Exception{
        Map<String,String> map = read();
        // read() returns null when url.properties doesn't exist yet (first run)
        String baseUrl = map == null ? null : map.get("baseUrl");
        String child = map == null ? null : map.get("child");

        if (baseUrl == null || baseUrl.isEmpty()){
            run(search(), null);
        } else {
            run(baseUrl, child);
        }
    }

    public static void run(String baseUrl,String child) throws Exception {
        String childUrl = child;
        // Two passes: if the first dies (network hiccup, bad input), retry once
        // from the last known chapter, then save the position and give up.
        for (int attempt = 0; attempt < 2; attempt++) {
            try {
                Document document;
                if (childUrl != null && !childUrl.isEmpty()) {
                    document = connect(baseUrl + childUrl);
                } else {
                    childUrl = menu(baseUrl);
                    document = connect(baseUrl + childUrl);
                }
                content(document);
                while (true) {
                    System.out.println(StringUtils.chooseMenu);
                    int result = choose();
                    switch (result) {
                        case 1: // previous chapter
                            childUrl = lastUrl(document);
                            break;
                        case 2: // back to the table of contents
                            childUrl = menu(baseUrl);
                            break;
                        case 3: // next chapter
                            childUrl = nextUrl(document);
                            break;
                        case 4: // re-print the current chapter
                            break;
                        default: // unrecognized input: show the menu again
                            continue;
                    }
                    document = connect(baseUrl + childUrl);
                    content(document);
                    if (result != 4) {
                        save("", childUrl); // remember the reading position
                    }
                }
            } catch (Exception e) {
                if (attempt == 1) {
                    save("", childUrl);
                    e.printStackTrace();
                }
            }
        }
    }

    public static Document connect(String url) throws Exception {
        Document document = Jsoup.connect(url)
                .userAgent(Browser.Chrome_17$0_MAC)
                .header("Connection", "close") // when closing the connection per request, be sure to send this header
                .timeout(8000) // timeout in milliseconds
                .get();

        return document;
    }

    public static Document getIndex() throws Exception {
        return connect(StringUtils.indexUrl);
    }

    public static String menu(String url) throws Exception{
        Document document = connect(url);
        // .mulu ("目录") is the site's table-of-contents block; collect every chapter link
        Elements mulu = document.select(".mulu").select("a[href]");
        List<Map<String,String>> menu = new ArrayList<>();
        for (int i = 0;i<mulu.size();i++){
            System.out.println(i+" "+mulu.get(i).text());
            Map<String,String> map = new HashMap<>();
            map.put("url",mulu.get(i).attr("href"));
            map.put("menu",mulu.get(i).text());
            menu.add(map);
        }
        int state = choose();
        return menu.get(state).get("url");
    }

    public static String search() throws Exception{
        System.out.println("Enter a search term:");
        String key = readLine();
        Document document = Jsoup.connect(StringUtils.searchUrl).data("q", key)
                .userAgent(Browser.Chrome_17$0_MAC)
                .header("Connection", "close") // same note as in connect(): send this when closing per request
                .timeout(8000) // timeout in milliseconds
                .get();
        Elements elements = document.select("h2").select("a[href]");
        for (int i = 0; i < elements.size(); i++){
            System.out.println(i + " " + elements.get(i).text());
        }
        System.out.println("Enter the number of the result to open:");
        int choose = choose();
        String baseUrl = elements.get(choose).attr("href");
        save(baseUrl, "");
        return baseUrl;
    }

    public static void content(Document document){
        System.out.println(document.title());
        String content = document.select(".yd_text2").toString();
        // strip the wrapping <div class="yd_text2"> (22 chars) and the trailing </div> (6 chars)
        content = content.substring(22, content.length() - 6);
        content = content.replace("&nbsp;", "");
        content = content.replace("<br>", "");
        System.out.println(content);
    }

    public static String nextUrl(Document document){
        // .pereview is the site's prev/menu/next link bar; index 2 is "next chapter"
        Elements links = document.select(".pereview").get(0).select("a[href]");
        return links.get(2).attr("href");
    }

    public static String lastUrl(Document document){
        // index 0 of the same link bar is "previous chapter"
        Elements links = document.select(".pereview").get(0).select("a[href]");
        return links.get(0).attr("href");
    }

    public static int choose(){
        Scanner sc = new Scanner(System.in);
        return sc.nextInt();
    }

    public static String readLine(){
        Scanner sc = new Scanner(System.in);
        return sc.nextLine();
    }

    public static void save(String baseUrl,String child){
        Properties prop = new Properties();
        // true = open in append mode, so every save writes a new block to the file
        try (FileOutputStream oFile = new FileOutputStream("resources/url.properties", true)) {
            if (baseUrl != null && !baseUrl.isEmpty()) {
                prop.setProperty("baseUrl", baseUrl);
            }
            if (child != null && !child.isEmpty()) {
                prop.setProperty("child", child);
            }
            prop.store(oFile, "The New properties file");
        } catch (Exception e) {
            System.out.println(e);
        }
    }

    public static Map<String,String> read(){
        Properties prop = new Properties();
        try{
            Map<String,String> map = new HashMap<String, String>();
            // read the saved position back from url.properties
            InputStream in = new BufferedInputStream(new FileInputStream("resources/url.properties"));
            prop.load(in); // load the property list; later blocks override earlier ones
            String baseUrl = prop.getProperty("baseUrl");
            String child = prop.getProperty("child");
            map.put("baseUrl", baseUrl);
            map.put("child", child);
            in.close();
            return map;
        }catch (Exception e){
            System.out.println(e); // typically: file not found on the first run
        }
        return null;
    }

}
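
Before the utility classes, here's a one-off sketch of the same fetch-and-print flow boiled down to a single main(). It is not part of the project above: the chapter URL is the one recorded in url.properties below, and it uses Jsoup's Element.text(), which strips tags and entities for you, so the manual substring/replace cleanup in content() isn't needed.

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import util.Browser;

// One-shot sketch: fetch a single chapter page and print its title and body text.
// .yd_text2 is the same content selector the crawler above uses for 88dush.
public class OneShot {
    public static void main(String[] args) throws Exception {
        // sample chapter URL, taken from the url.properties dump below
        String url = "https://www.88dush.com/xiaoshuo/35/35954/11384708.html";
        Document doc = Jsoup.connect(url)
                .userAgent(Browser.Chrome_17$0_MAC)
                .header("Connection", "close")
                .timeout(8000)
                .get();
        System.out.println(doc.title());
        Element body = doc.selectFirst(".yd_text2");
        if (body != null) {
            // text() strips tags and entities, so no manual <br>/&nbsp; cleanup
            System.out.println(body.text());
        }
    }
}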

Utility classes:

package util;

/**
 * Created by lenovo on 2018/12/5.
 */
public class StringUtils {
    public static final String indexUrl = "https://www.88dush.com";
    public static final String searchUrl = "https://so.88dush.com/search/so.php";
    public static final String chooseMenu = "1:last;2:menu;3:next;4:refresh";
}
package util;

/**
 * Browser User-Agent strings
 * Created by dongxiaoqi on 2018/11/30.
 */
public class Browser {

    public static final String safari_5$1_MAC  = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50";
    public static final String safari_5$1_Windows  = "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50";
    public static final String Firefox_38esr  = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0";
    public static final String IE11  = "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko";
    public static final String IE9  = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)";
    public static final String IE8  = "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)";
    public static final String IE7  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)";
    public static final String IE6  = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)";
    public static final String Firefox_4$0$1_MAC  = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1";
    public static final String Firefox_4$0$1_Windows  = "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1";
    /**
     * Opera
     */
    public static final String Opera_11$11_MAC  = "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11";
    public static final String Opera_11$11_Windows   = "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11";
    public static final String Chrome_17$0_MAC  = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11";
    /**
     * Maxthon browser
     */
    public static final String Maxthon  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)";
    public static final String TencentTT  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)";
    /**
     * The World browser
     */
    public static final String The_World_2X  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)";
    public static final String The_World_3X  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)";
    public static final String sougou  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)";
    public static final String SE360  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)";
    public static final String Avant = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)";
    public static final String Green_Browser  = "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)";

}
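
The crawler always sends Chrome_17$0_MAC; with a pool of User-Agent strings like this, rotating them is trivial. A small sketch (the UserAgents class is mine, not part of the project):

package util;

import java.util.Random;

// Sketch: pick a random User-Agent from the Browser table above,
// so repeated requests don't all advertise the same client.
public class UserAgents {
    private static final String[] POOL = {
            Browser.Chrome_17$0_MAC,
            Browser.Firefox_38esr,
            Browser.safari_5$1_Windows
    };
    private static final Random RANDOM = new Random();

    public static String random() {
        return POOL[RANDOM.nextInt(POOL.length)];
    }
}

connect() could then call .userAgent(UserAgents.random()) instead of the fixed constant.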

url.properties — this file records the current reading position so the next run can resume where you left off. Because save() opens it in append mode, every save writes a fresh header block; Properties.load keeps the last value it sees for each key, so resuming still works even as the file grows (a sketch of an overwrite-mode alternative follows the dump below):

#The New properties file
#Wed Dec 05 11:33:17 CST 2018
baseUrl=https\://www.88dush.com/xiaoshuo/35/35954/
#The New properties file
#Wed Dec 05 11:57:44 CST 2018
#The New properties file
#Thu Dec 06 08:31:59 CST 2018
#The New properties file
#Fri Dec 07 13:37:23 CST 2018
child=11384708.html
#The New properties file
#Fri Dec 07 13:51:15 CST 2018
#The New properties file
#Fri Dec 07 13:52:20 CST 2018
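
If you'd rather keep the file from growing, save() could load the existing values and rewrite the file in place instead of appending. A minimal sketch (saveOverwrite is a name I made up; it's not in the project):

public static void saveOverwrite(String baseUrl, String child) {
    java.util.Properties prop = new java.util.Properties();
    try (java.io.FileInputStream in = new java.io.FileInputStream("resources/url.properties")) {
        prop.load(in); // keep whatever is already stored
    } catch (Exception ignored) {
        // first run: no file yet, start empty
    }
    if (baseUrl != null && !baseUrl.isEmpty()) prop.setProperty("baseUrl", baseUrl);
    if (child != null && !child.isEmpty()) prop.setProperty("child", child);
    try (java.io.FileOutputStream out = new java.io.FileOutputStream("resources/url.properties")) {
        // no append flag: the file is rewritten as a single block each time
        prop.store(out, "current reading position");
    } catch (Exception e) {
        System.out.println(e);
    }
}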

A final note: the target of this crawl is 88dush, a pretty good free novel site, at https://www.88dush.com. If this is considered infringement, please contact me and I'll take it down; until someone notices, it stays up. Haha.
