maven依赖:
<dependency>
    <groupId>org.jsoup</groupId>
    <artifactId>jsoup</artifactId>
    <version>1.10.2</version>
</dependency>
java代码:
package com.main;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.HashSet;
import java.util.Set;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
public class Demo {
//爬虫的网站
public String url = “https://www.baidu.com/”;
//遍历的深度
public int depth = 1;
PrintStream ps = null;
//存储已经爬虫的网站
public Set getUrls = new HashSet();
public static void main(String[] args) {
try {
Demo demo = new Demo();
//使用UTF-8编码,不覆盖写入
demo.ps = new PrintStream(new PrintStream(“C://Demo//demo.txt”),true,“UTF-8”);
demo.prase(demo.url,0);
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
} catch (FileNotFoundException e) {
e.printStackTrace();
}
}
void prase(String url,int d){
try {
Document document = Jsoup.connect(url).get();
Elements as = document.select(“a[href]”);
for(Element a : as){
if(a.attr(“href”).startsWith(“http”) && !getUrls.contains(a.attr(“href”))) {
ps.println(a.text());
ps.println(a.attr(“href”));
getUrls.add(url);
//判断是否超出深度
if(d < depth) {
prase(a.attr(“href”), d + 1);
}
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
}