今天看文章发现一个叫Jsoup的东西,解析HTML很好用,听说常被用来做网页爬虫;
官网:https://jsoup.org/ (中文文档镜像:https://www.open-open.com/jsoup/)
demo:
/**
* 2020年10月27日下午2:36:20
*/
package testJsoup;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpClient.Version;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Iterator;
import javax.imageio.stream.FileImageOutputStream;
import javax.imageio.stream.ImageOutputStream;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
/**
* @author XWF
*
*/
/**
 * Jsoup demo: parses an in-memory HTML string, then fetches a live page
 * and queries elements by id and by CSS selector.
 *
 * @author XWF
 */
public class TestJsoup {

    /**
     * Entry point for the demo.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        try {
            // Parse an HTML string and print id:text for every <p> element.
            Document document = Jsoup.parse(
                "<html><head></head><body><p id=1>Body Text</p><p id=2>Test2</p></body></html>");
            for (Element element : document.getElementsByTag("p")) {
                System.out.println(element.id() + ":" + element.text());
            }

            // Fetch a live page over HTTP.
            Document doc = Jsoup.connect("http://www.baidu.com").get();

            // getElementById returns null when the id is absent — guard to avoid an NPE.
            Element imgLogo = doc.getElementById("s_lg_img");
            if (imgLogo != null) {
                // absUrl resolves the src attribute to an absolute URL against the page base.
                String imgSrcUrl = imgLogo.absUrl("src");
                System.out.println(imgSrcUrl);
            }

            // Select every <span class="title-content-title"> and print its text.
            Elements eles = doc.select("span.title-content-title");
            eles.forEach(ele -> System.out.println(ele.text()));
        } catch (IOException e) {
            // Network/connection failure; a demo just reports it.
            e.printStackTrace();
        }
    }
}
结果: