目标:爬取虎扑 NBA 新闻页(https://voice.hupu.com/nba)的新闻标题并展示在页面上,步骤如下
1、新建spring boot项目
2、引入Jsoup依赖
<dependency>
<groupId>org.jsoup</groupId>
<artifactId>jsoup</artifactId>
<version>1.12.1</version>
</dependency>
3、编写爬虫接口,为了简单起见,这里直接在启动类编写接口
package com.llg.jsoup;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import java.io.IOException;
@SpringBootApplication
@RestController
public class JsoupApplication {

    public static void main(String[] args) {
        SpringApplication.run(JsoupApplication.class, args);
    }

    /**
     * Fetches the Hupu NBA news page and returns its headlines as an HTML
     * fragment of anchor tags (one link per headline, separated by line breaks).
     *
     * @return HTML anchor markup for each scraped headline; an empty string if
     *         the page could not be fetched
     */
    @GetMapping("/soup")
    public String soup() {
        StringBuilder html = new StringBuilder();
        try {
            String url = "https://voice.hupu.com/nba";
            // Jsoup performs the HTTP GET and parses the response into a DOM.
            Document document = Jsoup.connect(url).get();
            // CSS selector targeting the <a> element of each news-list entry.
            Elements links = document.select("div.news-list > ul > li > div.list-hd > h4 > a");
            for (Element link : links) {
                String href = link.attr("href");
                String title = link.ownText();
                // Fixed: the closing tag was written as "<a>" (a second opening
                // tag), which nested every subsequent link inside the previous
                // one; the correct closing tag is "</a>".
                html.append("<a href='")
                    .append(href)
                    .append("'>")
                    .append(title)
                    .append("</a><br><br>");
            }
        } catch (IOException e) {
            // Best-effort demo endpoint: log the failure and fall through to
            // return whatever was collected (empty on a fetch error).
            e.printStackTrace();
        }
        return html.toString();
    }
}
4、测试,访问:http://localhost:8080/soup