Spring Boot + WebMagic: implementing a Java crawler with JDBC and MySQL

A while ago I needed to scrape some information from web pages. I had no prior experience with crawlers, so I looked into WebMagic and wrote a simple crawler with it.

1. A brief introduction to WebMagic

WebMagic has a fully modular design that covers the whole crawler lifecycle: link extraction, page downloading, content extraction, and persistence. It supports multi-threaded and distributed crawling, automatic retries, and custom User-Agent/cookie settings.

Design overview:
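WebMagic is built around four components, Downloader, PageProcessor, Scheduler, and Pipeline, coordinated by a Spider. Below is a minimal sketch of how a crawl is configured and started; the class name, start URL, User-Agent, and cookie values are placeholders of mine, not from the article:

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

public class MinimalProcessor implements PageProcessor {

    // Site holds per-crawl settings: retries, crawl interval, User-Agent, cookies, etc.
    private final Site site = Site.me()
            .setRetryTimes(3)                      // retry failed downloads up to 3 times
            .setSleepTime(1000)                    // wait 1 second between requests
            .setUserAgent("Mozilla/5.0 (example)") // custom User-Agent (placeholder)
            .addCookie("JSESSIONID", "example");   // custom cookie (placeholder)

    @Override
    public void process(Page page) {
        // extraction logic goes here; see the CSDN example below
    }

    @Override
    public Site getSite() {
        return site;
    }

    public static void main(String[] args) {
        Spider.create(new MinimalProcessor())
                .addUrl("http://example.com")      // placeholder start URL
                .thread(5)
                .run();
    }
}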

Maven dependencies:

<dependency>
    <groupId>us.codecraft</groupId>
    <artifactId>webmagic-core</artifactId>
    <version>0.7.3</version>
</dependency>
<dependency>
    <groupId>us.codecraft</groupId>
    <artifactId>webmagic-extension</artifactId>
    <version>0.7.3</version>
</dependency>
<dependency>
    <groupId>us.codecraft</groupId>
    <artifactId>webmagic-extension</artifactId>
    <version>0.7.3</version>
    <exclusions>
        <!-- exclude the log4j binding so it does not clash with Spring Boot's default logging -->
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>

JDBC approach:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class CsdnBlogDao {

    private Connection conn = null;
    private Statement stmt = null;

    public CsdnBlogDao() {
        try {
            Class.forName("com.mysql.jdbc.Driver");
            String url = "jdbc:mysql://localhost:3306/test?"
                    + "user=***&password=***3&useUnicode=true&characterEncoding=UTF8";
            conn = DriverManager.getConnection(url);
            stmt = conn.createStatement();
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    // Insert one crawled blog post; returns the affected row count, or -1 on failure
    public int add(CsdnBlog csdnBlog) {
        try {
            String sql = "INSERT INTO `test`.`csdnblog` (`keyes`, `titles`, `content`, `dates`, `tags`, `category`, `views`, `comments`, `copyright`) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);";
            PreparedStatement ps = conn.prepareStatement(sql);
            ps.setInt(1, csdnBlog.getKey());
            ps.setString(2, csdnBlog.getTitle());
            ps.setString(3, csdnBlog.getContent());
            ps.setString(4, csdnBlog.getDates());
            ps.setString(5, csdnBlog.getTags());
            ps.setString(6, csdnBlog.getCategory());
            ps.setInt(7, csdnBlog.getView());
            ps.setInt(8, csdnBlog.getComments());
            ps.setInt(9, csdnBlog.getCopyright());
            return ps.executeUpdate();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        return -1;
    }
}
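Note that the DAO above never closes the PreparedStatement. A slightly more defensive variant of add(), using try-with-resources so the statement is always released, might look like the sketch below (same class, same table and columns; only the resource handling differs):

    // Sketch only: same INSERT as above, but the PreparedStatement is closed automatically
    public int add(CsdnBlog csdnBlog) {
        String sql = "INSERT INTO `test`.`csdnblog` (`keyes`, `titles`, `content`, `dates`, `tags`, `category`, `views`, `comments`, `copyright`) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setInt(1, csdnBlog.getKey());
            ps.setString(2, csdnBlog.getTitle());
            ps.setString(3, csdnBlog.getContent());
            ps.setString(4, csdnBlog.getDates());
            ps.setString(5, csdnBlog.getTags());
            ps.setString(6, csdnBlog.getCategory());
            ps.setInt(7, csdnBlog.getView());
            ps.setInt(8, csdnBlog.getComments());
            ps.setInt(9, csdnBlog.getCopyright());
            return ps.executeUpdate();
        } catch (SQLException e) {
            e.printStackTrace();
            return -1;
        }
    }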

Entity class:

public class CsdnBlog {

    private int key;         // post ID
    private String title;    // title
    private String dates;    // date
    private String tags;     // tags
    private String category; // category
    private int view;        // view count
    private int comments;    // comment count
    private int copyright;   // whether the post is original
    private String content;  // body text

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    public int getKey() {
        return key;
    }

    public void setKey(int key) {
        this.key = key;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public String getDates() {
        return dates;
    }

    public void setDates(String dates) {
        this.dates = dates;
    }

    public String getTags() {
        return tags;
    }

    public void setTags(String tags) {
        this.tags = tags;
    }

    public String getCategory() {
        return category;
    }

    public void setCategory(String category) {
        this.category = category;
    }

    public int getView() {
        return view;
    }

    public void setView(int view) {
        this.view = view;
    }

    public int getComments() {
        return comments;
    }

    public void setComments(int comments) {
        this.comments = comments;
    }

    public int getCopyright() {
        return copyright;
    }

    public void setCopyright(int copyright) {
        this.copyright = copyright;
    }

    @Override
    public String toString() {
        return "CsdnBlog [key=" + key + ", title=" + title + ", content=" + content + ", dates=" + dates + ", tags=" + tags
                + ", category=" + category + ", view=" + view + ", comments=" + comments + ", copyright=" + copyright + "]";
    }
}

Crawler (startup) class:

import java.util.List;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;

public class CsdnBlogPageProcessor implements PageProcessor {

    private static String username = "CHENYUFENG1991"; // the CSDN user name to crawl
    private static int size = 0;                       // total number of articles crawled

    // Crawl configuration: encoding, crawl interval, retry count, etc.
    private Site site = Site.me().setRetryTimes(3).setSleepTime(1000);

    public Site getSite() {
        return site;
    }

    // process() is the core extension point of the crawler: all extraction logic goes here
    public void process(Page page) {
        // List pages
        if (!page.getUrl().regex("http://blog\\.csdn\\.net/" + username + "/article/details/\\d+").match()) {
            // Queue every article page
            page.addTargetRequests(page.getHtml().xpath("//div[@id='article_list']").links() // restrict to the article-list area
                    .regex("/" + username + "/article/details/\\d+")
                    .replace("/" + username + "/", "http://blog.csdn.net/" + username + "/") // use replace() to turn the relative URLs into absolute ones
                    .all());
            // Queue the other list pages
            page.addTargetRequests(page.getHtml().xpath("//div[@id='papelist']").links() // restrict to the pagination area
                    .regex("/" + username + "/article/list/\\d+")
                    .replace("/" + username + "/", "http://blog.csdn.net/" + username + "/") // same trick for the list-page URLs
                    .all());
        // Article pages
        } else {
            size++; // one more article
            // Store the extracted data in a CsdnBlog object so it can be written to the database
            CsdnBlog csdnBlog = new CsdnBlog();
            // ID
            csdnBlog.setKey(Integer.parseInt(
                    page.getUrl().regex("http://blog\\.csdn\\.net/" + username + "/article/details/(\\d+)").get()));
            // Title
            csdnBlog.setTitle(
                    page.getHtml().xpath("//div[@class='article_title']//span[@class='link_title']/a/text()").get());
            // Body text
            csdnBlog.setContent(
                    page.getHtml().xpath("//div[@class='article_content']/allText()").get());
            // Date
            csdnBlog.setDates(
                    page.getHtml().xpath("//div[@class='article_r']/span[@class='link_postdate']/text()").get());
            // Tags (there can be several, joined with commas)
            csdnBlog.setTags(listToString(page.getHtml().xpath("//div[@class='article_l']/span[@class='link_categories']/a/allText()").all()));
            // Categories (there can be several, joined with commas)
            csdnBlog.setCategory(listToString(page.getHtml().xpath("//div[@class='category_r']/label/span/text()").all()));
            // View count
            csdnBlog.setView(Integer.parseInt(page.getHtml().xpath("//div[@class='article_r']/span[@class='link_view']")
                    .regex("(\\d+)人阅读").get()));
            // Comment count
            csdnBlog.setComments(Integer.parseInt(page.getHtml()
                    .xpath("//div[@class='article_r']/span[@class='link_comments']").regex("\\((\\d+)\\)").get()));
            // Original post or not
            csdnBlog.setCopyright(page.getHtml().regex("bog_copyright").match() ? 1 : 0);
            // Save the object to the database
            new CsdnBlogDao().add(csdnBlog);
            // And print it to the console
            System.out.println(csdnBlog);
        }
    }

    // Join a list of strings into one comma-separated string
    public static String listToString(List<String> stringList) {
        if (stringList == null) {
            return null;
        }
        StringBuilder result = new StringBuilder();
        boolean flag = false;
        for (String string : stringList) {
            if (flag) {
                result.append(",");
            } else {
                flag = true;
            }
            result.append(string);
        }
        return result.toString();
    }

    public static void main(String[] args) {
        long startTime, endTime;
        System.out.println("[Crawler started]...");
        startTime = System.currentTimeMillis();
        // Start from the user's blog home page, with 5 threads
        Spider.create(new CsdnBlogPageProcessor()).addUrl("http://blog.csdn.net/" + username).thread(5).run();
        endTime = System.currentTimeMillis();
        System.out.println("[Crawler finished] crawled " + size + " articles in about "
                + ((endTime - startTime) / 1000) + " seconds; they have been saved to the database.");
    }
}
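In the class above the DAO is called directly inside process(). WebMagic's intended place for persistence is a Pipeline, so the same insert could also be moved into a custom pipeline. A sketch under that assumption (the class name and the "blog" field key are mine, not from the article):

import us.codecraft.webmagic.ResultItems;
import us.codecraft.webmagic.Task;
import us.codecraft.webmagic.pipeline.Pipeline;

// Hypothetical pipeline: persists the CsdnBlog that process() stored via page.putField("blog", csdnBlog)
public class CsdnBlogPipeline implements Pipeline {

    private final CsdnBlogDao dao = new CsdnBlogDao();

    @Override
    public void process(ResultItems resultItems, Task task) {
        CsdnBlog blog = resultItems.get("blog");
        if (blog != null) {
            dao.add(blog);
        }
    }
}

It would then be registered with Spider.create(new CsdnBlogPageProcessor()).addPipeline(new CsdnBlogPipeline()), and process() would call page.putField("blog", csdnBlog) instead of new CsdnBlogDao().add(csdnBlog).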

MySQL approach (Spring Boot services + MyBatis):

import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.context.ConfigurableApplicationContext;

import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Selectable;

public class GamePageProcessor implements PageProcessor {

    private static final Logger logger = LoggerFactory.getLogger(GamePageProcessor.class);

    private static DianJingService d;
    private static BannerService bs;
    private static SportService ss;
    private static YuLeNewsService ys;
    private static UpdateService ud;

    // Crawl configuration: encoding, crawl interval, retry count, etc.
    private Site site = Site.me().setRetryTimes(3).setSleepTime(1000);

    public Site getSite() {
        return site;
    }

    // Boot the Spring context first so the service beans can be looked up and assigned to the
    // static fields, then the spider can be started. (In a real Spring Boot project this class
    // would also carry @SpringBootApplication or point at a configuration class.)
    public static void main(String[] args) {
        ConfigurableApplicationContext context = SpringApplication.run(GamePageProcessor.class, args);
        d = context.getBean(DianJingService.class);
        //Spider.create(new GamePageProcessor()).addUrl("网址").thread(5).run();
    }

    // process() is the core extension point of the crawler: all extraction logic goes here
    public void process(Page page) {
        Selectable url = page.getUrl();
        if (url.toString().equals("网址")) { // "网址" is a placeholder for the target URL
            DianJingVideo dv = new DianJingVideo();
            // titles
            List<String> ls = page.getHtml().xpath("//div[@class='v']/div[@class='v-meta va']/div[@class='v-meta-title']/a/text()").all();
            // hrefs
            List<String> ls1 = page.getHtml().xpath("//div[@class='v']/div[@class='v-link']/a/@href").all(); // href of the <a> tags
            // dates
            List<String> ls2 = page.getHtml().xpath("//div[@class='v']/div[@class='v-meta va']/div[@class='v-meta-entry']/div[@class='v-meta-data']/span[@class='r']/text()").all();
            // photos
            List<String> ls3 = page.getHtml().xpath("//div[@class='v']/div[@class='v-thumb']/img/@src").all();
            for (int i = 0; i < 5; i++) {
                dv.setTitles(ls.get(i));
                dv.setCategory("");
                dv.setDates(ls2.get(i));
                dv.setHrefs(ls1.get(i));
                dv.setPhoto(ls3.get(i));
                dv.setSources("");
                d.addVideo(dv);
            }
        }
    }
}
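The Spider.create(...) line is commented out in main() above. In a Spring Boot application, one common way to start the crawl once the context is ready, without the static service fields, is a CommandLineRunner. The sketch below is my own wiring, not from the article; it assumes GamePageProcessor is also registered as a Spring bean, and the start URL stays a placeholder:

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

import us.codecraft.webmagic.Spider;

// Hypothetical runner: starts the spider after the Spring context has been built
@Component
public class CrawlerRunner implements CommandLineRunner {

    @Autowired
    private GamePageProcessor processor; // assumes the processor is a Spring-managed bean

    @Override
    public void run(String... args) {
        Spider.create(processor)
                .addUrl("网址")  // placeholder URL, as in the article
                .thread(5)
                .start();        // start() runs asynchronously; run() would block the startup thread
    }
}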

Controller:

import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;

// JSONObject comes from whichever JSON library the project uses (e.g. fastjson)

@Controller
@RequestMapping(value = "/dianjing")
public class DianJingController {

    @Autowired
    private DianJingService s;

    /*
     * Mobile-game videos
     */
    @RequestMapping("/dianjing")
    @ResponseBody
    public Object dianjing() {
        List list = s.find2();
        JSONObject jo = new JSONObject();
        if (list != null) {
            jo.put("code", 0);
            jo.put("success", true);
            jo.put("count", list.size());
            jo.put("list", list);
        }
        return jo;
    }
}

The entity class is not shown here.

DAO layer:

@Insert("insert into dianjing (titles,dates,category,hrefs,photo,sources) values(#{titles},#{dates},#{category},#{hrefs},#{photo},#{sources})")

int adddj(DianJing dj);
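The snippet above is a single MyBatis annotation-mapped method; in context it would sit inside a mapper interface. A sketch of what that interface might look like (the interface name and the @Mapper registration are my assumptions; only the adddj method comes from the article):

import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;

// Hypothetical surrounding interface for the insert method shown above
@Mapper
public interface DianJingDao {

    // Insert one crawled record into the dianjing table
    @Insert("insert into dianjing (titles,dates,category,hrefs,photo,sources) "
            + "values(#{titles},#{dates},#{category},#{hrefs},#{photo},#{sources})")
    int adddj(DianJing dj);
}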

That is everything for this article on implementing a Java crawler with Spring Boot + WebMagic using JDBC and MySQL. I hope it gives you a useful reference, and thank you for supporting 脚本之家.
