Python Crawler Development Notes (8): Writing Data to MySQL with the pyspider Framework

1. Without Writing to MySQL

This follows the tutorial in Chapter 12, Section 2 of Python3 网络爬虫开发实战 (by 崔庆才), which crawls travel notes from Qunar (去哪儿网).

Here is the complete script:

from pyspider.libs.base_handler import *


class Handler(BaseHandler):
    crawl_config = {
    }

    @every(minutes=24 * 60)
    def on_start(self):
        # Entry point: crawl the travel-note list page once a day
        self.crawl('http://travel.qunar.com/travelbook/list.htm', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        # Follow each travel-note link, rendering the page with PhantomJS
        for each in response.doc('li > .tit > a').items():
            self.crawl(each.attr.href, callback=self.detail_page, fetch_type='js')
        # Follow the "next page" link if there is one
        next_url = response.doc('.next').attr.href
        if next_url:
            self.crawl(next_url, callback=self.index_page)

    @config(priority=2)
    def detail_page(self, response):
        return {
            'url': response.url,
            'title': response.doc('#booktitle').text(),
            'date': response.doc('.when .data').text(),
            'day': response.doc('.howlong .data').text(),
            'who': response.doc('.who .data').text(),
            'text': response.doc('#b_panel_schedule').text(),
            'image': response.doc('.cover_img').attr.src
        }

This script only prints its results in pyspider's web UI; it does not persist them anywhere else.

2. Saving the Crawled Data to MySQL

To write to the database, we define an add_Mysql method that detail_page can call, and put the database connection setup in __init__ (remember to import pymysql at the top of the script):

    # Connect to the database
    def __init__(self):
        self.db = pymysql.connect(host='localhost', user='root', password='123456',
                                  database='python', charset='utf8')

    def add_Mysql(self, order_num, url, title, date, day, who, text, image):
        try:
            cursor = self.db.cursor()
            # Build the INSERT statement for the qunar table
            sql = 'insert into qunar(order_num, url, title, date, day, who, text, image) values (%d, "%s", "%s", "%s", "%s", "%s", "%s", "%s")' % (order_num, url, title, date, day, who, text, image)
            print(sql)
            cursor.execute(sql)
            print(cursor.lastrowid)
            self.db.commit()
        except Exception as e:
            print(e)
            self.db.rollback()
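The INSERT above assumes a qunar table already exists in the python database. The tutorial does not show its schema, so the one-off setup script below is only a sketch: the column names match the INSERT statement, but the column types are my assumptions.

import pymysql

# One-off setup: create the table that add_Mysql writes into.
# Column names match the INSERT above; the VARCHAR types are assumptions.
db = pymysql.connect(host='localhost', user='root', password='123456',
                     database='python', charset='utf8')
cursor = db.cursor()
cursor.execute("""
    create table if not exists qunar (
        order_num int,
        url       varchar(255),
        title     varchar(255),
        date      varchar(100),
        day       varchar(100),
        who       varchar(100),
        text      varchar(255),
        image     varchar(255)
    ) default charset=utf8
""")
db.close()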

Then call add_Mysql from detail_page:

    @config(priority=2)
    def detail_page(self, response):
        global count  # count is a module-level counter, initialised to 0 (see the full script below)
        count += 1
        url = response.url
        title = response.doc('title').text()
        date = response.doc('.when .data').text()
        day = response.doc('.howlong .data').text()
        who = response.doc('.who .data').text()
        # Keep the first 100 characters and replace up to 10 double quotes
        # with single quotes so they do not break the hand-built SQL string
        text = response.doc('#b_panel_schedule').text()[0:100].replace('\"', '\'', 10)
        image = response.doc('.cover_img').attr.src
        self.add_Mysql(count, url, title, date, day, who, text, image)  # insert into the database
        return {
            "id": count,
            "url": response.url,
            "title": response.doc('title').text(),
            "date": response.doc('.when .data').text(),
            "day": response.doc('.howlong .data').text(),
            "who": response.doc('.who .data').text(),
            "text": response.doc('#b_panel_schedule').text(),
            "image": response.doc('.cover_img').attr.src
        }
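As an aside: splicing values into the SQL string with % is what forces the .replace hack above, and it breaks as soon as a value contains an unexpected quote. A safer option is to let PyMySQL escape the parameters itself. The sketch below shows the idea (the name add_Mysql_safe is made up; it is a drop-in replacement for add_Mysql):

    def add_Mysql_safe(self, order_num, url, title, date, day, who, text, image):
        # %s placeholders are filled in by PyMySQL with proper escaping,
        # so no manual quote stripping is needed
        try:
            cursor = self.db.cursor()
            sql = ('insert into qunar(order_num, url, title, date, day, who, text, image) '
                   'values (%s, %s, %s, %s, %s, %s, %s, %s)')
            cursor.execute(sql, (order_num, url, title, date, day, who, text, image))
            self.db.commit()
        except Exception as e:
            print(e)
            self.db.rollback()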

The complete script:

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2018-09-08 21:37:08
# Project: qunr

from pyspider.libs.base_handler import *
import pymysql


count = 0

class Handler(BaseHandler):

    crawl_config = {
    }

    # Connect to the database
    def __init__(self):
        self.db = pymysql.connect(host='localhost', user='root', password='123456',
                                  database='python', charset='utf8')

    def add_Mysql(self, order_num, url, title, date, day, who, text, image):
        try:
            cursor = self.db.cursor()
            # Build the INSERT statement for the qunar table
            sql = 'insert into qunar(order_num, url, title, date, day, who, text, image) values (%d, "%s", "%s", "%s", "%s", "%s", "%s", "%s")' % (order_num, url, title, date, day, who, text, image)
            print(sql)
            cursor.execute(sql)
            print(cursor.lastrowid)
            self.db.commit()
        except Exception as e:
            print(e)
            self.db.rollback()

    @every(minutes=24 * 60)
    def on_start(self):
        self.crawl('http://travel.qunar.com/travelbook/list.htm', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        for each in response.doc('li > .tit > a').items():
            self.crawl(each.attr.href, callback=self.detail_page, fetch_type='js')
        next_url = response.doc('.next').attr.href
        if next_url:
            self.crawl(next_url, callback=self.index_page)

    @config(priority=2)
    def detail_page(self, response):
        global count  # use the module-level counter
        count += 1
        url = response.url
        title = response.doc('title').text()
        date = response.doc('.when .data').text()
        day = response.doc('.howlong .data').text()
        who = response.doc('.who .data').text()
        # Keep the first 100 characters and replace up to 10 double quotes
        # with single quotes so they do not break the hand-built SQL string
        text = response.doc('#b_panel_schedule').text()[0:100].replace('\"', '\'', 10)
        image = response.doc('.cover_img').attr.src
        self.add_Mysql(count, url, title, date, day, who, text, image)  # insert into the database
        return {
            "id": count,
            "url": response.url,
            "title": response.doc('title').text(),
            "date": response.doc('.when .data').text(),
            "day": response.doc('.howlong .data').text(),
            "who": response.doc('.who .data').text(),
            "text": response.doc('#b_panel_schedule').text(),
            "image": response.doc('.cover_img').attr.src
        }
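Once the project is running, a quick way to confirm that rows are actually arriving is to query the table from a separate Python shell. A minimal check, reusing the same connection parameters, might look like this:

import pymysql

# Sanity check: print the five most recently numbered rows
db = pymysql.connect(host='localhost', user='root', password='123456',
                     database='python', charset='utf8')
cursor = db.cursor()
cursor.execute('select order_num, title, date from qunar order by order_num desc limit 5')
for row in cursor.fetchall():
    print(row)
db.close()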