# spider.py
import logging

import pymysql
import scrapy
from scrapy import Request, signals
# NOTE(review): MyItem must also be imported from the project's items module,
# e.g. `from myproject.items import MyItem` — module path not visible here.
class MySpider(scrapy.Spider):
    """Spider that reads its start URLs from a MySQL table and renders
    each page through Splash.

    Rows of ``mytable`` whose ``nbErrors`` count is below ``MAX_RETRY``
    are selected; the first column of each row is used as the request URL.
    The database connection is opened in ``__init__`` and closed via the
    ``spider_closed`` signal wired up in ``from_crawler``.
    """

    # Skip URLs that have already failed this many times.
    MAX_RETRY = 10
    logger = logging.getLogger(__name__)
    name = 'myspider'
    start_urls = []

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and hook ``spider_closed`` so the DB connection
        is released when the crawl shuts down."""
        spider = super(MySpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original evaluated `super(MySpider, self)` without
        # calling __init__ (a no-op) and silently dropped the arguments that
        # from_crawler forwards. Call the parent initializer properly.
        super(MySpider, self).__init__(*args, **kwargs)
        # Connect to the database (placeholder credentials kept verbatim).
        self.conn = pymysql.connect(
            host="数据库地址",
            user="用户名",
            password="数据库密码",
            database="数据库名",
            charset="utf8",
        )
        self.logger.info('Connection to database opened')

    def spider_closed(self, spider):
        """Signal handler: close the DB connection when the spider stops."""
        # BUG FIX: the connection is stored as self.conn; the original called
        # self.db.close(), which raised AttributeError and leaked the
        # connection.
        self.conn.close()
        self.logger.info('Connection to database closed')

    def parse(self, response):
        item = MyItem()
        # Scraping logic goes here.
        yield item

    def errback_httpbin(self, failure=None):
        # BUG FIX: Scrapy invokes errbacks with a Failure argument; the
        # original zero-argument signature raised TypeError on every request
        # error. `failure=None` keeps any existing no-arg callers working.
        self.logger.info('http error')

    def start_requests(self):
        """Yield one Splash-rendered Request per URL stored in MySQL."""
        # Context manager guarantees the cursor is closed even if this
        # generator is abandoned mid-iteration (the original only closed it
        # after full exhaustion). Rows are fetched eagerly before yielding.
        with self.conn.cursor() as cursor:
            # Query the database: only URLs under the retry budget.
            cursor.execute(
                'SELECT * FROM mytable WHERE nbErrors < %s',
                (self.MAX_RETRY,),
            )
            rows = cursor.fetchall()
        for row in rows:
            yield Request(row[0], self.parse, meta={
                'splash': {
                    'args': {
                        'html': 1,  # return the rendered HTML
                        'wait': 2   # seconds to let the page's JS settle
                    }
                }
            }, errback=self.errback_httpbin)