- Create the project:
scrapy startproject scrapyMysql
- Generate the spider:
scrapy genspider inputMysql lab.scrapyd.cn
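This drops a skeleton spider into scrapyMysql/spiders/inputMysql.py, roughly like the following (the exact template output varies by Scrapy version):
import scrapy


class InputmysqlSpider(scrapy.Spider):
    name = 'inputMysql'
    allowed_domains = ['lab.scrapyd.cn']
    start_urls = ['http://lab.scrapyd.cn/']

    def parse(self, response):
        pass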
- Define the fields in items.py:
# -*- coding: utf-8 -*-
import scrapy


class ScrapymysqlItem(scrapy.Item):
    # tag field
    tag = scrapy.Field()
    # quote text
    cont = scrapy.Field()
    # detail-page URL
    url = scrapy.Field()
    # author name
    author = scrapy.Field()
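A Scrapy Item behaves like a dict restricted to its declared fields; a quick illustration with made-up values:
item = ScrapymysqlItem()
item['tag'] = 'life,humor'   # assign like a dict
item['author'] = 'Mark Twain'
print(dict(item))            # {'tag': 'life,humor', 'author': 'Mark Twain'}
item['year'] = 1900          # raises KeyError: 'year' is not a declared field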
- In settings.py, enable the pipeline (300 is the pipeline's order value in the 0-1000 range; lower values run first):
ITEM_PIPELINES = {
    'scrapyMysql.MySQLPipelines.MySQLPipeline': 300,
}
- Write the pipeline in MySQLPipelines.py:
# -*- coding: utf-8 -*-
import pymysql.cursors


class MySQLPipeline(object):
    def __init__(self):
        # Connect to the database
        self.connect = pymysql.connect(
            host='127.0.0.1',   # database host
            port=3306,          # database port
            db='scrapyMysql',   # database name
            user='root',        # database user
            passwd='root',      # database password
            charset='utf8',     # character set (the table below uses utf8mb4)
            use_unicode=True
        )
        # Cursor used for all inserts, deletes, queries and updates
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        # Plain pymysql: a parameterized INSERT whose columns match the item fields
        self.cursor.execute(
            """insert into mingyan(tag, cont, url, author) values (%s, %s, %s, %s)""",
            (item['tag'], item['cont'], item['url'], item['author'])
        )
        # Commit the statement
        self.connect.commit()
        # A pipeline must return the item so later pipelines can process it
        return item
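The pipeline above opens a connection but never closes it. A minimal sketch of tidier resource handling using Scrapy's standard open_spider/close_spider pipeline hooks (these method bodies are an assumption, not part of the original tutorial):
    def open_spider(self, spider):
        # Called once when the spider starts; a natural place to connect
        self.connect = pymysql.connect(host='127.0.0.1', port=3306,
                                       db='scrapyMysql', user='root',
                                       passwd='root', charset='utf8')
        self.cursor = self.connect.cursor()

    def close_spider(self, spider):
        # Called once when the spider finishes; release the connection
        self.cursor.close()
        self.connect.close()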
- Create the database (the name must match the db argument in the pipeline):
CREATE DATABASE scrapyMysql;
- Create the table:
DROP TABLE IF EXISTS `mingyan`;
CREATE TABLE `mingyan` (
  `id` int(10) NOT NULL AUTO_INCREMENT,
  `tag` varchar(255) DEFAULT NULL,
  `cont` mediumtext,
  `url` varchar(255) DEFAULT NULL,
  `author` varchar(255) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=150 DEFAULT CHARSET=utf8mb4;
- Write the spider (spiders/inputMysql.py):
# -*- coding: utf-8 -*-
import scrapy
# Import the Item definition
from scrapyMysql.items import ScrapymysqlItem


class InputmysqlSpider(scrapy.Spider):
    name = 'inputMysql'
    allowed_domains = ['lab.scrapyd.cn']
    start_urls = ['http://lab.scrapyd.cn/']

    def parse(self, response):
        mingyan = response.css('div.quote')
        # For each quote, collect the text, tags, detail URL and author
        for v in mingyan:
            # A fresh item per quote, so yielded items don't share state
            item = ScrapymysqlItem()
            # Quote text
            item['cont'] = v.css('.text::text').extract_first()
            # Tags, joined into one comma-separated string
            tags = v.css('.tags .tag::text').extract()
            item['tag'] = ','.join(tags)
            # Detail-page URL
            item['url'] = v.css('a::attr(href)').extract_first()
            # Author
            item['author'] = v.css('small.author::text').extract_first()
            # Hand the item to the pipeline
            yield item
        # CSS selector for the next-page link
        next_page = response.css('li.next a::attr(href)').extract_first()
        # If a next page exists, follow it and re-enter parse
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)
- Run the spider:
scrapy crawl inputMysql
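To confirm the rows landed in MySQL, a quick check with plain pymysql (a sketch, using the table and credentials configured above):
import pymysql

connect = pymysql.connect(host='127.0.0.1', port=3306, db='scrapyMysql',
                          user='root', passwd='root', charset='utf8')
cursor = connect.cursor()
cursor.execute("select tag, author from mingyan limit 3")
for row in cursor.fetchall():
    print(row)   # a few (tag, author) tuples once the crawl finishes
cursor.close()
connect.close()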