1. Create the project
scrapy startproject Bilibili
cd Bilibili
scrapy genspider bilibili www.bilibili.com
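These two commands produce roughly the following layout (genspider drops the spider template under spiders/):

Bilibili/
├── scrapy.cfg
└── Bilibili/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── bilibili.py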
2. Item definitions: items.py
Define the data structure to be scraped.
import scrapy
class BilibiliItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
# Note: scrapy.Field() is the only field type; per-field metadata can be passed to it as keyword arguments (see the sketch after this block)
rank_type = scrapy.Field() # ranking board name
rank_no = scrapy.Field() # rank position
title = scrapy.Field() # video title
play_num = scrapy.Field() # play count
comment_num = scrapy.Field() # comment count
uploader = scrapy.Field() # uploader
score = scrapy.Field() # score
set_name = scrapy.Field() # MongoDB collection name
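On the note above: an item field is always declared with scrapy.Field(), which is just a dict, so arbitrary per-field metadata can be attached as keyword arguments; Scrapy's exporters, for instance, look for a 'serializer' key. A minimal sketch (ExampleItem and to_int are illustrative only, not part of this project):

import scrapy

def to_int(value):
    # illustrative serializer: cast the scraped text to int on export
    return int(value)

class ExampleItem(scrapy.Item):
    # Field() accepts arbitrary metadata; exporters read the 'serializer' key
    play_num = scrapy.Field(serializer=to_int)
    title = scrapy.Field()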
3. Spider: bilibili.py
import scrapy
from Bilibili.items import BilibiliItem
class BilibiliSpider(scrapy.Spider):
name = 'bilibili'
allowed_domains = ['www.bilibili.com']
# start_urls = ['http://www.bilibili.com/']
base_url = 'https://www.bilibili.com/ranking/'
rank_type = {
'all': ['all/0/0/3', '全站榜'],
'origin': ['origin/0/0/3', '原创榜'],
'bangumi': ['bangumi/13/0/3', '新番榜'],
'cinema': ['cinema/177/0/3', '影视榜'],
'rookie': ['rookie/0/0/3', '新人榜']
}
# Entry point of the spider: send requests to the scheduler and specify the callback
def start_requests(self):
# Class attributes must be accessed through self
for value in self.rank_type.values():
url = self.base_url + value[0]
yield scrapy.Request(url, callback=self.parse)
def parse(self, response):
# The path of the requested url tells which ranking board this response belongs to
set_name = response.url.split('/')[4]
board_name = self.rank_type[set_name][1]
# Run xpath directly on the response to get a list of selector objects
info_list = response.xpath("//div[@class='rank-list-wrap']/ul/li")
# Iterate over the li elements, building a fresh item for each entry
# (yielding one shared item instance would leave every row pointing at the same mutable object)
for info in info_list:
item = BilibiliItem()
item['set_name'] = set_name
item['rank_type'] = board_name
item['rank_no'] = info.xpath("./div[@class='num']/text()")[0].extract()
item['title'] = info.xpath(".//div[@class='info']/a/text()")[0].extract()
# Inside a single li element this xpath matches exactly three span texts, so plain indexing works
details = info.xpath(".//span/text()")
item['play_num'] = details[0].extract()
item['comment_num'] = details[1].extract()
item['uploader'] = details[2].extract()
item['score'] = info.xpath(".//div[2]/div/text()")[0].extract()
yield item
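play_num and comment_num are kept as the raw text scraped from the page; if numbers are needed later (sorting, analysis), a small helper could normalize strings such as '123.4万' before the item is yielded. A hypothetical sketch (the exact text format depends on the page, so treat the suffix handling as an assumption):

def to_number(text):
    # hypothetical helper: turn '123.4万' / '1.2亿' / '5678' into an int
    text = text.strip()
    if text.endswith('万'):
        return int(float(text[:-1]) * 10000)
    if text.endswith('亿'):
        return int(float(text[:-1]) * 100000000)
    return int(text)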
4. Pipelines: pipelines.py
import pymongo, pymysql, csv
from .settings import *
class MongoDBPipeline(object):
def __init__(self):
self.conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
self.db = self.conn['Bili']
def process_item(self, item, spider):
# The spider argument identifies which spider produced this item (see the sketch after this class)
self.set = self.db[item['set_name']]
d = dict(item)
self.set.insert_one(d)
return item
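A side note on the spider argument: Scrapy passes the running spider instance to process_item(), so a pipeline shared by several spiders can branch on spider.name or read spider attributes. A minimal sketch (SelectivePipeline is illustrative, not part of this project):

class SelectivePipeline(object):
    def process_item(self, item, spider):
        # only touch items produced by the bilibili spider; pass others through unchanged
        if spider.name != 'bilibili':
            return item
        # spider-specific handling would go here
        return item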
class CSVPipeline(object):
# A csv file has no sheets; one sheet per ranking board would need an Excel workbook instead (see the sketch after this class)
def process_item(self, item, spider):
# When writing the csv file, set the encoding to gb18030 (plain utf-8 shows up garbled when the file is opened in Excel)
with open(item['rank_type']+".csv", 'a', newline='', encoding='gb18030') as f:
writer = csv.writer(f)
writer.writerow(
[item['rank_no'],item['title'],
item['play_num'],item['comment_num'],
item['uploader'],item['score']
])
return item
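On the sheet question above: to get one sheet per ranking board, the items would have to be written to an Excel workbook rather than csv, for example with openpyxl. A sketch under that assumption (ExcelPipeline is not part of this project and requires openpyxl to be installed):

from openpyxl import Workbook

class ExcelPipeline(object):
    def __init__(self):
        self.wb = Workbook()
        self.wb.remove(self.wb.active)  # drop the default empty sheet

    def process_item(self, item, spider):
        name = item['rank_type']
        # one worksheet per ranking board
        ws = self.wb[name] if name in self.wb.sheetnames else self.wb.create_sheet(title=name)
        ws.append([item['rank_no'], item['title'], item['play_num'],
                   item['comment_num'], item['uploader'], item['score']])
        return item

    def close_spider(self, spider):
        self.wb.save('bilibili_rank.xlsx')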
class MysqlPipeline(object):
def __init__(self):
self.db = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, database=MYSQL_DB, charset='utf8')
self.cursor = self.db.cursor()
def process_item(self, item, spider):
# The SQL statement can use placeholders; pass the values as a list to execute()
sql = 'insert into test values(%s,%s,%s,%s,%s,%s)'
self.cursor.execute(sql,[
item['rank_no'], item['title'],item['play_num'],
item['comment_num'],item['uploader'],item['score']
])
self.db.commit()
return item
# Cleanup before the spider shuts down; runs exactly once
def close_spider(self,spider):
self.cursor.close()
self.db.close()
print('database connection closed')
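The INSERT above assumes a six-column table named test already exists in the Scrapy database. A one-off setup sketch for creating it (the column names and VARCHAR sizes are assumptions; all scraped values arrive as text):

import pymysql

# one-off setup script; connection parameters mirror settings.py
db = pymysql.connect(host='192.168.0.106', user='root', password='123456',
                     database='Scrapy', charset='utf8')
cursor = db.cursor()
cursor.execute("""
    CREATE TABLE IF NOT EXISTS test (
        rank_no     VARCHAR(16),
        title       VARCHAR(255),
        play_num    VARCHAR(32),
        comment_num VARCHAR(32),
        uploader    VARCHAR(64),
        score       VARCHAR(32)
    ) CHARACTER SET utf8
""")
db.commit()
cursor.close()
db.close()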
5. Update the configuration: settings.py
# Do not obey robots.txt
ROBOTSTXT_OBEY = False
# Default request headers
DEFAULT_REQUEST_HEADERS = {
'User-Agent': 'Mozilla/5.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
# Enable the item pipelines (a lower number runs earlier)
ITEM_PIPELINES = {
'Bilibili.pipelines.MongoDBPipeline': 300,
'Bilibili.pipelines.CSVPipeline': 200,
'Bilibili.pipelines.MysqlPipeline': 100,
}
# Database connection settings
MONGO_HOST = '192.168.0.106'
MONGO_PORT = 27017
MYSQL_HOST = '192.168.0.106'
MYSQL_USER = 'root'
MYSQL_PASSWORD = '123456'
MYSQL_DB = 'Scrapy'
# Log level and log file
LOG_LEVEL = 'WARNING'
LOG_FILE = 'spider.log'
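As an aside, pipelines.py above reaches these values through `from .settings import *`; the more common Scrapy pattern is to let each pipeline pull them from the crawler settings with a from_crawler classmethod. A sketch of the Mongo pipeline written that way:

import pymongo

class MongoDBPipeline(object):
    def __init__(self, mongo_host, mongo_port):
        self.conn = pymongo.MongoClient(mongo_host, mongo_port)
        self.db = self.conn['Bili']

    @classmethod
    def from_crawler(cls, crawler):
        # pulls MONGO_HOST / MONGO_PORT out of settings.py at startup
        return cls(
            mongo_host=crawler.settings.get('MONGO_HOST'),
            mongo_port=crawler.settings.getint('MONGO_PORT'),
        )

    def process_item(self, item, spider):
        self.db[item['set_name']].insert_one(dict(item))
        return item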
6. Run the project: create begin.py (in the same directory as scrapy.cfg)
from scrapy import cmdline
cmdline.execute('scrapy crawl bilibili'.split())
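An equivalent way to start the crawl from a script, without going through cmdline, is Scrapy's CrawlerProcess:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('bilibili')  # the spider name registered on BilibiliSpider
process.start()            # blocks until the crawl finishes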