启动文件 main.py（用于在 IDE 中直接运行爬虫）：
from scrapy.cmdline import execute

# Launch the 'bili_gr_xx' spider programmatically, equivalent to running
# `scrapy crawl bili_gr_xx` on the command line.
execute(['scrapy', 'crawl', 'bili_gr_xx'])
spiders 目录下的爬取文件（爬虫主体）：
# -*- coding: utf-8 -*-
import scrapy,json
from .. import items
class BiliGrXxSpider(scrapy.Spider):
name = 'bili_gr_xx'
allowed_domains = ['bilibili.com']
# start_urls = ['http://bilibili.com/']
# 我们使用这个函数作为初始的执行函数
def start_requests(self):
url = 'https://space.bilibili.com/ajax/member/GetInfo'
for i in range(1,201):
data_form = {
'mid':str(i),
'csrf':