import scrapy
class ZhihuSpider(scrapy.Spider):
    """Spider that issues GET requests to the zhihu.com start URLs and logs
    each response's HTTP status via a custom callback."""

    # Unique spider name; used to launch the crawl (``scrapy crawl zhihu``).
    # NOTE: the original file used curly quotes (‘zhihu’), which is a
    # SyntaxError in Python — fixed to straight quotes throughout.
    name = 'zhihu'
    # Off-site filtering: requests whose domain is not listed here are
    # dropped by Scrapy's offsite middleware before being fetched.
    allowed_domains = ['zhihu.com']
    # Seed URLs. Scrapy calls start_requests() at startup, which turns each
    # of these into an initial Request; responses go to the callback.
    start_urls = ['http://zhihu.com/']
    # Per-spider settings overriding the project-wide settings.
    custom_settings = {
        # NOTE(review): Scrapy setting keys are upper-case (USER_AGENT);
        # a lowercase 'user-agent' key here is likely ignored — confirm
        # whether this was meant to set the request header instead.
        'user-agent': None,
        # Request origin header, currently disabled.
        # 'referer': 'https://www.zhihu.com/',
    }

    def start_requests(self):
        """Override the default start_requests to route responses to the
        custom ``define_parse`` callback instead of ``parse``."""
        for url in self.start_urls:
            # HTTP method names are conventionally upper-case; the original
            # 'Get' relied on Scrapy normalizing it internally.
            yield scrapy.Request(url=url, method='GET', callback=self.define_parse)

    def parse(self, response):
        """Default callback hook; unused because start_requests() routes
        everything to define_parse()."""
        pass

    def define_parse(self, response):
        """Custom callback: print the response object and log its HTTP
        status code."""
        print(response)
        # Log the numeric status code (e.g. 200); logger.info stringifies it.
        self.logger.info(response.status)