1、url地址多个字段的书写:通过重写 start_requests 方法循环生成多个请求(详细写法见下面的 Spider 类)
class MaoyanSpider(scrapy.Spider):
    """Spider that crawls Maoyan film list pages across multiple showType values.

    Instead of a static ``start_urls`` list, ``start_requests`` is overridden
    to generate one request per ``showType`` query value.
    """
    name = 'maoyan'
    allowed_domains = ['maoyan.com']
    # start_urls = ['https://maoyan.com/films?showType=3']

    def start_requests(self):
        """Yield one Request per showType value (0-99)."""
        base_url = 'https://maoyan.com/films?showType={}'
        # NOTE(review): range 0-99 taken from the original notes — confirm
        # which showType values actually exist on the site.
        for show_type in range(0, 100):
            url = base_url.format(show_type)
            # dont_filter=True DISABLES Scrapy's duplicate-request filter,
            # so this URL is requested even if it was already seen.
            # (The original comment had this backwards.)
            yield scrapy.Request(url, dont_filter=True)
2、通过标签(url 中的 showType 参数)对抓取的数据进行分类保存
import json

import scrapy

from spider.items import MovieItem
class MaoyanSpider(scrapy.Spider):
    """Spider that parses Maoyan film list pages and tags each movie item
    with a category derived from the page URL's showType parameter."""
    name = 'maoyan'
    allowed_domains = ['maoyan.com']
    start_urls = ['https://maoyan.com/films?showType=3']

    def parse(self, response):
        """Extract movie names and scores, classify by URL, and yield items."""
        names = response.xpath(
            '//div[@class="channel-detail movie-item-title"]/a/text()').extract()
        scores = [score.xpath('string(.)').extract_first() for score in
                  response.xpath('//div[@class="channel-detail channel-detail-orange"]')]
        for name, score in zip(names, scores):
            # Create a fresh item per movie: reusing one mutable item across
            # yields can hand downstream components already-overwritten data.
            item = MovieItem()
            item['names'] = name
            item['scores'] = score
            # Classify by the showType query parameter in the response URL.
            if response.url.find('showType=3') != -1:
                item['type'] = '喜剧'
            # BUG FIX: the original checked 'showType=3' twice, so the
            # '爱情' branch could never fire. Assuming showType=2 is the
            # romance listing — TODO confirm against the site.
            if response.url.find('showType=2') != -1:
                item['type'] = '爱情'
            yield item
#在pipelines接收item数据 ,分类保存
class SpiderPipeline:
    """Pipeline that saves items into a separate JSON-lines file per category.

    BUG FIX vs. the original notes: ``open_spider`` referenced ``item``,
    which is not in scope there, both branches were missing colons, and
    both opened the same ``movie.txt`` — so per-category saving never
    actually happened. Files are now opened per category up front and the
    item's ``type`` field selects the target file in ``process_item``.
    """

    # Map an item's 'type' label to the output file it belongs in.
    FILES_BY_TYPE = {
        '喜剧': 'movie_comedy.txt',
        '爱情': 'movie_love.txt',
    }

    def open_spider(self, spider):
        """Open one output file per category when the spider starts."""
        self.files = {
            movie_type: open(path, 'w', encoding='utf-8')
            for movie_type, path in self.FILES_BY_TYPE.items()
        }

    def process_item(self, item, spider):
        """Serialize the item as a JSON line into its category's file.

        Scrapy Item objects are not JSON serializable directly
        (TypeError: Object of type MovieItem is not JSON serializable),
        so the item is converted to a plain dict first.
        """
        line = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.files[item['type']].write(line)
        return item

    def close_spider(self, spider):
        """Close every category file when the spider finishes."""
        for f in self.files.values():
            f.close()
3、如果要关闭日志打印,在 settings.py 里加入下面的配置:
LOG_ENABLED = False
说明:LOG_ENABLED 默认为 True,控制是否启用 logging;设为 False 即关闭日志。