# db.py
# Spider module for the Douban Top-250 movie crawler.
# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy import Request
from copy import deepcopy
class DbSpider(scrapy.Spider):
    """Douban Top-250 movie spider.

    Crawls the Top-250 list page, extracting per-movie url/title/year,
    then follows each movie's detail page to collect its star rating
    before yielding the completed item.
    """

    name = 'db'
    allowed_domains = ['douban.com']
    start_urls = ['https://movie.douban.com/top250', ]

    def parse(self, response):
        """Parse one Top-250 list page: build one item per movie <li>.

        Yields a Request to each movie's detail page, carrying the
        partially-filled item in ``meta``.
        """
        li_list = response.xpath('//*[@id="content"]/div/div[1]/ol/li')
        for li in li_list:
            item = {}
            item['url'] = li.xpath('./div/div/a/@href').extract_first()
            # BUG FIX: the second title xpath was malformed ('.div/...'
            # missing a '/'), always returned None and clobbered this value.
            item['title'] = li.xpath('./div/div[2]/div[1]/a/text()').extract_first()
            # Year is the first 4-digit run in the info paragraph's second
            # text node (e.g. "1994 / 美国 / ..."); guard against pages
            # where that node or the digits are missing instead of crashing
            # on an IndexError as the original did.
            info_texts = li.xpath('./div/div[2]/div[2]/p[1]/text()').extract()
            years = re.findall(r'\d{4}', info_texts[1]) if len(info_texts) > 1 else []
            item['year'] = years[0] if years else None
            yield Request(item['url'], callback=self.next_url_parse, meta={'item': item})

    def next_url_parse(self, response):
        """Parse a movie detail page and emit the completed item."""
        # deepcopy so concurrent responses sharing meta can't mutate
        # each other's item.
        item = deepcopy(response.meta['item'])
        item['star'] = response.xpath('//*[@id="interest_sectl"]/div[1]/div[2]/strong/text()').extract_first()
        # BUG FIX: the original re-requested item['url'] with this same
        # callback — re-crawling the page it had just parsed (a loop only
        # stopped by the dupefilter) — and never yielded the item.
        yield item