Step 1: Create the project
scrapy startproject project_name
Step 2: File structure of a Scrapy project
scrapy.cfg: the project's configuration file
items.py: the project's item definitions
pipelines.py: the project's pipeline code
settings.py: the project's settings file
spiders/: the directory holding the spider code
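For example, running the command from Step 1 as scrapy startproject doubanmovie (the project name used in the complete code below) generates roughly this layout; recent Scrapy versions also add a middlewares.py:

doubanmovie/
    scrapy.cfg
    doubanmovie/
        __init__.py
        items.py
        pipelines.py
        settings.py
        spiders/
            __init__.py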
Step 3: Define the Item
An Item is the container that holds the scraped data. It is used much like a Python dict, but adds an extra layer of protection against undefined-field errors caused by typos.
Example:
import scrapy

class DoubanmovieItem(scrapy.Item):
    title = scrapy.Field()      # movie title
    movieInfo = scrapy.Field()  # description: director, cast, genre, etc.
    star = scrapy.Field()       # rating
    critical = scrapy.Field()   # number of ratings (used in the complete code below)
    quote = scrapy.Field()      # the movie's best-known one-line quote
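Because the fields are declared up front, a misspelled field name fails immediately instead of silently creating a new key. A minimal sketch of that protection:

item = DoubanmovieItem()
item['title'] = 'The Shawshank Redemption'  # fine: the field is declared
item['titel'] = 'oops'                      # raises KeyError: field not declared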
Step 4: Write the spider class and import the required packages. Set the project as the current root directory (in PyCharm: right-click the folder and choose Mark Directory as > Sources Root).
import scrapy
from scrapy.spiders import CrawlSpider
from scrapy.http import Request
from scrapy.selector import Selector
from chuchutong.items import ChuchutongItem
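As an alternative to creating the spider file by hand, Scrapy can scaffold one with the genspider command (note the generated template subclasses scrapy.Spider, while the examples here use CrawlSpider):

scrapy genspider chuchutong m.chuchutong.com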
Step 5: Analyze the repeating list (when crawling a series of similar entries, inspect the HTML structure, identify the repeating part, and iterate over it with XPath).
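The XPath expressions in the spider below imply a page structure roughly like the following (a hypothetical sketch inferred from those paths; the actual markup of m.chuchutong.com may differ):

<ul id="dayBQ">
    <li><section><h2>title 1</h2>...</section></li>
    <li><section><h2>title 2</h2>...</section></li>
    ...
</ul>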
class Chuchutong(CrawlSpider):
    name = 'chuchutong'
    start_urls = ['https://m.chuchutong.com/']

    def parse(self, response):
        selector = Selector(response)
        # Select every <li> under the #dayBQ list: one node per entry
        Movies = selector.xpath('//*[@id="dayBQ"]/li')
        for eachMovie in Movies:
            item = ChuchutongItem()
            # Use a relative XPath so each iteration reads its own <li>;
            # the absolute //*[@id="dayBQ"]/li[1]/section/h2/text() would
            # return the first entry's title on every pass
            biaoti = eachMovie.xpath('section/h2/text()').extract()
            print(biaoti)
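XPath expressions are easiest to verify interactively before wiring them into the spider; scrapy shell fetches the page and opens a Python session (the path below is the one assumed above):

scrapy shell 'https://m.chuchutong.com/'
>>> response.xpath('//*[@id="dayBQ"]/li/section/h2/text()').extract()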
Complete code
# -*- coding:utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider
from scrapy.http import Request
from scrapy.selector import Selector
from doubanmovie.items import DoubanmovieItem

class Douban(CrawlSpider):
    name = 'douban'
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response):
        selector = Selector(response)
        # Each div.info block is one movie entry
        Movies = selector.xpath('//div[@class="info"]')
        for eachMovie in Movies:
            item = DoubanmovieItem()
            # The title may be split across several <span> elements
            title = eachMovie.xpath('div[@class="hd"]/a/span/text()').extract()
            fullTitle = ''.join(title)
            movieInfo = eachMovie.xpath('div[@class="bd"]/p/text()').extract()
            star = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span/text()').extract()[0]
            critical = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span/text()').extract()[1]
            quote = eachMovie.xpath('div[@class="bd"]/p[@class="quote"]/span/text()').extract()
            # quote may be empty, so check before indexing
            if quote:
                quote = quote[0]
            else:
                quote = ''
            item['title'] = fullTitle
            item['movieInfo'] = ';'.join(movieInfo)
            item['star'] = star
            item['critical'] = critical
            item['quote'] = quote
            yield item  # hand the item to the feed exporter (e.g., CSV)
        nextLink = selector.xpath('//span[@class="next"]/link/@href').extract()
        # page 10 is the last page and has no next-page link
        if nextLink:
            nextLink = nextLink[0]
            url = "https://movie.douban.com/top250" + nextLink
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
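Run the spider from the project root and let the feed export write the items out (the output format is inferred from the file extension; movies.csv is just an example name):

scrapy crawl douban -o movies.csv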