scrapy--dytt (电影天堂)

  If you like movies and want to catch a new release but didn't make it to the cinema and have no good way to get hold of it, you can download it from 电影天堂 (dy2018.com). This post walks through one approach using Scrapy.

1. dytt.py

# -*- coding: utf-8 -*-
import random

import scrapy
from scrapy.linkextractors import LinkExtractor

from Dytt.items import DyttItem
from Dytt.settings import USER_AGENT_LIST

class DyttSpider(scrapy.Spider):
    name = 'dytt'
    allowed_domains = ['www.dy2018.com']

    # browser-like headers; without them the site answers ERROR 400 (see the notes below)
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Host': 'www.dy2018.com',
        'Origin': 'http://www.dy2018.com',
        'Referer': 'http://www.dy2018.com/html/tv/oumeitv/index.html',
        'User-Agent': random.choice(USER_AGENT_LIST),  # one UA picked per run
        'X-Requested-With': 'XMLHttpRequest',
    }

    start_urls = ['http://www.dy2018.com/html/tv/oumeitv/index.html']

    def parse(self, response):
        # follow every detail-page link inside the listing block
        le = LinkExtractor(restrict_css='div.co_area1 div.co_content2')
        for link in le.extract_links(response):
            yield scrapy.Request(link.url, callback=self.parse_url, headers=self.headers)

    def parse_url(self, response):
        # all film details live inside div#Zoom; grab its <p> text nodes once
        sel = response.css('div#Zoom')
        texts = sel.xpath('./p/text()').extract()

        # NOTE: the indices below are tied to the current page layout and
        # will break if the site reorders the detail fields
        dytt = DyttItem()
        dytt['china_name'] = texts[1]
        dytt['english_name'] = texts[2]
        dytt['year'] = texts[3]
        dytt['home'] = texts[4]
        dytt['type'] = texts[5]
        dytt['time'] = texts[8]
        dytt['director'] = texts[15]
        dytt['role'] = texts[16]

        # first table holds the ftp link, second the thunder link
        dytt['ftp'] = sel.xpath('(.//tbody)[1]//a/@href').extract()[0]
        dytt['thunder'] = sel.xpath('(.//tbody)[2]//a/@href').extract()[0]

        yield dytt
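
The positional indices above are fragile. A more robust sketch, assuming the detail pages label each field with a "◎" marker (◎译名, ◎年代, ◎产地, ... as dytt pages commonly do; the labels here are assumptions, verify them against a real page), matches fields by label instead:

import re

# field name -> label pattern; \s* also absorbs the full-width spaces
# the site puts inside labels (e.g. "◎译　名")
FIELD_PATTERNS = {
    'china_name':   r'◎译\s*名\s*(.+)',
    'english_name': r'◎片\s*名\s*(.+)',
    'year':         r'◎年\s*代\s*(.+)',
    'home':         r'◎产\s*地\s*(.+)',
    'type':         r'◎类\s*别\s*(.+)',
    'time':         r'◎上\s*映\s*日\s*期\s*(.+)',
    'director':     r'◎导\s*演\s*(.+)',
    'role':         r'◎主\s*演\s*(.+)',
}

def parse_zoom_fields(sel):
    # usage: fields = parse_zoom_fields(response.css('div#Zoom'))
    zoom_text = '\n'.join(sel.xpath('.//text()').extract())
    fields = {}
    for name, pattern in FIELD_PATTERNS.items():
        m = re.search(pattern, zoom_text)
        if m:
            fields[name] = m.group(1).strip()
    return fields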

2. items.py

import scrapy

class DyttItem(scrapy.Item):
    china_name = scrapy.Field()
    english_name = scrapy.Field()
    year = scrapy.Field()
    home = scrapy.Field()
    type = scrapy.Field()
    time = scrapy.Field()
    director = scrapy.Field()
    role = scrapy.Field()

    ftp = scrapy.Field()
    thunder = scrapy.Field()

3. pipelines.py

# -*- coding: utf-8 -*-
import json
import codecs

class DyttPipeline(object):

    def open_spider(self, spider):
        self.file = codecs.open('dytt1.json', 'w', encoding='utf-8')  # open the output file once, when the spider starts

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"  # ensure_ascii=False keeps the Chinese text readable
        self.file.write(line)
        return item
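
Because each item is dumped on its own line, the output is JSON Lines rather than a single JSON document, so it can be read back line by line:

import json

with open('dytt1.json', encoding='utf-8') as f:
    items = [json.loads(line) for line in f]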

4. settings.py

# Pool of browser User-Agent strings; the spider picks one with random.choice().
# Defined as a list and named USER_AGENT_LIST so it does not collide with
# Scrapy's built-in, string-valued USER_AGENT setting.
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]

CONCURRENT_REQUESTS = 16    # up to 16 requests in flight at once
DOWNLOAD_DELAY = 0.2        # wait 0.2 s between consecutive requests to the same site

# Do not obey robots.txt rules
ROBOTSTXT_OBEY = False

# field order for exported feeds
FEED_EXPORT_FIELDS = ['china_name', 'english_name', 'year', 'home', 'type', 'time', 'director', 'role', 'ftp', 'thunder']

COOKIES_ENABLED = False

ITEM_PIPELINES = {
    'Dytt.pipelines.DyttPipeline': 300,
}
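
As written, the spider picks a single User-Agent when the class is defined. To rotate the UA on every request instead, a small downloader middleware works; this is a sketch (the Dytt/middlewares.py module path and the priority 400 are assumptions, not part of the original project):

# Dytt/middlewares.py (sketch)
import random

from Dytt.settings import USER_AGENT_LIST

class RandomUserAgentMiddleware(object):
    # swap in a fresh User-Agent for every outgoing request
    def process_request(self, request, spider):
        request.headers['User-Agent'] = random.choice(USER_AGENT_LIST)

Then enable it in settings.py:

DOWNLOADER_MIDDLEWARES = {
    'Dytt.middlewares.RandomUserAgentMiddleware': 400,
}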

Problems encountered:

1. Writing the output in .json format in pipelines.py (see the pipeline above).
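
An alternative worth knowing: Scrapy's built-in feed exports can produce the same file without a custom pipeline, with FEED_EXPORT_FIELDS in settings.py controlling the field order. For example:

scrapy crawl dytt -o dytt1.json    # single JSON array
scrapy crawl dytt -o dytt1.jl      # JSON Lines, like the pipeline above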

2. When crawling started, the site returned ERROR 400.

Fix: add the headers dict shown in dytt.py to every request and rerun.
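
One caveat: requests generated from start_urls by Scrapy's default start_requests() do not use self.headers, so the very first request can still come back as a 400. Overriding start_requests in DyttSpider attaches them there too (a minimal sketch):

    def start_requests(self):
        # make sure the initial request also carries the browser-like headers;
        # the default start_requests() would send it without them
        for url in self.start_urls:
            yield scrapy.Request(url, headers=self.headers, callback=self.parse)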

3. The thunder link could not be scraped. Open the page source in the browser (the raw HTML is what the crawler actually receives) and you will see that the thunder details do not appear there, presumably because they are injected by JavaScript.

Viewing the page source: (screenshot omitted)
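
The same check can be reproduced from scrapy shell (the detail-page URL below is a placeholder):

scrapy shell "http://www.dy2018.com/i/<detail-page>.html"
>>> 'thunder' in response.text   # False here confirms the link is not in the raw HTML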

If you run into other problems, feel free to leave a comment!

Reposted from: https://www.cnblogs.com/eilinge/p/9402099.html
