Python: crawler learning and teaching (4): Scrapy and crawling Douban

Scrapy

Scraping a website with Scrapy takes four steps:
create a Scrapy project;
define the Item container;
write the spider;
store the content.

Installation:

pip install scrapy

pip freeze > requirements.txt

Test whether the installation succeeded:

scrapy startproject hello   # create a project

cd hello   # enter the project directory

scrapy genspider baidu www.baidu.com    # generate a spider named baidu

scrapy crawl baidu        # run the spider; if there are no errors, the installation succeeded
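For reference, scrapy genspider writes roughly the following skeleton to hello/spiders/baidu.py (the exact template varies slightly between Scrapy versions); the generated spider does nothing yet, so a clean exit from scrapy crawl baidu is enough to confirm the install:

import scrapy


class BaiduSpider(scrapy.Spider):
    name = 'baidu'
    allowed_domains = ['www.baidu.com']
    start_urls = ['http://www.baidu.com/']

    def parse(self, response):
        # empty callback generated by the template
        pass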


Crawling quotes.toscrape.com

Project directory structure:
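(The original screenshot is not reproduced here; the tree below is the standard skeleton that scrapy startproject generates, with a placeholder project name, assuming the spider was created with scrapy genspider quotes quotes.toscrape.com.)

<project_name>/
    scrapy.cfg                # deploy configuration
    <project_name>/
        __init__.py
        items.py              # Item definitions (edited below)
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            quotes.py         # the spider edited below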

Crawling a single page:

Edit items.py:

import scrapy


class QuoteItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    text = scrapy.Field()
    author = scrapy.Field()
    tags = scrapy.Field()

Edit quotes.py:

import scrapy

from ..items import QuoteItem


class QuotesSpider(scrapy.Spider):
    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        quotes = response.css('.quote')
        for quote in quotes:
            item = QuoteItem()
            text = quote.css('.text::text').extract_first()
            author = quote.css('.author::text').extract_first()
            tags = quote.css('.tags .tag::text').extract()

            item['text'] = text
            item['author'] = author
            item['tags'] = tags
            yield item

 

scrapy crawl quotes   # run the crawl again


Crawling multiple pages:

import scrapy

from ..items import QuoteItem


class QuotesSpider(scrapy.Spider):
    name = 'quotes'
    allowed_domains = ['quotes.toscrape.com']
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        quotes = response.css('.quote')
        for quote in quotes:
            item = QuoteItem()
            text = quote.css('.text::text').extract_first()
            author = quote.css('.author::text').extract_first()
            tags = quote.css('.tags .tag::text').extract()

            item['text'] = text
            item['author'] = author
            item['tags'] = tags
            yield item
        # pagination: queue the "next" page and parse it with the same callback
        next_page = response.css('.pager .next a::attr(href)').extract_first()
        if next_page is not None:  # the last page has no "next" link
            url = response.urljoin(next_page)
            yield scrapy.Request(url, callback=self.parse)
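Scrapy 1.4 and later also provide response.follow, which accepts a relative URL directly; the ending of parse() could equivalently be written as the sketch below (not part of the original code):

        # equivalent pagination using response.follow (resolves relative URLs itself)
        next_page = response.css('.pager .next a::attr(href)').extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)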

 

Save the crawl results to JSON, JSON lines (jl), CSV or XML files:

scrapy crawl quotes -o quotes.json

scrapy crawl quotes -o quotes.jl

scrapy crawl quotes -o quotes.csv

scrapy crawl quotes -o quotes.xml
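The -o feed exports above already cover the "store the content" step for simple cases. For custom storage Scrapy uses item pipelines; below is a minimal sketch (not from the original post, with a made-up class and output file name) of a pipeline in pipelines.py that writes one JSON object per line, assuming it is enabled in settings.py under your own project name:

import json


class JsonLinesPipeline:
    def open_spider(self, spider):
        # called once when the spider starts
        self.file = open('quotes_pipeline.jl', 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # called for every item the spider yields
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

# settings.py (project name is a placeholder):
# ITEM_PIPELINES = {'<project_name>.pipelines.JsonLinesPipeline': 300}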

 

 


Basic usage:

Target site for trying out Scrapy: http://quotes.toscrape.com/

Scrapy provides a shell for interactive work on the command line:

scrapy shell quotes.toscrape.com

In the interactive shell:

quotes = response.css('.quote')

quotes

Evaluating quotes echoes the selected elements, which is handy for debugging.

Bring up the shell's help with shelp().

quotes[0].css('.text')

quotes[0].css('.text::text').extract_first()

# quotes[0].css('.text::text').extract()

quotes[0].css('.tags .tag::text').extract()
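The shell is also a convenient place to check the pagination selector used in the multi-page spider above; a quick sketch:

response.css('.pager .next a::attr(href)').extract_first()   # e.g. '/page/2/' on the first page
response.urljoin(response.css('.pager .next a::attr(href)').extract_first())   # absolute URL for the next request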


Crawling Douban:

https://movie.douban.com/top250

Two modules: Requests and BeautifulSoup4

pip3 install requests
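The spiders below use only requests plus json against Douban's mobile API. For the Top250 page itself, the Requests + BeautifulSoup4 combination mentioned above would look roughly like the sketch below (also run pip3 install beautifulsoup4; the div.item, span.title and span.rating_num selectors are assumptions about the current page markup, and Douban rejects requests without a browser-like User-Agent):

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
}
resp = requests.get("https://movie.douban.com/top250", headers=headers)
soup = BeautifulSoup(resp.text, "html.parser")
for entry in soup.select("div.item"):                       # one block per movie (assumed class)
    title = entry.select_one("span.title").get_text()       # first title span (assumed class)
    rating = entry.select_one("span.rating_num").get_text() # rating (assumed class)
    print(title, rating)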


Douban TV series spider

# coding=utf-8
import requests
import json

class DoubanSpider():
    def __init__(self):
        self.url_temp_list = [
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/tv_american/items?start={}&count=18&loc_id=108288",
             "country": "US"},
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/tv_domestic/items?start={}&count=18&loc_id=108288",
             "country": "CN"},
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/tv_korean/items?start={}&count=18&loc_id=108288",
             "country": "KR"},
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/tv_japanese/items?start={}&count=18&loc_id=108288",
             "country": "JP"}]

        self.headers = {
            "Referer": "https://m.douban.com/tv/american",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}

    def parse_url(self, url):  # send the request and get the response
        print(url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def get_content_list(self, json_str):  # extract the data
        dict_ret = json.loads(json_str)
        content_list = dict_ret["subject_collection_items"]
        total = dict_ret["total"]
        return content_list, total

    def save_content_list(self, content_list, country):  # save to file
        with open("douban.txt", "a", encoding="utf-8") as f:
            for content in content_list:
                content["country"] = country
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")  # one JSON object per line
        print("saved")

    def run(self):  # main logic
        for url_temp in self.url_temp_list:
            num = 0
            total = 100  # assume there is at least a first page; updated after the first response
            while num < total + 18:
                # 1. build the start url
                url = url_temp["url_temp"].format(num)
                # 2. send the request and get the response
                json_str = self.parse_url(url)
                # 3. extract the data
                content_list, total = self.get_content_list(json_str)
                # 4. save
                self.save_content_list(content_list, url_temp["country"])
                # if len(content_list) < 18:
                #     break
                # 5. build the next page's url and loop again
                num += 18

if __name__ == "__main__":
    douban_spider = DoubanSpider()
    douban_spider.run()
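Each line of douban.txt is a standalone JSON object, so the file can be read back like a JSON-lines file; a small sketch:

import json

with open("douban.txt", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

print(len(records))
print(records[0]["country"], records[0].get("title"))  # "title" is assumed to be one of the API's fields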


Douban book spider

import requests
import json

class DoubanBook_Spider():
    def __init__(self):
        self.url_temp_list = [
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/book_fiction/items?start={}&count=18&loc_id=0",
             "book": "fiction"},
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/book_nonfiction/items?start={}&count=18&loc_id=0",
             "book": "nonfiction"},
            {"url_temp": "https://m.douban.com/rexxar/api/v2/subject_collection/book_classic/items?start={}&count=18&loc_id=0",
             "book": "classic"}
        ]
        self.headers={
            "Referer": "https://m.douban.com/book/classic",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
        }

    def parse_url(self, url):  # send the request and get the response
        print(url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    def get_content_list(self, json_str):  # extract the data
        dict_ret = json.loads(json_str)
        content_list = dict_ret["subject_collection_items"]
        total = dict_ret["total"]
        return content_list, total

    def save_content_list(self, content_list, book):
        with open("book_list.txt", "a", encoding="utf-8") as f:
            for content in content_list:
                content["book"] = book
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")  # one JSON object per line
            print("saved")

    def run(self):
        for url_temp in self.url_temp_list:
            num = 0
            total = 100  # assume there is at least a first page; updated after the first response
            while num < total + 18:
                # 1. build the start url
                url = url_temp["url_temp"].format(num)
                # 2. send the request and get the response
                json_str = self.parse_url(url)
                # 3. extract the data
                content_list, total = self.get_content_list(json_str)
                # 4. save
                self.save_content_list(content_list, url_temp["book"])
                # if len(content_list) < 18:
                #     break
                # 5. build the next page's url and loop again
                num += 18

if __name__ == "__main__":
    douban_spider = DoubanBook_Spider()
    douban_spider.run()

 
