Python Web Scraping: Crawling Blog Data with Scrapy

Create a new Scrapy spider file:

# -*- coding: utf-8 -*-
import scrapy

class CsdnBlogSpider(scrapy.Spider):
    name = 'csdn_blog'
    allowed_domains = ['blog.csdn.net']
    keyword = 'another'

    def start_requests(self):
        # Request the first 10 pages of CSDN search results for the keyword
        for pn in range(1, 11):
            url = 'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0' % (
                pn, self.keyword)
            yield scrapy.Request(
                url=url,
                callback=self.parse
            )

    def parse(self, response):
        # Pull the blog-post links out of each search-result entry
        href_s = response.xpath('//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href').extract()
        for href in href_s:
            yield scrapy.Request(
                url=href,
                callback=self.parse2
            )

    def parse2(self, response):
        item = dict(
            # extract_first() takes the first match, equivalent to xpath('//h1[@class="title-article"]/text()')[0]
            title=response.xpath('//h1[@class="title-article"]/text()').extract_first(),
            # raw bytes of the whole page
            data=response.body
        )
        yield item
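
One small caveat worth noting (my own suggestion, not from the original post): the search pages are served from so.csdn.net while allowed_domains only lists blog.csdn.net, so depending on your Scrapy version the offsite filter may drop the search requests. Listing both domains avoids that:

    allowed_domains = ['blog.csdn.net', 'so.csdn.net']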

Uncomment these two settings in the settings.py file:

DOWNLOADER_MIDDLEWARES = {
   's1.middlewares.S1DownloaderMiddleware': 543,
}

ITEM_PIPELINES = {
   's1.pipelines.S1Pipeline': 300,
}
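
Two more settings are often worth adding for a crawl like this (my own suggestion, not part of the original post): if CSDN's robots.txt disallows these URLs, the default ROBOTSTXT_OBEY = True will silently filter the requests, and a small download delay keeps the request rate polite:

ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 1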

In the middlewares.py file, modify the process_request method of the S1DownloaderMiddleware class:

    # This is the method you normally override.
    def process_request(self, request, spider):
        # request.headers stores byte strings, so build it with Scrapy's Headers class (imported below)
        request.headers = Headers(
            {
                'User-Agent': user_agent.get_user_agent_pc(),
            }
        )
        # Fetch a fresh proxy IP from the proxy API and attach it to the request
        request.meta['proxy'] = 'http://' + ur.urlopen(
            'http://api.ip.data5u.com/dynamic/get.html?order=06b5d4a85d10b5cbe9db1e5a3b9fa2e1&sep=4'
        ).read().decode('utf-8').strip()
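
For this to run, middlewares.py also needs the matching imports at the top of the file. A minimal set, assuming user_agent is the same user-agent helper module used elsewhere in this post:

import urllib.request as ur
import user_agent
from scrapy.http.headers import Headers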

Modify the pipelines.py file:


class S1Pipeline(object):
    def process_item(self, item, spider):
        with open('blog_html/%s.html' % item['title'], 'wb') as f:
            f.write(item['data'])
        return item
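
Two practical caveats with this pipeline: the blog_html directory must already exist, and a title containing characters such as / or ? is not a valid file name (the standalone script at the end of this post strips them with re.sub). A sketch of a slightly more defensive variant (the directory creation and the 'untitled' fallback are my additions, not from the original):

import os
import re

class S1Pipeline(object):
    def process_item(self, item, spider):
        # Create the output directory on first use
        os.makedirs('blog_html', exist_ok=True)
        # Strip characters that are illegal in file names; fall back to 'untitled' if the title is missing
        title = re.sub(r'[\\/:*<>|"?]', '', item['title'] or 'untitled')
        with open('blog_html/%s.html' % title, 'wb') as f:
            f.write(item['data'])
        return item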

That is basically it. If you don't want to type scrapy crawl <spider name> in the console every time, you can add a start.py file under the spiders folder with the following code:

from scrapy import cmdline

# 'csdn_blog' is the spider name defined in the spider above
cmdline.execute('scrapy crawl csdn_blog'.split())

Done.

Below is the non-distributed (single-script) version; it does the same thing as the Scrapy spider above:

No proxy IP is used here, so run it with caution; it would be a pain if your IP got banned.

import urllib.request as ur
import lxml.etree as le
import user_agent
import re

def getRequest(url):
    return ur.Request(
        url=url,
        headers={
            'User-Agent': user_agent.get_user_agent_pc(),
            'Cookie': 'TY_SESSION_ID=14e93d1c-5cfb-4692-8416-dc2df061bb5c; JSESSIONID=68E9815DA238619AB37E640211691B8B; uuid_tt_dd=10_20594510460-1585746871024-545182; dc_session_id=10_1585746871024.456447; dc_sid=e127d0cf2db7a2e5cf7ded7b0b7d1880; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1585746876; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_20594510460-1585746871024-545182; c-toolbar-writeguide=1; announcement=%257B%2522isLogin%2522%253Afalse%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fblog.csdn.net%252Fblogdevteam%252Farticle%252Fdetails%252F105203745%2522%252C%2522announcementCount%2522%253A1%252C%2522announcementExpire%2522%253A78705482%257D; firstDie=1; __guid=129686286.421372154518304900.1585746901554.712; monitor_count=1; c_ref=https%3A//blog.csdn.net/; __gads=ID=86722e1f5d97e31d:T=1585746904:S=ALNI_MaIZXWpb5EgzqK0TDZB-yNS9h6l_g; searchHistoryArray=%255B%2522python%2522%252C%2522opencv%2522%255D; dc_tos=q8426m; c-login-auto=3; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1585746960'
        }
    )
hot_word=['C++','java','python','PHP','Go','Objective-C','SQL','PL/SQL','C','Swift','Swift','Visual Basic',]

if __name__ == '__main__':
    keyword = input('Keyword: ')
    pn_start = int(input('Start page: '))
    pn_end = int(input('End page: '))
    for pn in range(pn_start, pn_end + 1):
        request = getRequest(
            'https://so.csdn.net/so/search/s.do?p=%s&q=%s&t=blog&viparticle=&domain=&o=&s=&u=&l=&f=&rbg=0' % (
                pn, keyword)
        )
        # Fetch one page of search results and extract the blog links
        response = ur.urlopen(request).read()
        href_s = le.HTML(response).xpath('//div[@class="search-list-con"]/dl//span[@class="mr16"]/../../dt/div/a[1]/@href')
        for href in href_s:
            try:
                response_blog = ur.urlopen(
                    getRequest(href)
                ).read()
                title = le.HTML(response_blog).xpath('//h1[@class="title-article"]/text()')[0]
                # Strip characters that are not allowed in file names
                title = re.sub(
                    r'[\\/:*<>|"?]', '', title
                )
                with open('blog/%s.html' % title, 'wb') as f:
                    f.write(response_blog)
                print(title)
            except Exception as e:
                print(e)

# def getProxyOpener():
#     # proxy_address = ur.urlopen(
#     #     'http://api.ip.data5u.com/dynamic/get.html?order=d314e5e5e19b0dfd19762f98308114ba&sep=4').read().decode(
#     #     'utf-8').strip()
#     proxy_handler = ur.ProxyHandler(
#         {
#             'http': '58.218.214.147:4029'
#         }
#     )
#     return ur.build_opener(proxy_handler)
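
If you do want the standalone script to go through a proxy, one way is to build an opener from a handler like the commented-out one above and use it in place of ur.urlopen. This is only a sketch, assuming the same data5u API endpoint used in the Scrapy middleware:

import urllib.request as ur

def getProxyOpener():
    # Ask the proxy API for a fresh address (same endpoint as in the Scrapy middleware)
    proxy_address = ur.urlopen(
        'http://api.ip.data5u.com/dynamic/get.html?order=06b5d4a85d10b5cbe9db1e5a3b9fa2e1&sep=4'
    ).read().decode('utf-8').strip()
    proxy_handler = ur.ProxyHandler({'http': proxy_address})
    return ur.build_opener(proxy_handler)

# Then, inside the loops above, replace ur.urlopen(request) with:
# opener = getProxyOpener()
# response = opener.open(request).read()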

 
