Crawling NetEase e-books (网易电子书) with the Scrapy framework

Notes:
Installing Scrapy:
pip install scrapy
If installation fails with a Microsoft Visual C++ 14.0 error (raised while building Twisted), install the wheel offline: pip install ×××.whl
Verify the installation: scrapy bench
If that fails with a win32 error, run pip install pywin32

Getting started:
Target: NetEase e-books
1. Create the project
scrapy startproject xxx (project name)
2. Create the spider (the concrete commands used for this project are shown after this list)
scrapy genspider <spider name> <allowed domain>

	Notes:
		The spider name must not be the same as the project name
		The domain is the site the spider is allowed to crawl, e.g. baidu.com, zhihu.com
3. Analyze the site
	Extracting data (mastering the first two is enough):
		Regular expressions (the foundation; a must-know, but hard to master)
		XPath --> syntax for extracting data from HTML
		CSS --> syntax for extracting data from HTML
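
For this project the commands work out as follows (project name, spider name and domain are taken from the files below):

scrapy startproject wangyireading
cd wangyireading
scrapy genspider wy yuedu.163.com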

wy.py (the spider file)

import scrapy
from copy import deepcopy


class WySpider(scrapy.Spider):
    name = 'wy'
    allowed_domains = ['yuedu.163.com']
    start_urls = ['http://yuedu.163.com/']

    def parse(self, response):
        item = {}
        # the first two div.category blocks on the homepage hold the top-level categories
        div_list = response.xpath('//div[@class = "category"]')[0:2]
        for div in div_list:
            item["category"] = div.xpath('.//h3/text()').extract_first()
            # grab the sub-categories inside each top-level category
            p_list = div.xpath('.//p/a')
            for p in p_list:
                item["sort"] = p.xpath("./text()").extract_first()
                item["sort_link"] = p.xpath('./@href').extract_first()
                # links under the "图书" (books) category are path-relative and need
                # the full host; the other categories only need the scheme prepended
                if item["category"] != "图书":
                    item["sort_link"] = "http:" + item["sort_link"]
                else:
                    item["sort_link"] = "http://caiwei.yuedu.163.com" + item["sort_link"]

                print(item)  # debug output
                yield scrapy.Request(
                    item["sort_link"],
                    callback = self.parse_book_list,
                    meta = {"item":deepcopy(item)}
                )
                
    def parse_book_list(self, response):
        item = response.meta["item"]
        # every <tr> in the listing table is one book
        tr_list = response.xpath(".//tbody/tr")
        for tr in tr_list:
            item["book_name"] = tr.xpath('./td[2]//a/text()').extract_first()
            item["author"] = tr.xpath('./td[3]/text()').extract_first()
            item["book_link"] = tr.xpath('./td[2]//a/@href').extract_first()
            item["book_link"] = "https://yuedu.163.com" + item["book_link"]
            yield scrapy.Request(
                item["book_link"],
                callback=self.parse_book_detail,
                meta={"item": deepcopy(item)}
            )
    
    def parse_book_detail(self, response):
        item = response.meta["item"]
        div_list = response.xpath('.//div[@class="f-fl"]')
        for div in div_list:
            # 简介 = synopsis; extract() returns a list of text nodes,
            # which the pipeline cleans up afterwards
            item["简介"] = div.xpath('./div[1]/text()').extract()
            yield item
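
Running the spider from the project root (the name in the command is the spider's name attribute above); the -o flag is optional and exports the yielded items to a file:

scrapy crawl wy
scrapy crawl wy -o books.json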

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for wangyireading project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'wangyireading'

SPIDER_MODULES = ['wangyireading.spiders']
NEWSPIDER_MODULE = 'wangyireading.spiders'

LOG_LEVEL = "WARNING"  # suppress log messages below WARNING

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'wangyireading.middlewares.WangyireadingSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'wangyireading.middlewares.WangyireadingDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'wangyireading.pipelines.WangyireadingPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
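
If you prefer the export to live in settings.py instead of on the command line, newer Scrapy releases (2.1+) accept a FEEDS setting; a minimal sketch, not part of the original project:

FEEDS = {
    'books.json': {'format': 'json'},
}
FEED_EXPORT_ENCODING = 'utf-8'  # write Chinese text as-is instead of \u escapes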

pipelines.py

import re


class WangyireadingPipeline:
    def process_item(self, item, spider):
        # called once for every item the spider yields
        item["简介"] = self.process_content(item["简介"])
        print(item)
        return item

    # tidy up the extracted synopsis text
    def process_content(self, content):
        content = [re.sub(r'\s|\xa0', '', i) for i in content]  # strip newlines, spaces and non-breaking spaces
        content = [i for i in content if len(i) > 0]            # drop entries that are now empty
        return content
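
To see what process_content does, here is a standalone check with a made-up sample of what the raw 简介 (synopsis) extract typically looks like, i.e. whitespace-only strings around the real text:

import re

sample = ["\n            ", "本书简介\xa0", "   "]
cleaned = [re.sub(r'\s|\xa0', '', i) for i in sample]  # strip newlines, spaces and non-breaking spaces
cleaned = [i for i in cleaned if len(i) > 0]           # drop entries that are now empty
print(cleaned)  # ['本书简介']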

Part of the results (screenshot omitted)
