Crawling Zhihu's Dynamic Pages with the Python Scrapy Framework


Background: I am a pure humanities student with about two months of hands-on Python experience.


Goal: use the Scrapy framework to crawl this Zhihu search page: https://www.zhihu.com/search?type=content&q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0
Specifically, the content marked by the red box in the original screenshot, i.e. the questions and answers shown in the search results.



Steps:
Step 1. Create the project: cd into the directory where the project should live and run the following command.

scrapy startproject zhihuSpider

Step 2. Create the spider: cd into the spiders folder and run the command below. ZH is the spider's name and must be unique within the project; zhihu.com becomes its allowed_domains.

scrapy genspider ZH zhihu.com
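
After these two commands the generated project should look roughly like this (file names as produced by Scrapy's templates, plus the ZH.py spider created above):

zhihuSpider/
    scrapy.cfg
    zhihuSpider/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            ZH.py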

Step 3. items.py: the fields to collect are the question text, the answerer's name and self-introduction (bio), and the answer content.

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

# Information about each question
class QuesInfoItem(scrapy.Item):
    question = scrapy.Field()        # question text
    author_name = scrapy.Field()     # answerer's name
    author_bio = scrapy.Field()      # answerer's bio
    answer_content = scrapy.Field()  # answer content
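
For reference, a scrapy.Item behaves much like a dict, which is how the spider later fills it field by field. A minimal usage sketch with made-up sample values:

from zhihuSpider.items import QuesInfoItem

item = QuesInfoItem()
item['question'] = 'What is machine learning?'   # made-up sample value
item['author_name'] = 'some answerer'            # made-up sample value
print(dict(item))  # {'question': 'What is machine learning?', 'author_name': 'some answerer'}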

Step 4. middlewares.py: import the time module so the spider can record how long the crawl takes.

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import time,os


class ZhihuSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        self.startTime = time.time()
        # print('__file__ is %s' % __file__)
        # print ("path ====== %s " % os.path.normcase(__file__))
        print('   Spider started   '.center(50, "*"))
        print(('   Start time: %.2f   ' % self.startTime).center(50, "*"))

    def spider_closed(self, spider):
        self.endTime = time.time()
        _t = self.endTime - self.startTime
        print(('   End time: %.2f   ' % self.endTime).center(50, "*"))
        print(('   Elapsed: %.2f s   ' % _t).center(50, "*"))
        print('   Spider finished   '.center(50, "*"))

class MyproxiesSpiderMiddleware(object):

    def __init__(self):
        self.ips = []

    def process_request(self, request, spider):
        pass
        # Proxy support is left disabled; the commented lines below show where it would plug in:
        # if spider.name == 'question':
        #     ip = "https://116.3.94.128:80"
        #     # print("============ using proxy %s ============" % ip)
        #     request.meta["proxy"] = ip
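
The class above is only a stub. As a rough sketch of how it could hand out a proxy per request (the addresses below are placeholders, not working proxies, and the class name is made up for illustration):

import random

class ProxyRotationSketch(object):
    """Downloader-middleware sketch: attach a random proxy to every request."""

    def __init__(self):
        # Placeholder proxy addresses, for illustration only
        self.ips = ["https://116.3.94.128:80", "https://127.0.0.1:8888"]

    def process_request(self, request, spider):
        if self.ips:
            # Scrapy's HttpProxyMiddleware honours request.meta["proxy"]
            request.meta["proxy"] = random.choice(self.ips)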

Step 5. pipelines.py: add the ZhihuSpiderWriteToCSVPipeline and ZhihuSpiderWriteToDBPipeline classes to save the scraped data to a CSV file and to an SQLite database.
# -*- coding: utf-8 -*-

# Define your item pipelines here
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import csv
import os,sys
import sqlite3
from zhihuSpider.items import QuesInfoItem

# Write the scraped data to a CSV file
class ZhihuSpiderWriteToCSVPipeline(object):

    def open_spider(self, spider):
        # print("abs path is %s" %(os.path.abspath(sys.argv[0])))
        
        self.csvFile = open(os.path.abspath('C:/Users/Administrator/Desktop/ZhihuSpider/test.csv'), "w+",newline='')
        try:
            self.write = csv.writer(self.csvFile)
            self.write.writerow(('question', 'author_name', 'author_bio', 'answer_content'))
        except Exception as e:
            pass 

    def close_spider(self, spider):
        self.csvFile.close()

    def process_item(self, item, spider):
        try:
            self.write.writerow((item["question"], item["author_name"], item["author_bio"], item["answer_content"]))
        except BaseException as e:
            pass
            
        return item

# Write the scraped data to an SQLite database
class ZhihuSpiderWriteToDBPipeline(object):

    def open_spider(self, spider):
        try:
            self.conn = sqlite3.connect(os.path.abspath('C:/Users/Administrator/Desktop/ZhihuSpider/test.db'))
            self.cursor = self.conn.cursor()
            # Create the target table if it does not exist yet (columns match the insert in process_item)
            self.cursor.execute('create table if not exists question '
                                '(question text, author_name text, author_bio text, answer_content text)')
        except BaseException as e:
            pass
            
    def close_spider(self, spider):
        try:
            self.cursor.close()
            self.conn.commit()
            self.conn.close()
        except BaseException as e:
            pass

    def process_item(self, item, spider):
        try:
            if isinstance(item, QuesInfoItem):
                self.cursor.execute('insert into question (question, author_name, author_bio, answer_content) values (?, ?, ?, ?)', (item["question"], item["author_name"], item["author_bio"], item["answer_content"]))
        except BaseException as e:
            print(e)
            pass
            
        return item
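
Once a crawl has finished, the stored rows can be spot-checked. A minimal sketch, assuming the same test.db path used above:

import sqlite3

conn = sqlite3.connect('C:/Users/Administrator/Desktop/ZhihuSpider/test.db')
for row in conn.execute('select question, author_name from question limit 5'):
    print(row)  # print a few stored rows to confirm the pipelines wrote them
conn.close()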

Step 6. settings.py: switch on each of the pieces above.
# -*- coding: utf-8 -*-

# Scrapy settings for zhihuSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'zhihuSpider'

LOG_LEVEL= 'WARNING'

SPIDER_MODULES = ['zhihuSpider.spiders']
NEWSPIDER_MODULE = 'zhihuSpider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhihuSpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'zh-cn',
  'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
   'zhihuSpider.middlewares.ZhihuSpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'zhihuSpider.middlewares.MyproxiesSpiderMiddleware': 544,
}

DOWNLOAD_DELAY = 1

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'zhihuSpider.pipelines.ZhihuSpiderWriteToCSVPipeline': 300,  # must match the class name in pipelines.py
   'zhihuSpider.pipelines.ZhihuSpiderWriteToDBPipeline': 400  # must match the class name in pipelines.py
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
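
To confirm that Scrapy is actually picking this file up, one quick check from the project root is the scrapy settings command; if it prints 1, the DOWNLOAD_DELAY set above is in effect:

scrapy settings --get DOWNLOAD_DELAY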

Step 7. The main part: ZH.py
1) The code:
# -*- coding: utf-8 -*-
import scrapy
#import requests
from scrapy import Request
from scrapy.spiders import CrawlSpider
import time
import re
import json
from zhihuSpider.items import QuesInfoItem

class ZhSpider(CrawlSpider):
    name = 'ZH'
    allowed_domains = ['zhihu.com']
    # start_urls is the list of entry URLs the spider starts crawling from. The first pages fetched
    # will be these; subsequent URLs are extracted from the responses to the initial requests.
    start_urls = ['https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0',
    'https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0&correction=1&type=content&offset=30',
    ]

    i = 0
    # parse() is a Spider method. It is called with the Response returned for each initial URL as its
    # only argument; it parses the response data, extracts items, and yields Requests for further URLs.

    def parse(self, response):

        # print('***********************\n',response.body,'***********************\n\n')
        print('************* downloading JSON *********************')
        # 1. Parse the response and generate items.
        # Load the JSON and take the content under the 'htmls' key: each entry is one big HTML string
        # with no further keys, so regular expressions are used to pull the fields out.
        try:
            # print(type(response.body))
            # print(type(response.text))
            jsDict = json.loads(response.body)
            # print(type(jsDict))
            print('************* parsing page *********************')
            questions = jsDict['htmls']
    
            # Extract each question together with the answerer's name, bio, and answer content
            for q in questions:
                item = QuesInfoItem()
                # Strip the <em></em> tags that highlight the search keyword in the source
                q = q.replace('<em>','').replace('</em>','')
                # The question text sits between class=\"js-title-link\"> and </a>
                question = re.findall('class=\"js-title-link\">(.*?)</a>',q)[0]
                print(question)
                item['question'] = question

                time.sleep(2)
        
                # The answerer's name sits between data-author-name=\" and \" data-entry-url=\"
                try:
                    author_name = re.findall('data-author-name=\"(.*?)\" data-entry-url=\"',q)[0]
                    print('Author name:', author_name)
                except:
                    author_name = None
                item['author_name'] = author_name
    
                # The answerer's bio sits between <span title=\" and \" class=\"bio\">
                try:
                    author_bio = re.findall('<span title=\"(.*?)\" class=\"bio\">',q)[0]
                    print('Author bio:', author_bio)
                except:
                    author_bio = None
                item['author_bio'] = author_bio

                time.sleep(2)

                # The answer content sits between <script type=\"text\" class=\"content\"> and </script>
                try:
                    answer_content = re.findall('<script type=\"text\" class=\"content\">(.*?)</script>', q)[0]
                    print(answer_content[:100])  # answers are long; only print the first part as a check
                except:
                    answer_content = None
                item['answer_content'] = answer_content

                time.sleep(2)

                yield item

            # 2. Build the next-page URL and feed it back to parse() as the callback
            first_url = 'https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0'
            # The next-page link lives under the ['paging']['next'] key of the JSON response
            nexturl = jsDict['paging']['next']
            last_url = re.findall('&(.*)', nexturl)[0]
            url = first_url + '&' +last_url
            print(url)
            yield Request(url, callback=self.parse) 

        except json.decoder.JSONDecodeError as e:  # added while debugging a wrong URL; probably safe to remove now
            print('JSONDecodeError')



2) How to find the JSON URL: the page source shows that the content to extract sits inside <script></script> tags and is loaded dynamically by JavaScript. In Firefox, press F12 to open the web developer tools, switch to the Network tab, click "更多" (more) on the page, and inspect the new requests that appear; the response of this particular URL turns out to contain what we need. json.loads() then converts response.body, a bytes object, into a dict. A quick interactive check is sketched right below.
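
A convenient way to poke at that URL before writing any spider code is scrapy shell, assuming the request is not blocked (for example by robots.txt or a login wall):

scrapy shell "https://www.zhihu.com/r/search?q=%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0"
>>> import json
>>> jsDict = json.loads(response.body)
>>> list(jsDict.keys())        # should include 'htmls' and 'paging'
>>> jsDict['paging']['next']   # relative link used to build the next page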


3) How to parse the page: regular expressions do the work. Once the text surrounding the target content is located, (.*?) marks the part to capture. For example, the question text sits between class=\"js-title-link\"> and </a>, so question = re.findall('class=\"js-title-link\">(.*?)</a>',q)[0]. A tiny standalone example follows.
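
To see the pattern in isolation, here is a small self-contained example; the HTML snippet is made up to mimic the structure described above:

import re

# Made-up snippet imitating one entry of jsDict['htmls']
q = '<a class="js-title-link">What is machine learning?</a>'
question = re.findall('class=\"js-title-link\">(.*?)</a>', q)[0]
print(question)  # -> What is machine learning?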


4) How to build the next-page URL: the next-page link lives under the ['paging']['next'] key of the JSON response; extract it and splice it onto the base search URL, exactly as the end of parse() does. With everything in place, the crawl is launched as shown below.
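
The spider is started from the project root using the name defined in ZH.py; the CSV and SQLite pipelines enabled in settings.py then write their output as items come in:

scrapy crawl ZH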


Code: https://github.com/MTINGSUN/zhihuspider.git

That's all.

