Scrapy Project in Practice: Crawling the Entire Jianshu Site

 

Table of Contents

Creating the project

start.py

js.py

items.py

middlewares.py

pipelines.py

settings.py

Creating the MySQL table


Creating the project

scrapy startproject jianshu_spider

cd .\jianshu_spider\

scrapy genspider -t crawl js "jianshu.com"
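
The -t crawl flag generates a CrawlSpider skeleton rather than a plain Spider. The project then has Scrapy's standard layout, roughly as shown below (start.py is added by hand at the project root):

jianshu_spider/
├── scrapy.cfg
├── start.py
└── jianshu_spider/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── js.py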

start.py

from scrapy import cmdline

# Equivalent to running `scrapy crawl js` from the project root,
# so the spider can be started from an IDE with one click.
cmdline.execute("scrapy crawl js".split())

js.py

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu_spider.items import ArticleItem

class JsSpider(CrawlSpider):
    name = 'js'
    allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']

    rules = (
        # Article detail pages look like https://www.jianshu.com/p/<12-char id>;
        # follow=True keeps extracting new article links from every page visited.
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}'), callback='parse_detail', follow=True),
    )

    def parse_detail(self, response):
        # The hashed class names ("_1RuRku", "FxYr8x", ...) come from Jianshu's
        # front-end build and may change; re-check them if fields come back empty.
        title = response.xpath('//h1[@class="_1RuRku"]/text()').get()
        author = response.xpath('//span[@class="FxYr8x"]/a/text()').get()
        author_pic = response.xpath('//a[@class="_1qp91i _1OhGeD"]/img/@src').get()
        pub_time = response.xpath('//div[@class="s-dsoj"]//time/text()').get()
        content = response.xpath('//article').get()
        url = response.url
        article_id = url.split('/')[-1]
        read_count = response.xpath('//div[@class="s-dsoj"]//span[3]/text()').get()
        like_count = response.xpath('//div[@class="s-dsoj"]//span[1]/text()').get()
        word_count = response.xpath('//div[@class="s-dsoj"]//span[2]/text()').get()
        comment_count = response.xpath('//div[@class="_10KzV0"]/span[2]/text()').get()
        subjects = ",".join(response.xpath('//div[@class="_2Nttfz"]/a/span/text()').getall())



        item = ArticleItem(
            title = title,
            author = author,
            author_pic = author_pic,
            pub_time = pub_time,
            content = content,
            origin_url = response.url,
            article_id = article_id,
            read_count = read_count,
            like_count = like_count,
            word_count = word_count,
            comment_count = comment_count,
            subjects = subjects
        )
        # print(item)
        yield item
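
Jianshu renders most of an article page with JavaScript, and the hashed class names in the XPaths above can change when the site's front end is rebuilt. Before a long crawl it is worth checking the selectors interactively; run scrapy shell inside the project so the project settings (including the Selenium middleware defined later) apply, and replace the id with a real article URL:

scrapy shell "https://www.jianshu.com/p/xxxxxxxxxxxx"
>>> response.xpath('//h1[@class="_1RuRku"]/text()').get()
>>> response.xpath('//div[@class="s-dsoj"]//time/text()').get()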

items.py

import scrapy


class ArticleItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
    article_id = scrapy.Field()
    origin_url = scrapy.Field()
    author = scrapy.Field()
    author_pic = scrapy.Field()
    pub_time = scrapy.Field()
    read_count = scrapy.Field()
    like_count = scrapy.Field()
    word_count = scrapy.Field()
    comment_count = scrapy.Field()
    subjects = scrapy.Field()

middlewares.py

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from scrapy.http.response.html import HtmlResponse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import time


class SeleniumDownloadMiddleware(object):
    """Render pages in a real browser so that JavaScript-loaded content
    (article body, "show more" sections) is present in the response."""

    def __init__(self):
        self.driver = webdriver.Chrome()

    def process_request(self, request, spider):
        self.driver.get(request.url)
        time.sleep(2)
        # Keep clicking the "show more" link until it disappears;
        # find_element raises NoSuchElementException once it is gone.
        try:
            while True:
                showmore = self.driver.find_element(By.XPATH, '//div[@class="col-xs-16 main"]/a')
                showmore.click()
                time.sleep(0.3)
        except NoSuchElementException:
            pass
        source = self.driver.page_source
        # Returning an HtmlResponse here short-circuits Scrapy's own downloader.
        return HtmlResponse(url=self.driver.current_url, body=source,
                            request=request, encoding='utf-8')
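
The middleware above never quits Chrome, so a browser window is left running after the crawl. A minimal sketch of cleaning it up by listening for the spider_closed signal (these two methods would be added to the class above):

    @classmethod
    def from_crawler(cls, crawler):
        # Build the middleware and register a handler that runs when the
        # spider finishes, so the browser is shut down cleanly.
        middleware = cls()
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def spider_closed(self, spider):
        self.driver.quit()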

pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import pymysql
from twisted.enterprise import adbapi
from pymysql import cursors

class JianshuSpiderPipeline:
    def __init__(self):
        dbparams = {
            'host' : '127.0.0.1',
            'port' : 3306,
            'user' : 'root',
            'password' : 'yyy99999',
            'database' : 'jianshu',
            'charset' : 'utf8'
        }
        self.conn = pymysql.connect(**dbparams)
        self.cursor = self.conn.cursor()
        self._sql = None

    def process_item(self, item, spider):
        self.cursor.execute(self.sql,(item['title'],item['content'],item['author'],item['author_pic'],item['pub_time'],item['origin_url'],item['article_id']))
        self.conn.commit()
        return item

    @property
    def sql(self):
        if not self._sql:
            self._sql = """
                insert into article(id,title,content,author,author_pic,pub_time,origin_url,article_id)
                values(null,%s,%s,%s,%s,%s,%s,%s)
            """
        return self._sql

class JianshuTwistedPipeline(object):
    def __init__(self):
        dbparams = {
            'host' : '127.0.0.1',
            'port' : 3306,
            'user' : 'root',           # database user
            'password' : 'yyy99999',   # database password
            'database' : 'jianshu',    # database name
            'charset' : 'utf8',
            'cursorclass' : cursors.DictCursor
        }
        self.dbpool = adbapi.ConnectionPool('pymysql',**dbparams)
        self._sql = None

    @property
    def sql(self):
        if not self._sql:
            self._sql = """
                insert into article(id,title,content,author,author_pic,pub_time,article_id,origin_url,read_count,like_count,word_count,comment_count,subjects)
                values(null,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
            """
        return self._sql

    def process_item(self, item, spider):
        # runInteraction runs insert_item in a thread from the pool,
        # so the crawl is not blocked waiting on MySQL.
        defer = self.dbpool.runInteraction(self.insert_item, item)
        defer.addErrback(self.handle_error, item, spider)
        return item

    def insert_item(self,cursor,item):
        cursor.execute(self.sql,(item['title'],item['content'],item['author'],item['author_pic'],item['pub_time'],item['article_id'],item['origin_url'],item['read_count'],item['like_count'],item['word_count'],item['comment_count'],item['subjects']))

    def handle_error(self, error, item, spider):
        # addErrback passes the Failure first, then the extra item and spider args.
        print('---' * 6 + "error" + '---' * 6)
        print(error)
        print('---' * 6 + "error" + '---' * 6)
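
Hard-coding the MySQL credentials in the pipeline works, but they can also be kept in settings.py (e.g. as MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE; these names are made up for this project, not Scrapy built-ins) and read through the from_crawler hook. A minimal sketch of how the pipeline's setup would change:

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy calls this with the running crawler; its settings hold the credentials.
        # The MYSQL_* keys are project-specific settings defined in settings.py.
        s = crawler.settings
        dbparams = {
            'host': s.get('MYSQL_HOST', '127.0.0.1'),
            'port': 3306,
            'user': s.get('MYSQL_USER'),
            'password': s.get('MYSQL_PASSWORD'),
            'database': s.get('MYSQL_DATABASE'),
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor,
        }
        return cls(dbparams)

    def __init__(self, dbparams):
        self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
        self._sql = None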

settings.py

# Scrapy settings for jianshu_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'jianshu_spider'

SPIDER_MODULES = ['jianshu_spider.spiders']
NEWSPIDER_MODULE = 'jianshu_spider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jianshu_spider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
  'Accept-Language': 'en',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'jianshu_spider.middlewares.JianshuSpiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'jianshu_spider.middlewares.SeleniumDownloadMiddleware': 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'jianshu_spider.pipelines.JianshuSpiderPipeline': 300,
    'jianshu_spider.pipelines.JianshuTwistedPipeline': 300,

}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

Creating the MySQL table

/*
Navicat MySQL Data Transfer

Source Server         : aaa
Source Server Version : 50717
Source Host           : localhost:3306
Source Database       : jianshu

Target Server Type    : MYSQL
Target Server Version : 50717
File Encoding         : 65001

Date: 2021-09-22 14:38:29
*/

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for article
-- ----------------------------
DROP TABLE IF EXISTS `article`;
CREATE TABLE `article` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `title` varchar(255) DEFAULT NULL,
  `content` longtext,
  `author` varchar(255) DEFAULT NULL,
  `author_pic` varchar(255) DEFAULT NULL,
  `pub_time` datetime(6) DEFAULT NULL,
  `article_id` varchar(20) DEFAULT NULL,
  `origin_url` varchar(255) DEFAULT NULL,
  `read_count` varchar(11) DEFAULT NULL,
  `like_count` varchar(11) DEFAULT NULL,
  `word_count` varchar(11) DEFAULT NULL,
  `comment_count` varchar(11) DEFAULT NULL,
  `subjects` text,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=70 DEFAULT CHARSET=utf8;
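
Once the spider has run for a while, a quick way to confirm that rows are actually landing in the table is a small standalone script using the same pymysql connection parameters as the pipeline:

import pymysql

# Same connection parameters as the pipeline above.
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                       password='yyy99999', database='jianshu', charset='utf8')
with conn.cursor() as cursor:
    # How many articles have been stored, and the latest few titles.
    cursor.execute("SELECT COUNT(*) FROM article")
    print("rows:", cursor.fetchone()[0])
    cursor.execute("SELECT title, origin_url FROM article ORDER BY id DESC LIMIT 5")
    for title, url in cursor.fetchall():
        print(title, url)
conn.close()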
