Scrapy keeps getting slower the longer it runs — could anyone take a look at what might be causing this and suggest some optimizations?

# spiders/kuaibao.py
# -*- coding: utf-8 -*-
import scrapy
import json
import pymysql
from Kuaibao_json.items import KuaibaoJsonItem


class KuaibaoSpider(scrapy.Spider):
    # NOTE: this block runs at import time and keeps one MySQL connection
    # open for the entire crawl
    conn = pymysql.connect(host="10.16.0.116",
                           port=3306,
                           database="bcpc",
                           user="caiji",
                           password="Ath194wsu123",
                           charset='utf8')
    cursor = conn.cursor()
    cursor.execute('select KEYWORDS from craw_toutiao_keywords')
    # fetch every keyword row
    results = cursor.fetchall()
    result = list(results)
    name = 'kuaibao'
    offset = 0
    allowed_domains = ['r.cnews.qq.com']
    base_url = "http://r.cnews.qq.com/search?mid=8bd4fc7631cb0911414dbf706de95c498b18ea5c&devid=353288083702328&mac=FC%3A42%3A03%3A59%3AF2%3AD2&store=3427&screen_height=1920&apptype=android&origin_imei=353288083702328&hw=samsung_SM-C5000&appversion=3.2.0&appver=23_areading_3.2.0&uid=42367e1ff01651a9&screen_width=1080&sceneid=&android_id=42367e1ff01651a9&Cookie=%20lskey%3D%3B%20luin%3D%3B%20skey%3D%3B%20uin%3D%3B%20logintype%3D0%3B&ssid=CHINASO&omgid=&commonGray=1&currentTab=kuaibao&query="
    url_end = "&activefrom=icon&qqnetwork=wifi&imsi_history=0&omgbizid=&qn-sig=97abf131d6de202d06b92ca641e718c0&qn-rid=04f67c3e-7c3e-400c-add2-cf6831381b10&imsi="
    start_urls = [base_url + page[0] + url_end for page in result]

    def parse(self, response):
        # parse the JSON body once instead of twice
        body = json.loads(response.body)
        if "channellist" not in body:
            return
        data_list = body["channellist"]["media"]

        if not data_list:
            return

        for data in data_list:
            item = KuaibaoJsonItem()
            item["chlname"] = data["chlname"]
            item["desc"] = data["desc"]
            item["subCount"] = data["subCount"]
            item["url"] = "http://kuaibao.qq.com/s/MEDIANEWSLIST?chlid=" + data["chlid"]
            print(item["chlname"])
            yield item

        # for page in self.result:
        #     yield scrapy.Request(url=self.base_url + page[0] + self.url_end, callback=self.parse)
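
# One thing worth checking: the class body above opens a MySQL connection at
# import time and never closes it. Below is a minimal sketch of an alternative
# (same table and credentials as above, not the original code): load the
# keywords in start_requests and release the connection as soon as the URLs
# are built.

import pymysql
import scrapy


class KuaibaoSpiderSketch(scrapy.Spider):
    name = 'kuaibao'
    allowed_domains = ['r.cnews.qq.com']
    # base_url and url_end as defined in the spider above

    def start_requests(self):
        conn = pymysql.connect(host="10.16.0.116", port=3306, database="bcpc",
                               user="caiji", password="Ath194wsu123", charset='utf8')
        try:
            with conn.cursor() as cursor:
                cursor.execute('select KEYWORDS from craw_toutiao_keywords')
                keywords = [row[0] for row in cursor.fetchall()]
        finally:
            conn.close()  # the connection is not needed once the URLs exist
        for kw in keywords:
            yield scrapy.Request(self.base_url + kw + self.url_end, callback=self.parse)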
# items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class KuaibaoJsonItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # Kuaibao account name
    chlname = scrapy.Field()

    # account description
    desc = scrapy.Field()

    # subscriber count
    subCount = scrapy.Field()

    # account URL
    url = scrapy.Field()

    def get_insert_sql(self):
        # build the SQL statement and the parameter tuple for this item
        insert_sql = """insert into craw_kuaibao_member(chlname, chldesc, subCount, url)
                        VALUES (%s, %s, %s, %s)"""
        params = (self["chlname"], self["desc"], self["subCount"], self["url"])
        return insert_sql, params

# pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from twisted.enterprise import adbapi

# class KuaibaoJsonPipeline(object):
#     def open_spider(self,item):
#         self.filename = open("kuaibao.json", "w")
#
#     def process_item(self, item, spider):
#         content = json.dumps(dict(item)) + ",\n"
#         self.filename.write(content)
#         return item
#
#     def close_spider(self, item):
#         self.filename.close()

# class MysqlTwistedPipline(object):
#     def __init__(self):
#         self.conn = connect(host = "10.16.0.116",
#                           port = 3306,
#                           database = "bcpc",
#                           user = "caiji",
#                           password = "Ath194wsu123",
#                           charset = 'utf8')
#         self.cursor = self.conn.cursor()
#
#     def process_item(self, item, spider):
#         insert_sql = """insert into craw_kuaibao_member(chlname, chldesc, subCount, url) VALUES (%s, %s, %s, %s)"""
#         chlname = item["chlname"]
#         chldesc = item["desc"]
#         subCount = item["subCount"]
#         url = item["url"]
#         self.cursor.execute(insert_sql, (chlname, chldesc, subCount, url))
#         self.conn.commit()

class MysqlTwistedPipline(object):
    def __init__(self):
        dbparms = dict(host="10.16.0.116",
                       port=3306,
                       database="bcpc",
                       user="caiji",
                       password="Ath194wsu123",
                       charset='utf8')
        # connection pool: the DB-API module name plus its connection parameters
        self.dbpool = adbapi.ConnectionPool("pymysql", **dbparms)

    # use Twisted's thread pool to run MySQL inserts asynchronously
    def process_item(self, item, spider):
        # schedule the insert on a pool thread
        query = self.dbpool.runInteraction(self.do_insert, item)
        # register the error handler for the async insert
        query.addErrback(self.handle_error, item, spider)
        return item

    def handle_error(self, failure, item, spider):
        # log failures coming back from the async insert
        print(failure)

    def do_insert(self, cursor, item):
        # build the item-specific SQL and execute the insert
        insert_sql, params = item.get_insert_sql()
        cursor.execute(insert_sql, params)
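
# A possible refinement, sketched below and not part of the original code:
# let Scrapy build the pipeline from settings.py so the MySQL credentials are
# not duplicated between the spider and the pipeline. The MYSQL_* setting
# names are assumptions.

from twisted.enterprise import adbapi


class MysqlTwistedPiplineSketch(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        # hypothetical MYSQL_* keys that would live in settings.py
        dbparms = dict(host=settings["MYSQL_HOST"],
                       port=settings.getint("MYSQL_PORT", 3306),
                       database=settings["MYSQL_DB"],
                       user=settings["MYSQL_USER"],
                       password=settings["MYSQL_PASSWORD"],
                       charset='utf8')
        return cls(adbapi.ConnectionPool("pymysql", **dbparms))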
# middlewares.py
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import random

from tools.craw_toutiaoip import GetIP


class RandomProxyMiddleware(object):
    # assign a random proxy to every outgoing request
    def process_request(self, request, spider):
        get_ip = GetIP()
        request.meta["proxy"] = random.choice(get_ip.get_random_ip())
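
# If GetIP() hits the database or the network, building a fresh instance for
# every single request is a plausible reason the crawl keeps getting slower.
# A minimal sketch (assuming get_random_ip() returns a list of proxy URLs)
# that creates it once and reuses it:

import random

from tools.craw_toutiaoip import GetIP


class RandomProxyMiddlewareSketch(object):
    def __init__(self):
        # one GetIP instance for the whole crawl instead of one per request
        self.get_ip = GetIP()

    def process_request(self, request, spider):
        # get_random_ip() is assumed to return a list of proxy URLs
        request.meta["proxy"] = random.choice(self.get_ip.get_random_ip())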
# settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for Kuaibao_json project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Kuaibao_json'

SPIDER_MODULES = ['Kuaibao_json.spiders']
NEWSPIDER_MODULE = 'Kuaibao_json.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'KuaiBao/4.7.10.120 CFNetwork/897.15 Darwin/17.5.0'
LOG_LEVEL = 'ERROR'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 1.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Kuaibao_json.middlewares.KuaibaoJsonSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'Kuaibao_json.middlewares.RandomProxyMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'Kuaibao_json.pipelines.MysqlTwistedPipline': 300,
    # 'Kuaibao_json.pipelines.KuaibaoJsonPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
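
# Since every request goes out through a random proxy, slow or dead proxies
# can hold download slots for the full default timeout of 180 seconds, which
# makes a crawl look progressively slower. A settings sketch (the values are
# guesses to tune, not part of the original project):

CONCURRENT_REQUESTS = 32   # default is 16
DOWNLOAD_TIMEOUT = 15      # default is 180; a dead proxy otherwise blocks a slot for 3 minutes
RETRY_ENABLED = True
RETRY_TIMES = 2            # a retried request passes through the proxy middleware again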