Python crawler: scraping project information from 必联网 (ebnew.com) with Scrapy and saving it to MySQL

First, configure settings.py:

# Default headers: a browser User-Agent plus a logged-in Cookie (the
# session cookie expires, so copy a fresh one from your browser if
# requests start failing):
DEFAULT_REQUEST_HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    'Cookie':'td_cookie=2885562732; __cfduid=da9eaafdfa77f326b2a6493ab987ad3181561096427; loginUser=kzl_knight; Hm_lvt_ce4aeec804d7f7ce44e7dd43acce88db=1565581565,1565664425; JSESSIONID=F831EEBBC3F4DAEFA3788033F85A5B55; Hm_lpvt_ce4aeec804d7f7ce44e7dd43acce88db=1565682498',
}

# Enable the downloader middleware
DOWNLOADER_MIDDLEWARES = {
   'zhaobiao.middlewares.ZhaobiaoDownloaderMiddleware': 543,
}

# Enable the item pipeline
ITEM_PIPELINES = {
   'zhaobiao.pipelines.ZhaobiaoPipeline': 300,
}

# Write error-level logs to a file (keep commented out while debugging)
# LOG_FILE = 'zhaobiao.log'
# LOG_LEVEL = 'ERROR'
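
Two more settings are often useful here; these are my additions, not part of the original configuration:

# the search pages need a logged-in session, and robots.txt rules may
# otherwise filter the requests (check your own compliance requirements
# before disabling this)
ROBOTSTXT_OBEY = False

# throttle requests slightly to be polite to the site
DOWNLOAD_DELAY = 1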

Next, the spider code:

# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy

class BilianSpider(scrapy.Spider):
    name = 'bilian'
    allowed_domains = ['ebnew.com','ss.ebnew.com']
    # search keywords (router, transformer)
    keyword_s = [
        '路由器','变压器'
    ]
    # template for the scraped record (one row per bid)
    sql_data = dict(
        projectcode = '',  # project number
        web = '',  # source website
        keyword = '',  # search keyword that found this bid
        detail_url = '',  # URL of the bid detail page
        title = '',  # title as published on the site
        toptype = '',  # information type
        province = '',  # province
        product = '',  # product category
        industry = '',  # industry
        tendering_manner = '',  # tendering method
        publicity_date = '',  # publication date of the bid
        expiry_date = '',  # bid deadline
    )
    # template for the POST form data
    form_data = dict(
        infoClassCodes = '',
        rangeType = '',
        projectType = 'bid',
        fundSourceCodes = '',
        dateType = '',
        startDateCode = '',
        endDateCode = '',
        normIndustry = '',
        normIndustryName = '',
        zone = '',
        zoneName = '',
        zoneText = '',
        key = '',   # the search keyword
        pubDateType = '',
        pubDateBegin = '',
        pubDateEnd = '',
        sortMethod = 'timeDesc',
        orgName = '',
        currentPage = '',   # current page number
    )


    def start_requests(self):
        for keyword in self.keyword_s:
            # deep copy: give each request its own independent dict
            form_data = deepcopy(self.form_data)
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url='http://ss.ebnew.com/tradingSearch/index.htm',
                formdata=form_data,
                callback=self.parse_start
            )
            # pass form_data along via request.meta
            request.meta['form_data'] = form_data
            yield request
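        # (debug-only test requests below, kept commented out)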
        # yield scrapy.Request(
        #     url='http://www.ebnew.com/businessShow/631160959.html',
        #     callback=self.parse_page2
        # )
        # form_data = self.form_data
        # form_data['key'] = '路由器'
        # form_data['currentPage'] = '2'
        # yield scrapy.FormRequest(
        #     url='http://ss.ebnew.com/tradingSearch/index.htm',
        #     formdata=form_data,
        #     callback=self.parse_page1,
        # )
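        # Why deepcopy rather than form_data = self.form_data? The class-level
        # dict is shared across keywords: reusing it would mean the 'key' set
        # for the second keyword also rewrites the dict already attached to
        # the first pending request, so each FormRequest needs its own copy.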

    def parse_start(self,response):
        a_text_s = response.xpath('//form[@id="pagerSubmitForm"]/a/text()').extract()
        page_max = max(
            [int(a_text) for a_text in a_text_s if re.match(r'\d+', a_text)]
        )
        # page_max = 2  # debug cap: uncomment to stop after two pages
        # page 1's response is already in hand, but parse_page1 is a
        # generator, so the requests it produces must be re-yielded here
        yield from self.parse_page1(response)
        for page in range(2, page_max + 1):
            # deep-copy the form_data passed via meta
            form_data = deepcopy(response.meta['form_data'])
            # set the page number
            form_data['currentPage'] = str(page)
            # POST the search form again for this page
            request = scrapy.FormRequest(
                url='http://ss.ebnew.com/tradingSearch/index.htm',
                formdata=form_data,
                callback=self.parse_page1
            )
            request.meta['form_data'] = form_data
            yield request
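        # Example: if the pager renders links like ['1', '2', '3', '下一页'],
        # the comprehension above keeps [1, 2, 3] and page_max becomes 3.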

    def parse_page1(self,response):
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        content_list_x_s = response.xpath('//div[@class="ebnew-content-list"]/div')
        for content_list_x in content_list_x_s:
            sql_data = deepcopy(self.sql_data)
            sql_data['toptype'] = content_list_x.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = content_list_x.xpath('./div[1]/a/text()').extract_first()
            sql_data['publicity_date'] = content_list_x.xpath('./div[1]/i[2]/text()').extract_first()
            if sql_data['publicity_date']:
                sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])

            sql_data['tendering_manner'] = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            sql_data['expiry_date'] = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            sql_data['detail_url'] = content_list_x.xpath('./div[1]/a/@href').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = '必联网'

            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2
            )
            request.meta['sql_data'] = sql_data
            yield request


    def parse_page2(self,response):
        sql_data = response.meta['sql_data']
        sql_data['projectcode'] = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li[1]/span[2]/text()').extract_first()
        sql_data['industry'] = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li[8]/span[2]/text()').extract_first()
        if not sql_data['projectcode']:
            # fallback: grep the raw HTML for a project number
            projectcode_find = re.findall(r'项目编号[::]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})', response.body.decode('utf-8'))
            sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ""
        # print('parse_2',sql_data)
        yield sql_data
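
To see what the fallback regex in parse_page2 matches, here is a standalone sketch; the sample string is invented for illustration:

import re

# invented fragment of a detail page whose information list is missing
sample = '……项目编号:GXZC2019-G1-001234-KLZB 开标时间……'
print(re.findall(r'项目编号[::]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})', sample))
# -> ['GXZC2019-G1-001234-KLZB']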

The pipeline (pipelines.py), which saves each item to MySQL:

Don't forget to create the database and the table first; a sketch follows, then the pipeline itself.
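
The table definition is not shown in the post, so here is a minimal sketch that matches the keys of sql_data above; the column types and lengths are my assumptions:

import pymysql

conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='', charset='utf8')
cs = conn.cursor()
cs.execute('CREATE DATABASE IF NOT EXISTS zhaobiao DEFAULT CHARSET utf8;')
cs.execute('USE zhaobiao;')
# one VARCHAR column per key of sql_data (lengths are guesses)
cs.execute('''
    CREATE TABLE IF NOT EXISTS t_zhaobiao (
        id INT PRIMARY KEY AUTO_INCREMENT,
        projectcode VARCHAR(80),
        web VARCHAR(40),
        keyword VARCHAR(40),
        detail_url VARCHAR(255),
        title VARCHAR(255),
        toptype VARCHAR(40),
        province VARCHAR(40),
        product VARCHAR(100),
        industry VARCHAR(100),
        tendering_manner VARCHAR(40),
        publicity_date VARCHAR(20),
        expiry_date VARCHAR(40)
    ) DEFAULT CHARSET utf8;
''')
conn.close()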

import pymysql


class ZhaobiaoPipeline(object):
    def __init__(self):
        self.mysql_conn = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='',
            database='zhaobiao',
            charset='utf8',
        )

    def process_item(self, item, spider):
        # create a cursor
        cs = self.mysql_conn.cursor()
        sql_column = ','.join(item.keys())
        # use %s placeholders and let pymysql escape the values itself;
        # building the string by hand breaks on quotes in the scraped text
        sql_holder = ','.join(['%s'] * len(item))
        sql_str = 'insert into t_zhaobiao (%s) values (%s);' % (sql_column, sql_holder)
        cs.execute(sql_str, list(item.values()))
        self.mysql_conn.commit()
        cs.close()
        return item
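
Because the spider yields plain dicts rather than scrapy.Item objects, item.keys() and item.values() work in process_item as-is. With the database and table in place, start the crawl from the project root:

scrapy crawl bilian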

 
