PythonScrapy_Tencent (for other "source code examples", please see my blog)

Spider code

# -*- coding: utf-8 -*-
import scrapy


class TencentspiderSpider(scrapy.Spider):
    name = 'tencentSpider'
    allowed_domains = ['tencent.com']
    # start_urls = ['http://tencent.com/']
    start_urls = ['https://hr.tencent.com/position.php']

    # Scrapy's engine automatically requests each URL in start_urls
    # (if parse is re-invoked via a new Request, the URL depends on the page)
    # and builds a response object that supports XPath queries.
    # That response is passed in as the `response` parameter of parse,
    # so all we need to do here is work with `response`.
    def parse(self, response):


        # response.xpath("//tr//a[@target='_blank']/text()")
        xpath_list = response.xpath("//tr[contains(@class,'even') or contains(@class,'odd')]")
        for i in xpath_list:
            item = {}
            # category
            item["category"] = i.xpath(".//td/text()").extract_first()
            # position title
            item["position"] = i.xpath(".//a[@target='_blank']/text()").extract_first()
            # link to the detail page
            item["href"] = "https://hr.tencent.com/" + i.xpath(".//a[@target='_blank']/@href").extract_first()

            print(item)
            print()

            # yield hands the item from the spider over to the pipeline
            yield item


        # get the next-page link
        url = response.xpath("//a[@id='next']/@href").extract_first()
        # on the last page the link becomes "javascript:;", so stop there
        if url and url != "javascript:;":
            next_url = "https://hr.tencent.com/" + url
            # yielding a Request lets this generator keep running page by page
            # (memory for each request is released once it has been processed)
            # callback: the function that will parse the downloaded response
            yield scrapy.Request(next_url, callback=self.parse)
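
The usual way to run this spider is `scrapy crawl tencentSpider` from the project root. As a minimal sketch, it can also be launched from a plain Python script; this assumes the standard layout generated by scrapy startproject and the spider name defined above (the file name run_spider.py is hypothetical):

# run_spider.py (hypothetical file name)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl("tencentSpider")  # the spider's `name` attribute
process.start()                 # blocks until the crawl finishes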



 

Pipeline code

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

"""
pymongo_与mongoDB数据库建立连接

"""
from pymongo import MongoClient

# Instantiate the client and open the connection
# host="127.0.0.1": connect to the local MongoDB instance
# port=27017: MongoDB's default port
client = MongoClient(host="127.0.0.1", port=27017)

# Select the collection to work with
# e.g. client["test"]["t1"]: "test" is the database name, "t1" the collection
collection = client["hpr"]["Tencent_data"]


class ScrapyTencentPipeline(object):
    def process_item(self, item, spider):

        # store the item passed over from the spider in MongoDB
        # (insert_one replaces the deprecated insert; dict() makes a copy
        #  so the _id that insert_one adds does not leak back into the item)
        collection.insert_one(dict(item))

        return item
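
To confirm that the items really landed in MongoDB, you can query the same collection back with pymongo. A minimal sketch, assuming a local MongoDB on the default port, pymongo 3.7+ (for count_documents), and the database/collection names used above:

from pymongo import MongoClient

client = MongoClient(host="127.0.0.1", port=27017)
collection = client["hpr"]["Tencent_data"]

print(collection.count_documents({}))     # total number of stored positions
for doc in collection.find().limit(3):    # peek at a few documents
    print(doc["position"], doc["href"])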

 

Settings code

# -*- coding: utf-8 -*-

# Scrapy settings for Scrapy_Tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Scrapy_Tencent'

SPIDER_MODULES = ['Scrapy_Tencent.spiders']
NEWSPIDER_MODULE = 'Scrapy_Tencent.spiders'

LOG_LEVEL = "WARNING"
# Crawl responsibly by identifying yourself (and your website) on the user-agent

# Set the USER_AGENT header so requests look like a normal browser to the target site
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'Scrapy_Tencent.middlewares.ScrapyTencentSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'Scrapy_Tencent.middlewares.ScrapyTencentDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# Enable the item pipeline so items yielded by the spider are passed to it
ITEM_PIPELINES = {
   'Scrapy_Tencent.pipelines.ScrapyTencentPipeline': 300,
}
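# Several pipelines can be enabled at once; the number (0-1000) is a priority
# and lower values run first. A hypothetical example with a second pipeline:
#ITEM_PIPELINES = {
#    'Scrapy_Tencent.pipelines.DuplicatesPipeline': 200,
#    'Scrapy_Tencent.pipelines.ScrapyTencentPipeline': 300,
#}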

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
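
The spider above yields plain dicts, which Scrapy accepts directly. If you prefer declared fields (a typo in a field name then raises an error instead of passing silently), an equivalent Item could be defined in Scrapy_Tencent/items.py; this is a sketch, not part of the packaged project:

import scrapy

class TencentPositionItem(scrapy.Item):
    # the same three fields the spider fills into its dict
    category = scrapy.Field()
    position = scrapy.Field()
    href = scrapy.Field()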

 

Since this project is built on the Scrapy framework, I have packaged the whole project (including the configuration files) for completeness, so you can browse everything more easily. I hope we can make progress together!

Young and old alike, let's encourage one another!

Since files cannot be embedded directly in the blog, please use this link (CSDN's minimum download price seems to be 1 point; it cannot be set to 0):

https://download.csdn.net/download/hprissobad/10547605

 

 
