Python Distributed Crawler Tutorial (Scrapy-Redis)

1. Install the dependency:
pip install scrapy-redis -i https://mirror.baidu.com/pypi/simple/
2. Change the spider's base class from scrapy.Spider to scrapy_redis.spiders.RedisSpider.
2.1 Delete start_urls from the spider and add a redis_key = "xxx" attribute. This redis_key is used later to start the crawler from Redis: the spider's first URL is pushed into Redis under this key.
3. Update the Scrapy settings file so that the spider's request deduplication is handled by Redis; the key settings are shown below.

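The key additions to settings.py (these appear again in the full settings.py listed later in this post):

# use the scrapy-redis scheduler and duplicate filter, both backed by Redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# keep the queues in Redis so the crawl can be paused and resumed
SCHEDULER_PERSIST = True

# connection to the shared Redis instance
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_PASSWORD = 'password'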

4. To run the crawler, no longer use scrapy crawl <spider name>. Instead, start the spider with scrapy runspider and then push a start URL into Redis (concrete commands for this project are shown after the generic ones below):
scrapy runspider <spider file>

lpush [redis_key] start_url
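For this project, assuming the spider file is saved as spider.py inside the project's spiders directory and Redis runs locally with the settings above, the commands would look roughly like this. Every worker machine runs the same scrapy runspider command and pulls URLs from the same Redis queue:

# on each worker machine, from inside the Scrapy project
scrapy runspider spider.py

# on any machine that can reach Redis, seed the crawl with the first URL
redis-cli -h 127.0.0.1 -p 6379 -a password
lpush fang:start_urls https://www.fang.com/SoufunFamily.htm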


Source code

spider.py

# -*- coding: utf-8 -*-
import scrapy
import re
from fangtianxia_scrapy.items import NewHouseItem, EsfHouseItem
from scrapy_redis.spiders import RedisSpider

class FangSpider(RedisSpider):
    name = 'fang'
    allowed_domains = ['fang.com']
    # start_urls = ['https://www.fang.com/SoufunFamily.htm']
    redis_key = 'fang:start_urls'

    def parse(self, response):
        trs = response.xpath('//div[@class="outCont"]//tr')
        province = None
        for tr in trs:
            tds = tr.xpath('.//td[not(@class)]')
            province_td = tds[0]
            province_text = province_td.xpath('.//text()').get()
            province_text = re.sub(r'\s', '', province_text)
            if province_text:
                province = province_text
            # Skip listings for overseas cities (grouped under '其它')
            if province == '其它':
                continue

            city_td = tds[1]
            city_links = city_td.xpath('.//a')
            for city_link in city_links:
                city = city_link.xpath('.//text()').get()
                # Taiwan URLs use a different format from the other cities, so skip them
                if '台湾' in city:
                    continue
                # Beijing's second-hand pages redirect to a localized page (not solved yet), so skip them
                if '北京' in city:
                    continue
                city_url = city_link.xpath('.//@href').get()
                city_text = re.findall('.*//(.*).*.fang', city_url)[0]
                # Build the new-house listing URL
                newhouse_url = 'https://' + city_text + '.newhouse.fang.com/house/s/'
                # Build the second-hand house (esf) listing URL
                esf_url = 'https://' + city_text + '.esf.fang.com'
                yield scrapy.Request(
                    url=newhouse_url,
                    callback=self.parse_newhouse,
                    meta={'info': (province, city)}
                )
                yield scrapy.Request(
                    url=esf_url,
                    callback=self.parse_esf,
                    meta={'info': (province, city)}
                )

    def parse_newhouse(self, response):
        # New houses
        province, city = response.meta.get('info')
        lis = response.xpath('//div[contains(@class,"nl_con clearfix")]/ul/li')
        for li in lis:
            name_text = li.xpath('.//div[@class="nlcd_name"]/a/text()').get()
            # Ad <li> elements are mixed into the listing and have no name, so skip them
            name = name_text.strip() if name_text else None
            if name:
                house_type_list = li.xpath('.//div[contains(@class, "house_type")]/a/text()').getall()
                house_type_list = list(map(lambda x: re.sub(r'\s', '', x), house_type_list))
                house_type = ','.join(list(filter(lambda x: x.endswith('居'), house_type_list)))
                area_text = ''.join(li.xpath('.//div[contains(@class, "house_type")]/text()').getall())
                area = re.sub(r'\s|-|/', '', area_text)
                address = li.xpath('.//div[@class="address"]/a/@title').get()
                district_text = ''.join(li.xpath('.//div[@class="address"]/a//text()').getall())
                try:
                    district = re.search(r'.*\[(.+)\].*', district_text).group(1)
                except AttributeError:
                    district = 'None'
                sale = li.xpath('.//div[contains(@class, "fangyuan")]/span/text()').get()
                price = "".join(li.xpath(".//div[@class='nhouse_price']//text()").getall())
                price = re.sub(r"\s|广告", "", price)
                detail_url_text = li.xpath('.//div[@class="nlc_img"]/a/@href').get()
                detail_url = response.urljoin(detail_url_text)
                item = NewHouseItem(province=province, city=city, name=name, house_type=house_type, area=area,
                                    address=address, district=district, sale=sale, price=price, detail_url=detail_url)
                yield item

        next_url = response.xpath('//div[@class="page"]//a[@class="next"]/@href').get()
        if next_url:
            yield scrapy.Request(url=response.urljoin(next_url),
                                 callback=self.parse_newhouse,
                                 meta={'info': (province, city)})

    def parse_esf(self, response):
        # Second-hand houses
        province, city = response.meta.get('info')
        dls = response.xpath('//div[@class="shop_list shop_list_4"]/dl')
        for dl in dls:
            item = EsfHouseItem(province=province, city=city)
            name = dl.xpath('.//span[@class="tit_shop"]/text()').get()
            # Ad entries are mixed into the list and have no name, so skip them
            if name:
                infos = dl.xpath('.//p[@class="tel_shop"]/text()').getall()
                infos = list(map(lambda x: re.sub(r"\s", "", x), infos))
                for info in infos:
                    if "厅" in info:
                        item["house_type"] = info
                    elif '㎡' in info:
                        item["area"] = info
                    elif '层' in info:
                        item["floor"] = info
                    elif '向' in info:
                        item["orientation"] = info
                    elif '年建' in info:
                        item["year"] = re.sub("年建", "", info)
                item["address"] = dl.xpath('.//p[@class="add_shop"]/span/text()').get()
                item["total_price"] = "".join(dl.xpath(".//span[@class='red']//text()").getall())
                item["unit_price"] = dl.xpath(".//dd[@class='price_right']/span[2]/text()").get()
                item["detail_url"] = response.urljoin(dl.xpath(".//h4[@class='clearfix']/a/@href").get())
                item["name"] = name
                # The following five fields are often missing, which would break the MySQL insert, so fill in defaults
                if 'house_type' not in item:
                    item["house_type"] = '/'
                if 'area' not in item:
                    item["area"] = '/'
                if 'floor' not in item:
                    item["floor"] = '/'
                if 'orientation' not in item:
                    item["orientation"] = '/'
                if 'year' not in item:
                    item["year"] = '/'
                yield item
        next_url = response.xpath('//div[@class="page_al"]/p/a/@href').get()
        if next_url:
            yield scrapy.Request(url=response.urljoin(next_url),
                                 callback=self.parse_esf,
                                 meta={'info': (province, city)})

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for fangtianxia_scrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'fangtianxia_scrapy'

SPIDER_MODULES = ['fangtianxia_scrapy.spiders']
NEWSPIDER_MODULE = 'fangtianxia_scrapy.spiders'

# MySQL connection settings
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'password'
MYSQL_DATABASE = 'fangtianxia'
MYSQL_HOST = 'localhost'
MYSQL_PORT = 3306

# MongoDB connection settings
MONGO_URI = 'mongodb://username:password@ip:port'
MONGO_DATABASE = 'dbname'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'fangtianxia_scrapy (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

LOG_FILE = 'scrapy.log'


# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
   'Accept-Language': 'en',
}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'fangtianxia_scrapy.middlewares.FangtianxiaScrapySpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
#    'fangtianxia_scrapy.middlewares.FangtianxiaScrapyDownloaderMiddleware': 543,
     'fangtianxia_scrapy.middlewares.RotateUserAgentMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
   # 'fangtianxia_scrapy.pipelines.FangTianXiaScrapyPipeline': 300,
   # 'fangtianxia_scrapy.pipelines.MysqlTwistedPipeline': 300,
# }

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


# Scrapy-Redis settings
# Store requests in Redis via the scrapy-redis scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# Make all spiders share the same duplicate-filter fingerprints in Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Push scraped items to Redis via the RedisPipeline
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 300,
    # 'fangtianxia_scrapy.pipelines.MysqlTwistedPipeline': 300,
    # 'fangtianxia_scrapy.pipelines.MongodbPipeline': 300,
}

# Keep the scrapy-redis queues in Redis instead of clearing them, so the crawl can be paused and resumed
SCHEDULER_PERSIST = True

# Redis connection settings
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_PASSWORD = 'password'
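
Because RedisPipeline is enabled, scraped items are also serialized as JSON and pushed to a Redis list, which scrapy-redis keys by spider name (so fang:items for this spider by default). A minimal consumer sketch, assuming that default key and the connection settings above, which a single machine could run to drain the shared item queue:

# items_consumer.py: a minimal sketch for reading items pushed by RedisPipeline.
# Assumes the default scrapy-redis item key '<spider>:items' (here fang:items)
# and the Redis connection settings configured above.
import json

import redis

r = redis.StrictRedis(host='127.0.0.1', port=6379, password='password')

while True:
    # BLPOP blocks until an item arrives and returns a (key, value) tuple
    _, data = r.blpop('fang:items')
    item = json.loads(data)
    print(item.get('city'), item.get('name'))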

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class NewHouseItem(scrapy.Item):
    # collection name used by MongodbPipeline in pipelines.py
    collection = 'newhouse'
    province = scrapy.Field()
    city = scrapy.Field()
    name = scrapy.Field()
    house_type = scrapy.Field()
    area = scrapy.Field()
    address = scrapy.Field()
    district = scrapy.Field()
    sale = scrapy.Field()
    price = scrapy.Field()
    detail_url = scrapy.Field()

class EsfHouseItem(scrapy.Item):
    # collection name used by MongodbPipeline in pipelines.py
    collection = 'esfhouse'
    province = scrapy.Field()
    city = scrapy.Field()
    name = scrapy.Field()
    house_type = scrapy.Field()
    area = scrapy.Field()
    floor = scrapy.Field()
    orientation = scrapy.Field()
    year = scrapy.Field()
    address = scrapy.Field()
    total_price = scrapy.Field()
    unit_price = scrapy.Field()
    detail_url = scrapy.Field()

middlewares.py, which mainly provides a random User-Agent middleware


# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import random


class RotateUserAgentMiddleware(UserAgentMiddleware):
    # For more user-agent strings, see http://www.useragentstring.com/pages/useragentstring.php
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
        "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
        "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
        "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
        "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
        "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
        "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

    def process_request(self, request, spider):
        ua = random.choice(self.user_agent_list)
        if ua:
            # Log the user agent currently in use
            # print("********Current UserAgent:%s************" % ua)
            spider.logger.info('Current UserAgent: ' + ua)
            request.headers['User-Agent'] = ua

pipelines.py, which stores items as JSON files and writes them to MySQL asynchronously with the Twisted framework (a MongoDB pipeline is also included)

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exporters import JsonLinesItemExporter
from fangtianxia_scrapy.items import NewHouseItem, EsfHouseItem
from twisted.enterprise import adbapi
import pymysql
import pymongo

class FangTianXiaScrapyPipeline(object):
    def __init__(self):
        self.newhouse_fp = open('newhouse.json', 'ab')
        self.esfhouse_fp = open('esfhouse.json', 'ab')
        self.newhouse_exporter = JsonLinesItemExporter(self.newhouse_fp, ensure_ascii=False)
        self.esfhouse_exporter = JsonLinesItemExporter(self.esfhouse_fp, ensure_ascii=False)

    def process_item(self, item, spider):
        if isinstance(item, NewHouseItem):
            self.newhouse_exporter.export_item(item)
        elif isinstance(item, EsfHouseItem):
            self.esfhouse_exporter.export_item(item)
        return item

    def close_spider(self, spider):
        self.newhouse_fp.close()
        self.esfhouse_fp.close()


class MysqlTwistedPipeline(object):
    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        db_params = dict(
            host=settings['MYSQL_HOST'],
            database=settings['MYSQL_DATABASE'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWORD'],
            port=settings['MYSQL_PORT'],
            charset='utf8mb4',
            use_unicode=True,
            cursorclass=pymysql.cursors.DictCursor
        )
        dbpool = adbapi.ConnectionPool('pymysql', **db_params)
        return cls(dbpool)

    def process_item(self, item, spider):
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error, item, spider)
        return item

    def handle_error(self, failure, item, spider):
        print(failure)

    def do_insert(self, cursor, item):
        if isinstance(item, NewHouseItem):
            insert_sql = """insert into fangtianxia.newhouse(province, city, name, house_type, area, address, 
                            district, sale, price, detail_url)
                            Values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
            cursor.execute(insert_sql, (
                item['province'], item['city'], item['name'], item['house_type'], item['area'], item['address'],
                item['district'], item['sale'], item['price'], item['detail_url']))
        elif isinstance(item, EsfHouseItem):
            insert_sql = """insert into fangtianxia.esfhouse(province, city, name, house_type, area, floor, 
                            orientation, year, address, total_price, unit_price, detail_url)
                            Values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
            cursor.execute(insert_sql, (
                item['province'], item['city'], item['name'], item['house_type'], item['area'], item['floor'],
                item['orientation'], item['year'], item['address'], item['total_price'], item['unit_price'], item['detail_url']))

class MongodbPipeline(object):
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
        self.db[NewHouseItem.collection].create_index([('id', pymongo.ASCENDING)])
        self.db[EsfHouseItem.collection].create_index([('id', pymongo.ASCENDING)])
        print("打开数据库...")

    def close_spider(self,spider):
        print('Finished writing, closing MongoDB connection.')
        self.client.close()

    def process_item(self, item, spider):
        if isinstance(item, (NewHouseItem, EsfHouseItem)):
            # Upsert on detail_url so a re-crawled page overwrites its old record
            self.db[item.collection].update_one({'detail_url': item['detail_url']},
                                                {'$set': dict(item)}, upsert=True)
        print('Writing item...')
        return item

Script to create the MySQL tables (run it once before enabling MysqlTwistedPipeline)

import pymysql

db = pymysql.connect(host='localhost', user='root', password='199524', database='fangtianxia')
cursor = db.cursor()
sql1 = """CREATE TABLE newhouse (
         id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,
         province VARCHAR(200) NOT NULL,
         city VARCHAR(200) NOT NULL,
         name VARCHAR(200) NOT NULL,
         house_type VARCHAR(200) NOT NULL,
         area VARCHAR(200) NOT NULL,
         address VARCHAR(200) NOT NULL,
         district VARCHAR(200) NOT NULL,
         sale VARCHAR(200) NOT NULL,
         price VARCHAR(200) NOT NULL,
         detail_url VARCHAR(200) NOT NULL,
         created_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);"""

sql2 = """CREATE TABLE esfhouse (
         id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,
         province VARCHAR(200) NOT NULL,
         city VARCHAR(200) NOT NULL,
         name VARCHAR(200) NOT NULL,
         house_type VARCHAR(200) NOT NULL,
         area VARCHAR(200) NOT NULL,
         floor VARCHAR(200) NOT NULL,
         orientation VARCHAR(200) NOT NULL,
         year VARCHAR(200) NOT NULL,
         address VARCHAR(200) NOT NULL,
         total_price VARCHAR(200) NOT NULL,
         unit_price VARCHAR(200) NOT NULL,
         detail_url VARCHAR(200) NOT NULL,
         created_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);"""

cursor.execute(sql1)
cursor.execute(sql2)
db.commit()
db.close()
