Scraping Lianjia Housing Prices with Python: Crawling Beijing Listings with Scrapy and Saving Them Locally

This post shows how to use Python's Scrapy framework to crawl second-hand housing listings for Beijing from the Lianjia website and save them locally. The crawler consists of the start URLs and the parse callback, plus a database-handling module and a User-Agent pool. The scraped fields are the listing title, link, location, detail string, total price, and price per square meter. The data is finally written to a txt file for later analysis of Beijing housing prices.

Since I'm planning to rent in Beijing, I browsed the housing prices on the Lianjia website and decided to scrape them and save them locally.

First, look at the Lianjia page source: the listing information is stored in the &lt;li&gt; elements under a &lt;ul&gt;.
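If you want to confirm this structure before writing the spider, the Scrapy shell is handy. A quick sketch (the XPath is the same one used in mylianjia.py below, and it assumes Lianjia's page layout has not changed):

# In a terminal, open the Scrapy shell against one listing page:
#   scrapy shell 'https://bj.lianjia.com/ershoufang/chaoyang/pg1'

# Inside the shell, check that each listing is an <li> under the result <ul>
lis = response.xpath("//div[4]/div[1]/ul/li")
print(len(lis))                                             # number of listings on the page
print(lis[0].xpath('div/div[1]/a/text()').extract_first())  # title of the first listing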


Crawler project structure:

Besides the standard Scrapy files, the project also wraps a database-handling module and a User-Agent pool.
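The User-Agent pool module itself is not shown in this post; settings.py below only references a commented-out lianjia.rotate_useragent.RotateUserAgentMiddleware. A minimal sketch of what such a rotating User-Agent downloader middleware usually looks like (the class name and the agent strings here are illustrative):

import random

# Illustrative list of desktop browser User-Agent strings
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15',
]


class RotateUserAgentMiddleware(object):

    def process_request(self, request, spider):
        # Pick a random User-Agent for every outgoing request
        request.headers['User-Agent'] = random.choice(USER_AGENTS)

If enabled, it would be registered under DOWNLOADER_MIDDLEWARES in settings.py.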

First, mylianjia.py:

# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from ..items import LianjiaItem


class MylianjiaSpider(scrapy.Spider):
    name = 'mylianjia'
    # allowed_domains = ['lianjia.com']
    start_urls = ['https://bj.lianjia.com/ershoufang/chaoyang/pg']

    def start_requests(self):
        # Request listing pages 1-100 (.../pg1, .../pg2, ...)
        for i in range(1, 101):
            url = self.start_urls[0] + str(i)
            yield Request(url, self.parse)

    def parse(self, response):
        print(response.url)

        # Commented-out debugging helper: re-fetch the page with requests and
        # dump the raw HTML to data/lianjia.html
        # import os, requests
        # response1 = requests.get(response.url)
        # if response1.status_code == 200:
        #     dirPath = os.path.join(os.getcwd(), 'data')
        #     if not os.path.exists(dirPath):
        #         os.makedirs(dirPath)
        #     with open(os.path.join(dirPath, 'lianjia.html'), 'w', encoding='utf-8') as fp:
        #         fp.write(response1.text)
        #     print('网页源码写入完毕')  # raw HTML written

        # Every listing is an <li> under the result <ul>
        infoall = response.xpath("//div[4]/div[1]/ul/li")
        # infos = response.xpath('//div[@class="info clear"]')
        for info in infoall:
            item = LianjiaItem()
            info1 = info.xpath('div/div[1]/a/text()').extract_first()            # listing title
            info1_url = info.xpath('div/div[1]/a/@href').extract_first()         # listing link
            info2_dizhi = info.xpath('div/div[2]/div/a/text()').extract_first()  # location
            info2_xiangxi = info.xpath('div/div[2]/div/text()').extract()        # detail fragments
            price = info.xpath('div/div[4]/div[2]/div/span/text()').extract_first()        # total price
            perprice = info.xpath('div/div[4]/div[2]/div[2]/span/text()').extract_first()  # price per m2

            # Join the detail fragments into a single string
            info2_xiangxi1 = ''.join(info2_xiangxi)

            item['houseinfo'] = info1
            item['houseurl'] = info1_url
            item['housedizhi'] = info2_dizhi
            item['housexiangxi'] = info2_xiangxi1
            item['houseprice'] = price
            item['houseperprice'] = perprice
            yield item
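The positional XPath "//div[4]/div[1]/ul/li" is fragile: it breaks as soon as Lianjia shifts the surrounding layout. The class-based selector that is commented out above ('//div[@class="info clear"]') is usually more robust. A quick sketch of that variant, assuming the listing markup still carries the "info clear" class:

for info in response.xpath('//div[@class="info clear"]'):
    # The title link sits in the first child <div> of the "info clear" block,
    # mirroring the div/div[1]/a path used above
    title = info.xpath('div[1]/a/text()').extract_first()
    link = info.xpath('div[1]/a/@href').extract_first()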


Next, items.py:

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class LianjiaItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    houseinfo = scrapy.Field()      # listing title
    houseurl = scrapy.Field()       # listing link
    housedizhi = scrapy.Field()     # location
    housexiangxi = scrapy.Field()   # detail string
    houseprice = scrapy.Field()     # total price (万)
    houseperprice = scrapy.Field()  # price per square meter
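A LianjiaItem behaves like a dict with a fixed set of keys, which is exactly how the spider fills it. A quick illustration (the values here are made up):

item = LianjiaItem()
item['houseinfo'] = '某小区 2室1厅'  # made-up title
item['houseprice'] = '650'
print(dict(item))  # {'houseinfo': '某小区 2室1厅', 'houseprice': '650'}

Assigning a key that was not declared as a Field raises a KeyError, which catches typos early.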


Next, pipelines.py:

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


class LianjiaPipeline(object):

    def process_item(self, item, spider):
        # Print each scraped listing to the console
        print('房屋信息:', item['houseinfo'])         # listing title
        print('房屋链接:', item['houseurl'])          # listing link
        print('房屋位置:', item['housedizhi'])        # location
        print('房屋详细信息:', item['housexiangxi'])  # details
        print('房屋总价:', item['houseprice'], '万')  # total price (10k yuan)
        print('平方米价格:', item['houseperprice'])   # price per square meter
        print('====' * 10)
        return item


Next, csvpipelines.py:

import os

print(os.getcwd())


class LianjiaPipeline(object):

    def process_item(self, item, spider):
        # Use a raw string so the backslashes in the Windows path are not
        # interpreted as escape sequences
        with open(r'G:\pythonAI\爬虫大全\lianjia\data\house.txt', 'a+', encoding='utf-8') as fp:
            name = str(item['houseinfo'])
            dizhi = str(item['housedizhi'])
            info = str(item['housexiangxi'])
            price = str(item['houseprice'])
            perprice = str(item['houseperprice'])
            fp.write(name + dizhi + info + price + perprice + '\n')
            fp.flush()
        # The with-block closes the file automatically, so no explicit close() is needed
        print('写入文件成功')  # written successfully
        return item
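Despite the file name csvpipelines.py, the pipeline above writes plain concatenated strings to a txt file. If you want real comma-separated output, a variant built on Python's csv module might look like this (an illustrative sketch, not part of the original project; the output path is assumed):

import csv


class LianjiaCsvPipeline(object):

    def process_item(self, item, spider):
        # Append one properly quoted CSV row per listing
        with open('data/house.csv', 'a', newline='', encoding='utf-8') as fp:
            writer = csv.writer(fp)
            writer.writerow([
                item['houseinfo'], item['housedizhi'], item['housexiangxi'],
                item['houseprice'], item['houseperprice'],
            ])
        return item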


Next, settings.py:

# -*- coding: utf-8 -*-

# Scrapy settings for lianjia project

#

# For simplicity, this file contains only settings considered important or

# commonly used. You can find more settings consulting the documentation:

#

# https://doc.scrapy.org/en/latest/topics/settings.html

# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html

# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'lianjia'

SPIDER_MODULES = ['lianjia.spiders']

NEWSPIDER_MODULE = 'lianjia.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent

#USER_AGENT = 'lianjia (+http://www.yourdomain.com)'

# Obey robots.txt rules

ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)

#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)

# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay

# See also autothrottle settings and docs

DOWNLOAD_DELAY = 0.5

# The download delay setting will honor only one of:

#CONCURRENT_REQUESTS_PER_DOMAIN = 16

#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)

#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)

#TELNETCONSOLE_ENABLED = False

# Override the default request headers:

#DEFAULT_REQUEST_HEADERS = {

# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',

# 'Accept-Language': 'en',

#}

# Enable or disable spider middlewares

# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html

SPIDER_MIDDLEWARES = {
    'lianjia.middlewares.LianjiaSpiderMiddleware': 543,
    # Note: the two entries below are downloader middlewares; if enabled, they
    # belong in DOWNLOADER_MIDDLEWARES rather than SPIDER_MIDDLEWARES
    #'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    #'lianjia.rotate_useragent.RotateUserAgentMiddleware': 400,
}

# Enable or disable downloader middlewares

# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html

DOWNLOADER_MIDDLEWARES = {
    'lianjia.middlewares.LianjiaDownloaderMiddleware': 543,
}

# Enable or disable extensions

# See https://doc.scrapy.org/en/latest/topics/extensions.html

#EXTENSIONS = {

# 'scrapy.extensions.telnet.TelnetConsole': None,

#}

# Configure item pipelines

# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html

ITEM_PIPELINES = {
    'lianjia.pipelines.LianjiaPipeline': 300,     # print each item to the console
    #'lianjia.iopipelines.LianjiaPipeline': 301,
    'lianjia.csvpipelines.LianjiaPipeline': 302,  # append each item to house.txt
}

# Enable and configure the AutoThrottle extension (disabled by default)

# See https://doc.scrapy.org/en/latest/topics/autothrottle.html

#AUTOTHROTTLE_ENABLED = True

# The initial download delay

#AUTOTHROTTLE_START_DELAY = 5

# The maximum download delay to be set in case of high latencies

#AUTOTHROTTLE_MAX_DELAY = 60

# The average number of requests Scrapy should be sending in parallel to

# each remote server

#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0

# Enable showing throttling stats for every response received:

#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)

# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings

HTTPCACHE_ENABLED = True

HTTPCACHE_EXPIRATION_SECS = 0

HTTPCACHE_DIR = 'httpcache'

HTTPCACHE_IGNORE_HTTP_CODES = []

HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

LOG_LEVEL='INFO'

LOG_FILE='lianjia.log'


Finally, starthouse.py:

from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'mylianjia'])
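starthouse.py simply invokes Scrapy's command line from Python so the spider can be launched from an IDE. Equivalently, you can run it from a terminal in the project root; Scrapy's built-in feed export is also a way to save the items without a custom pipeline:

scrapy crawl mylianjia
scrapy crawl mylianjia -o house.csv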


Result of running the crawler:

(screenshot of the console output)

And the data saved locally:

(screenshot of the saved txt file)

Done. Afterwards you can analyze the total prices and the prices per square meter. Since these are Haidian listings, you can see they all run to tens of thousands of yuan per square meter, with total prices of several million, and these are second-hand homes at that. It's easy to see how hard it is to buy a home in Beijing.

Source code: tyutltf/lianjia — crawl Beijing housing prices from Lianjia and save them to a txt file: https://github.com/tyutltf/lianjia
