Python web crawler: scraping Lianjia housing price data (to be continued)

This post shows how to use the Scrapy framework to crawl second-hand housing listings for the Tongzhou district of Beijing from lianjia.com. It walks through the item definitions in items.py used to hold the scraped data (community name, address, and so on), the settings.py change that sets ROBOTSTXT_OBEY to False so the crawler is not blocked by robots.txt, and the spider logic in spider.py: issuing the start requests, parsing the list pages, and extracting the desired fields.

Scraping Lianjia housing price data (to be continued)
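For orientation, this is roughly the project skeleton that scrapy startproject lianjia creates; the spider file itself is added by hand under spiders/:

lianjia/
    scrapy.cfg
    lianjia/
        __init__.py
        items.py          # item definitions (shown below)
        pipelines.py
        settings.py       # crawler settings (shown below)
        spiders/
            __init__.py
            spider.py     # the spider written in this post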

items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class LianjiaItem(scrapy.Item):
    info1 = scrapy.Field()        # community / listing name
    info1_url = scrapy.Field()    # URL of the listing's detail page
    info2 = scrapy.Field()        # community address
    info2_dizhi = scrapy.Field()  # community address
    info3 = scrapy.Field()        # community address
    info4 = scrapy.Field()        # community address
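A LianjiaItem behaves like a dict whose keys are restricted to the declared fields; a quick illustrative check (not part of the original post):

from lianjia.items import LianjiaItem

item = LianjiaItem()
item['info1'] = 'XX小区'       # declared fields are assigned like dict keys
# item['price'] = '500万'      # an undeclared field would raise KeyError
print(dict(item))              # {'info1': 'XX小区'}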

settings.py

Only one setting needs attention here:

# Obey robots.txt rules

ROBOTSTXT_OBEY = False

This must be set to False: some sites use robots.txt to forbid crawlers, and with the default value Scrapy would simply skip those pages.
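When robots.txt is ignored, it is worth crawling gently. A hedged sketch of two settings commonly adjusted together with ROBOTSTXT_OBEY (the values are illustrative, not from the original post):

# settings.py (additional suggestions)
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'   # identify the client with a browser-like UA
DOWNLOAD_DELAY = 1                                          # pause between requests to the same site

The full settings.py generated by Scrapy, with only ROBOTSTXT_OBEY changed, follows.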

# -*- coding: utf-8 -*-

# Scrapy settings for lianjia project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'lianjia'

SPIDER_MODULES = ['lianjia.spiders']
NEWSPIDER_MODULE = 'lianjia.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'lianjia (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'lianjia.middlewares.MyCustomSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'lianjia.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'lianjia.pipelines.SomePipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

spider.py (avoid naming this file scrapy.py, which invites clashes with the scrapy package itself)

# coding: utf-8
import scrapy
from scrapy.http import Request
from lianjia.items import LianjiaItem


class Myspider(scrapy.Spider):
    name = 'lianjia'
    allowed_domains = ['lianjia.com']   # was "allow_domains", which Scrapy ignores
    base_url = 'http://bj.lianjia.com/ershoufang/tongzhou/pg'

    def start_requests(self):
        # Only page 1 for now; widen the range to crawl more list pages
        for i in range(1, 2):
            url = self.base_url + str(i)
            yield Request(url, self.parse)

    def parse(self, response):
        print(response.url)
        # One <li> per listing on the results page
        for info in response.xpath('//div[4]/div[1]/ul/li'):
            items = LianjiaItem()       # fresh item per listing, so requests don't share state
            items['info1'] = info.xpath('div/div[1]/a/text()').extract_first()      # listing name
            items['info1_url'] = info.xpath('div/div[1]/a/@href').extract_first()   # detail-page URL
            items['info2'] = info.xpath('div/div[2]/div/text()').extract_first()
            items['info2_dizhi'] = info.xpath('div/div[3]/div/text()').extract_first()
            items['info3'] = info.xpath('div/div[3]/div/a/text()').extract_first()
            items['info4'] = info.xpath('div/div[4]/text()').extract_first()
            if items['info1_url']:
                # Follow the detail page, carrying the item along in meta
                yield Request(items['info1_url'], callback=self.parse_mainurl,
                              meta={'items': items})

    def parse_mainurl(self, response):
        items = response.meta['items']
        print(response.url)
        print(items['info1'], items['info1_url'])
        print(items['info2'])
        print(items['info2_dizhi'], items['info3'])
        print(items['info4'])
        print(response.xpath('//div[3]/div/div/a/text()').extract_first())
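The post stops at printing the fields. A minimal sketch of the natural next step, yielding the item from parse_mainurl so the data can actually be collected (this continuation is an assumption, not part of the original post):

    def parse_mainurl(self, response):
        items = response.meta['items']
        # ...extract any detail-page fields here...
        yield items                # hand the finished item back to Scrapy

Running scrapy crawl lianjia -o tongzhou.csv would then write every yielded item to a CSV file through Scrapy's built-in feed exports; alternatively, an item pipeline can be enabled via ITEM_PIPELINES in settings.py to store the data elsewhere.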
