提出要求
爬取链家北京二手房数据,入口链接如下:https://bj.lianjia.com/ershoufang
页面基本没有出现JSON数据的请求,所以非常简单
from fake_useragent import UserAgent
import requests
from lxml import etree
from math import ceil
import time
import random
def iskong(a):
    """Return *a* unchanged when it is non-empty, otherwise an empty string.

    Used to safely concatenate xpath text fragments that may be empty.
    """
    return a if len(a) else ''
# Crawl-round counter, incremented once per full pass over the index page.
n = 0
# Fixed desktop Chrome User-Agent so the site serves the normal HTML pages.
# NOTE(review): fake_useragent is imported above but never used — this static
# header is what every request actually sends.
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
}
# Continuously crawl Lianjia (Beijing) second-hand-house listings: fetch the
# district list from the index page, then walk every result page of every
# district and append each house's details to lianjia.txt.
# NOTE(review): `while True` restarts the whole crawl forever; stop with Ctrl-C.
while True:
    n += 1  # round counter (never read; kept for compatibility)
    url = 'https://bj.lianjia.com/ershoufang'
    response = requests.get(url, headers=header).content.decode('utf-8')
    tree = etree.HTML(response)
    # District links and display names from the sub-navigation bar.
    list_a_href = tree.xpath('//div[@class="sub_nav section_sub_nav"]/a/@href')
    list_title = tree.xpath('//div[@class="sub_nav section_sub_nav"]/a/@title')
    for href, title in zip(list_a_href, list_title):
        # hrefs may be absolute or site-relative.
        if 'https://bj.lianjia.com' in href:
            lianjie = href
        else:
            lianjie = 'https://bj.lianjia.com/' + href
        print(lianjie, title)
        # Crawl this district's listing pages.
        print('正在爬取' + title)
        response = requests.get(lianjie, headers=header).content.decode('utf-8')
        tree = etree.HTML(response)
        num = tree.xpath("//h2//span/text()")
        print("此地区一共有", num[0], '套房子')
        page = ceil(int(num[0]) / 30)  # the site shows 30 listings per page
        print("一共有", page, "页")
        for page_no in range(1, page + 1):
            print("开始爬取第", page_no, "页")
            # BUG FIX: the original hard-coded the Dongcheng district URL here,
            # so every district actually re-crawled Dongcheng's pages.
            # Paginate the current district's own link instead.
            base_url = '{}/pg{}/'.format(lianjie.rstrip('/'), page_no)
            response = requests.get(base_url, headers=header).content.decode('utf-8')
            tree = etree.HTML(response)
            house = tree.xpath('//div[@class="title"]/a/@href')
            for idx, house_url in enumerate(house, start=1):
                try:
                    print("正在爬取第", page_no, "页的第", idx, "套房")
                    response = requests.get(house_url, headers=header).content.decode('utf-8')
                    tree = etree.HTML(response)
                    print("正在获取具体房源信息")
                    # Short random pause to keep the request rate polite.
                    time.sleep(random.choice([.2, .3, .4, .5, 1]))
                    fjxx = tree.xpath('//div[@class="title"]/h1/text()')[0]       # listing title
                    wsgs = tree.xpath('//div[@class="content"]/ul/li/text()')[0]  # room-layout text
                    jiage = tree.xpath('//div[@class="price "]/span/text()')[0]   # price (unit: 万)
                    # ROBUSTNESS FIX: the phone number is split over several text
                    # nodes; the original indexed [0]..[3] unconditionally, so a
                    # shorter list raised IndexError and dropped the whole house.
                    telphone = ''.join(tree.xpath('//div[@class="phone"]//text()')[:4])
                    print(telphone)
                    print(fjxx)
                    # NOTE(review): wsgs[0]/[2]/[6] assume a fixed layout string
                    # (rooms/halls/bathrooms at fixed offsets) — confirm against
                    # the live page markup.
                    a = (
                        "正在爬取第" + str(page_no) + "页的第" + str(idx) + "套房" + '\n' + '\n' +
                        "房屋信息:" + fjxx + '\n'
                        + "卧室个数:" + wsgs[0] + '\n'
                        + '厅的个数:' + wsgs[2] + '\n'
                        + '卫生间个数:' + wsgs[6] + '\n'
                        + "房间价格:" + jiage + '万' + '\n'
                        + telphone + '\n' + '\n'
                    )
                    with open('lianjia.txt', 'a', encoding='utf-8') as fp:
                        fp.write(a)
                except Exception as exc:
                    # Best-effort: report the failure and move on to the next
                    # house. The original bare `except: pass` hid every error
                    # and would even swallow KeyboardInterrupt.
                    print('爬取出错,跳过该房源:', exc)
中途有可能出现爬取出错的问题,最主要的就是网络连接问题和爬取过于频繁,我们可以让程序在每次请求之间随机休眠一小段时间来缓解。
我这里使用了 try/except 结构:只要出现错误,就会停止本次爬取,然后继续爬取下一个房源。由于链家网站的反爬技术非常简单,基本没有,所以我们的爬取几乎没有遇到任何困难。