Scraping Beike community names with Python: crawling a target community's listings month by month

The script below walks a list of community subdomains on fang.com, pages through each community's for-sale (chushou) listings, and appends the month's data as a new sheet in that community's Excel workbook.

import requests
from bs4 import BeautifulSoup
import random
import xlwt
import xlrd
from xlutils.copy import copy

# Pool of desktop User-Agent strings; one is picked at random per run
ua_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
]
headers = {'User-Agent': random.choice(ua_list)}

# Subdomain slugs of the target communities on fang.com
urls = ['bojuedadi0512', 'guanhuyihao0512', 'hefengyasong']

for u in urls:
    url = 'http://' + u + '.fang.com/chushou/'
    response = requests.get(url, headers=headers, timeout=4)
    soup = BeautifulSoup(response.text, 'lxml')

    # Community name, also used as the workbook file name
    xiaoqu = soup.find('a', class_='esfdetailName blueWord').get_text()

    # Open the community's existing Excel workbook
    rdbook = xlrd.open_workbook(xiaoqu + '.xls')
    wtbook = copy(rdbook)

    # Add a new sheet for this month
    worksheet = wtbook.add_sheet('201803', cell_overwrite_ok=True)

    # Find the number of the last page: if a "last page" link exists,
    # follow it and read the page number shown there; otherwise the
    # first page is already the last one
    last_page = soup.find_all('a', id='PageControl1_hlk_last')
    if last_page:
        last_url = last_page[0].attrs['href']
        response_last = requests.get(last_url, headers=headers)
        soup_last = BeautifulSoup(response_last.text, 'lxml')
        total_page = soup_last.find('a', class_='pageNow').get_text()
    else:
        total_page = soup.find('a', class_='pageNow').get_text()

    # Extract the listings page by page ('i3' + page number is the
    # site's paging parameter in the list URL)
    count = 0
    for page in range(1, int(total_page) + 1):
        pageurl = 'http://' + u + '.fang.com/chushou/list/-h330-i3' + str(page) + '/'
        res = requests.get(pageurl, headers=headers, timeout=4)
        s = BeautifulSoup(res.text, 'lxml')
        for i in s.find_all('div', class_='fangList'):
            link = i.find('a')['href']
            title = i.find('p', class_='fangTitle').get_text().replace(',', '').strip()
            mianji = i.find('li').get_text()[:-2]                   # floor area, unit stripped
            zongjia = i.find('span', class_='num').get_text()       # total price
            danjia = i.find('li', class_='update').get_text()[:-4]  # unit price, suffix stripped
            worksheet.write(count, 0, title)
            worksheet.write(count, 1, int(zongjia))
            worksheet.write(count, 2, int(mianji))
            worksheet.write(count, 3, int(danjia))
            worksheet.write(count, 4, link)
            count += 1
            print(count, title)

    wtbook.save(xiaoqu + '.xls')
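
One thing the script takes for granted is that an .xls workbook named after each community already exists: xlrd.open_workbook raises FileNotFoundError otherwise. Below is a minimal bootstrap sketch for the first run, using the same xlwt stack; the helper name ensure_workbook and the seed sheet name 'init' are my own placeholders, not part of the original script.

import os
import xlwt

def ensure_workbook(path):
    """Create an empty .xls workbook at `path` if it does not exist yet."""
    if not os.path.exists(path):
        book = xlwt.Workbook()
        # xlwt cannot save a workbook with zero sheets, so add a
        # throwaway seed sheet first ('init' is just a placeholder name)
        book.add_sheet('init')
        book.save(path)

# Call ensure_workbook(xiaoqu + '.xls') right before
# xlrd.open_workbook(xiaoqu + '.xls') in the main loop.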
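
The script also picks a single User-Agent for the whole run and has no retry logic, so one timeout aborts everything. A small request helper along the following lines, reusing the ua_list above, rotates the User-Agent per request and retries with backoff; the function name fetch and its parameters are my own sketch, not the author's code.

import random
import time
import requests

def fetch(url, ua_list, retries=3, timeout=4):
    """GET `url`, rotating the User-Agent and retrying on failure."""
    for attempt in range(retries):
        try:
            headers = {'User-Agent': random.choice(ua_list)}
            return requests.get(url, headers=headers, timeout=timeout)
        except requests.RequestException:
            if attempt == retries - 1:
                raise                 # give up after the last attempt
            time.sleep(2 ** attempt)  # simple exponential backoff

# e.g. response = fetch(url, ua_list) in place of the bare requests.get(...) calls.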
