【实例】爬取贝壳租房10页数据
代码:
由于部分租房条目缺少某些字段(如朝向、户型),所以添加了异常捕获
import requests
from bs4 import BeautifulSoup
from re import findall
from csv import writer
def get_beike(x):
    """Scrape page *x* of Beike (贝壳) rental listings and append rows to the CSV.

    x: 1-based page number.
    Side effect: appends one row per listing
    ([name, location, rent, size, direction, layout]) to
    'files/贝壳找房数据.csv' and prints each row.
    """
    url = 'https://cd.zu.ke.com/zufang/pg' + str(x) + '/#contentList'
    response = requests.get(url)
    result = response.text
    # These regexes run over the whole page, so their results only line up
    # with the per-listing divs by position (index i below).  Some listings
    # omit a field, hence the IndexError fallbacks.
    sizes = findall(r'(.+?㎡)', result)
    directions = findall(r'<i>/</i>(.+?)<i>/</i>', result)
    patterns = findall(r'(.+?)<span class="hide">', result)
    soup = BeautifulSoup(result, 'lxml')
    listings = soup.select('.content__list--item--main')
    # Open the output once per page (the original reopened it for every row).
    with open('files/贝壳找房数据.csv', 'a', encoding='utf-8', newline='') as f:
        w1 = writer(f)
        # enumerate() avoids shadowing the page-number parameter `x`.
        for i, item in enumerate(listings):
            house = item.select_one(".content__list--item--title>a").text.strip()
            try:
                links = item.select('.content__list--item--des>a')
                location = (links[0].text + links[1].text + links[-1].text).strip()
            except IndexError:
                location = ' '
            rent = item.select_one('.content__list--item-price').text.strip()
            # Guard every positional lookup: regex hits and divs can differ
            # in count when a listing lacks a field.
            try:
                size = sizes[i].strip()
            except IndexError:
                size = ' '
            try:
                direction = directions[i].strip()
            except IndexError:
                direction = ' '
            try:
                pattern = patterns[i].strip()
            except IndexError:
                pattern = ' '
            data = [house, location, rent, size, direction, pattern]
            print(data)
            w1.writerow(data)
if __name__ == '__main__':
    # Truncate the output file and write the column header exactly once,
    # then append page by page via get_beike().
    with open('files/贝壳找房数据.csv', 'w', encoding='utf-8', newline='') as f:
        writer(f).writerow(['名称', '地址', '租金', '大小', '朝向', '规格'])
    for page in range(1, 11):
        get_beike(page)
        print(f'=============第{page}页完成==================')
方法2:
import requests
from bs4 import BeautifulSoup
from re import sub, findall
import csv
def get_one_page(page):
    """Scrape one Beike rental result page and write a row per listing.

    page: 1-based page number.
    Relies on the module-level ``w1`` csv.writer opened in ``__main__``.
    """
    # 1. Fetch the page HTML.
    url = f'https://cd.zu.ke.com/zufang/pg{page}/#contentList'
    response = requests.get(url)
    # 2. Parse it.
    soup = BeautifulSoup(response.text, 'lxml')
    # One div per listing.
    div_list = soup.select('.content__list--item')
    for div in div_list:
        name = div.select_one('.twoline').text.strip()
        info = div.select_one('.content__list--item--des').text.strip()
        info = sub(r'\s+', '', info)
        # Some listings omit area or layout; fall back to a blank field
        # instead of crashing on an empty findall() result.  The area
        # pattern also accepts integer sizes (e.g. "30㎡"), which the
        # original `\d+\.\d+㎡` missed.
        area_matches = findall(r'\d+(?:\.\d+)?㎡', info)
        area = area_matches[0] if area_matches else ' '
        type_matches = findall(r'\d+室\d+厅\d+卫', info)
        house_type = type_matches[0] if type_matches else ' '
        address_links = div.select('.content__list--item--des>a')
        new_address = '-'.join([a.text for a in address_links])
        price = div.select_one('.content__list--item-price').text
        w1.writerow([name, price, area, house_type, new_address])
    print('------------------------一页获取完成--------------------')
if __name__ == '__main__':
    # Use a context manager so the CSV handle is flushed and closed even on
    # error — the original opened the file inline and never closed it.
    # `w1` stays module-global, as get_one_page() reads it.
    with open('files/贝壳租房.csv', 'w', encoding='utf-8', newline='') as f:
        w1 = csv.writer(f)
        w1.writerow(['房屋', '价格', '面积', '户型', '地址'])
        for x in range(1, 11):
            get_one_page(x)