[Example] Scraping 10 pages of 贝壳租房 (Beike) rental listings

Code:
Some listings are missing certain fields, so exception handling is added around those extractions.

import requests
from bs4 import BeautifulSoup
from re import findall
from csv import writer


def get_beike(page):
    url = 'https://cd.zu.ke.com/zufang/pg' + str(page) + '/#contentList'
    response = requests.get(url)
    result = response.text

    # Pull size / orientation / layout from the raw HTML with regexes;
    # these page-wide lists are matched to the listings below by index.
    sizes = findall(r'(.+?㎡)', result)
    directions = findall(r'<i>/</i>(.+?)<i>/</i>', result)
    patterns = findall(r'(.+?)<span class="hide">', result)

    soup = BeautifulSoup(result, 'lxml')
    listm = soup.select('.content__list--item--main')
    i = 0
    for item in listm:
        house = item.select_one('.content__list--item--title>a').text.strip()
        # Some listings lack fields, so each optional field gets a blank fallback.
        try:
            l = item.select('.content__list--item--des>a')
            location = (l[0].text + l[1].text + l[-1].text).strip()
        except IndexError:
            location = ' '
        rent = item.select_one('.content__list--item-price').text.strip()
        size = sizes[i].strip()
        try:
            direction = directions[i].strip()
        except IndexError:
            direction = ' '
        try:
            pattern = patterns[i].strip()
        except IndexError:
            pattern = ' '
        i += 1

        data = [house, location, rent, size, direction, pattern]
        print(data)
        # Append one row per listing; the header row is written once in __main__.
        with open('files/贝壳找房数据.csv', 'a', encoding='utf-8', newline='') as f:
            w1 = writer(f)
            w1.writerow(data)


if __name__ == '__main__':
    # Write the header row once, then scrape pages 1-10.
    with open('files/贝壳找房数据.csv', 'w', encoding='utf-8', newline='') as f:
        w1 = writer(f)
        w1.writerow(['名称', '地址', '租金', '大小', '朝向', '规格'])
    for x in range(1, 11):
        get_beike(x)
        print(f'=============第{x}页完成==================')
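One thing to watch in this first version: sizes, directions and patterns come from page-wide regexes and are matched to listm only by the running index i, so if a single listing is missing one of those fields, that list becomes shorter and every later row shifts by one. A quick length check (just a sketch, reusing the sizes, directions, patterns and listm variables from get_beike above) makes such a mismatch visible before any rows are written:

# Sketch: sanity-check that the page-wide regex lists line up with the parsed listings.
# Assumes sizes, directions, patterns and listm exist exactly as in get_beike above.
counts = {'listings': len(listm), 'sizes': len(sizes),
          'directions': len(directions), 'patterns': len(patterns)}
print(counts)
if len(set(counts.values())) != 1:
    print('Field counts differ: rows after the first missing field may be misaligned.')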


Method 2: parse each listing's own div, so every field comes from the same listing and no index alignment is needed.


import requests
from bs4 import BeautifulSoup
from re import sub, findall
import csv


def get_one_page(page):
    # 1. Fetch the page
    url = f'https://cd.zu.ke.com/zufang/pg{page}/#contentList'
    response = requests.get(url)

    # 2. Parse the data
    soup = BeautifulSoup(response.text, 'lxml')
    # One div per listing
    div_list = soup.select('.content__list--item')
    for div in div_list:
        name = div.select_one('.twoline').text.strip()
        info = div.select_one('.content__list--item--des').text.strip()
        info = sub(r'\s+', '', info)
        # Allow both integer and decimal sizes, e.g. 30㎡ and 36.5㎡
        area = findall(r'\d+\.?\d*㎡', info)[0]
        house_type = findall(r'\d+室\d+厅\d+卫', info)[0]

        # address = findall(r'精选/(.+?-.+?)/|^(.+?)/', info)[0]
        # address = address[0] if address[0] else address[1]
        # print(address)

        # Join the district / sub-district / neighbourhood links into one address string
        address = div.select('.content__list--item--des>a')
        new_address = '-'.join([x.text for x in address])

        price = div.select_one('.content__list--item-price').text
        w1.writerow([name, price, area, house_type, new_address])
    print('------------------------一页获取完成--------------------')


if __name__ == '__main__':
    # Keep the file handle so it can be closed after all pages are written;
    # get_one_page writes rows through the module-level w1.
    f = open('files/贝壳租房.csv', 'w', encoding='utf-8', newline='')
    w1 = csv.writer(f)
    w1.writerow(['房屋', '价格', '面积', '户型', '地址'])

    for x in range(1, 11):
        get_one_page(x)

    f.close()
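As in method 1, an individual listing can lack a field (for example, a non-standard layout string), in which case findall(...)[0] raises an IndexError and the whole page is abandoned. A more tolerant loop body for get_one_page could skip just the broken listing; this is a minimal sketch assuming the same div_list, selectors and module-level w1 as above:

# Sketch: tolerant version of the per-listing loop in get_one_page.
# Assumes div_list and w1 are defined as above; incomplete listings are skipped, not fatal.
for div in div_list:
    try:
        name = div.select_one('.twoline').text.strip()
        info = sub(r'\s+', '', div.select_one('.content__list--item--des').text)
        area = findall(r'\d+\.?\d*㎡', info)[0]
        house_type = findall(r'\d+室\d+厅\d+卫', info)[0]
        new_address = '-'.join(a.text for a in div.select('.content__list--item--des>a'))
        price = div.select_one('.content__list--item-price').text.strip()
    except (IndexError, AttributeError) as e:
        print('Skipping an incomplete listing:', e)
        continue
    w1.writerow([name, price, area, house_type, new_address])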
