Scraping Anjuke Housing Prices with Python and Plotting Them on a Map

1. Housing price map

1.1 The project breaks into three steps

  • First, scrape the housing prices for each district of a city from the Anjuke website with Python, group the listings by residential community, and compute each community's average price.
  • Then use the geocoding service of the Baidu Maps API to look up each community's longitude and latitude. Note that not every community can be resolved: a certain number of them cannot be geocoded to precise coordinates.
  • Finally, use the BDP online analysis tool to draw the community average-price map.

The result looks like this:
[Figure: map of average prices by residential community]

1.2 Project layout


  • get_data: scrapes the Anjuke housing prices with Python and organizes them
  • get_lnglat: geocodes each community with the Baidu Maps API and adds the coordinates

2. Scraping and organizing Anjuke housing prices with Python

import datetime
import random
import re
import time

import requests
from lxml import etree
import numpy as np
import pandas as pd


# Pool of User-Agent strings; one is picked at random per request to make
# the scraper's traffic look less uniform.
USER_AGENTS = [
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)"
]


def make_headers():
    # Build request headers with a randomly chosen User-Agent.
    return {"Cache-Control": "max-age=0",
            "User-Agent": random.choice(USER_AGENTS),
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9"}


# Get the second-hand listing URL of every district of a city on Anjuke
def get_different_area_wang_zhi(url):
    response = requests.get(url, headers=make_headers()).text
    tree = etree.HTML(response)  # don't name this "re": that would shadow the re module
    # Returns a list of URLs; the first one covers all districts combined and
    # is skipped by the caller.
    wang_zhi = tree.xpath(
        '//div[@id="content_Rd1"]/div[@class="clearfix"]/div[@class="details float_l"][1]/div[@class="areas"]/a/@href')
    return wang_zhi


# Get the total number of listing pages (n) of one district
def get_one_area_number(url):
    response = requests.get(url, headers=make_headers()).text
    tree = etree.HTML(response)
    # Text of the "last page" button; xpath returns an empty list (not an
    # empty string) when the paginator is missing.
    number = tree.xpath('//div[@class="pagination"]/ul[@class="page"]/li[@class="page-item last"]/a/text()')
    if not number:
        print('No community listings on this page: ' + url)
        return 0  # the caller then scrapes zero pages instead of crashing on None
    return int(number[0])


# Scrape the visible second-hand listings of one district
def anjuke_new(url, n, city_name):
    rows = []
    for i in range(1, n + 1):
        url_load = url if i == 1 else url + "p" + str(i) + '/'
        mian_ji = []
        year = []
        try:
            print("Scraping page {} of {}".format(i, url))
            res = requests.get(url_load, headers=make_headers()).text
            tree = etree.HTML(res)
            # NB: these XPaths match Anjuke's page layout at the time of writing.
            # Community name of each listing
            name = tree.xpath(
                '//section[@class="list"]/div[@tongji_tag="fcpc_ersflist_gzcount"]//p['
                '@class="property-content-info-comm-name"]/text()')
            # Average price of each listing
            avg_price = tree.xpath(
                '//section[@class="list"]/div[@tongji_tag="fcpc_ersflist_gzcount"]//div[@class="property-price"]/p['
                '@class="property-price-average"]/text()')
            # Floor area
            mian_ji_tmp = tree.xpath(
                '//section[@class="list"]/div[@tongji_tag="fcpc_ersflist_gzcount"]//p['
                '@class="property-content-info-text"][1]/text()')
            for item in mian_ji_tmp:
                mian_ji.append(item.strip())
            # Construction year of the community
            year_tmp = tree.xpath(
                '//section[@class="list"]/div[@tongji_tag="fcpc_ersflist_gzcount"]//p[@class="property-content-info-text"][4]/text()')
            for item in year_tmp:
                year.append(item.strip())
        except Exception:
            print("Error on page {} of {}".format(i, url))
            with open(r"./anjuke_house_error.txt", "a") as log:
                log.write("{} page {} failed\n".format(url, i))
            continue
        print("Finished page {} of {}".format(i, url))

        for j in range(len(avg_price)):
            d = {"小区名称": name[j],
                 "房屋均价": avg_price[j],
                 "房屋面积": mian_ji[j]}
            try:
                d["建造年代"] = year[j]
            except IndexError:
                d["建造年代"] = ''  # the year field is missing for some listings
            rows.append(d)

    return pd.DataFrame(rows)


# Average the prices of listings from the same community and merge the result
# into the district's CSV (one new price column per scrape date)
def processing_data(data, city_name):
    date = datetime.datetime.now()
    date_list = '-'.join([str(date.year), str(date.month), str(date.day)])
    price_col = '小区均价' + date_list

    # Compute each community's average price; the price is the leading digit
    # run of the 房屋均价 string.
    rows = []
    seen = []
    reg = r'^\d+'
    for i in data.index:
        data_name = data.loc[i, '小区名称']
        if data_name in seen:
            continue
        data_tmp = data[data['小区名称'] == data_name]
        price = [int(re.findall(reg, data_tmp.loc[j, '房屋均价'])[0]) for j in data_tmp.index]
        rows.append({'小区名称': data_name, price_col: np.mean(np.array(price))})
        seen.append(data_name)

    try:
        data_prv = pd.read_csv('./{}_anjuke_house.csv'.format(city_name), encoding='utf_8_sig')
    except FileNotFoundError:
        # First run for this district: write a fresh CSV.
        pd.DataFrame(rows).to_csv('./{}_anjuke_house.csv'.format(city_name), index=False, encoding='utf_8_sig')
        return

    # Later runs: fill in today's column in the existing file, appending rows
    # for communities seen for the first time.
    data_prv[price_col] = ''
    for row in rows:
        if row['小区名称'] in data_prv['小区名称'].values:
            data_prv.loc[data_prv['小区名称'] == row['小区名称'], price_col] = row[price_col]
        else:
            # DataFrame.append() returns a copy (and was removed in pandas 2.0),
            # so concat and reassign instead.
            data_prv = pd.concat([data_prv, pd.DataFrame([row])], ignore_index=True)
    data_prv.to_csv('./{}_anjuke_house.csv'.format(city_name), index=False, encoding='utf_8_sig')


# Main driver for Anjuke second-hand listings
def anjuke_second_main(url):
    wang_zhi = get_different_area_wang_zhi(url)
    # Skip the first URL: it is the combined page for all districts.
    for item in wang_zhi[1:]:
        n = get_one_area_number(item)
        # Despite the name, this captures the district slug at the end of the
        # URL (e.g. .../hanyang/), which also names the output CSV.
        city_name = re.findall(r'/([A-Za-z]+)/$', item)[0]
        data = anjuke_new(item, n, city_name)
        processing_data(data, city_name)
        time.sleep(random.randint(10, 15))  # pause between districts to stay polite


# To scrape a different city, only the city abbreviation in the URL needs to
# change, e.g. xa for Xi'an or xm for Xiamen; see Anjuke's own URLs.
if __name__ == "__main__":
    url = "https://wuhan.anjuke.com/?pi=PZ-baidu-pc-all-biaoti"
    anjuke_second_main(url)
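For example, a hypothetical run against Xi'an (assuming the ?pi=... tracking parameter on the Wuhan URL above is optional):

url = "https://xa.anjuke.com/"  # xa = Xi'an; xm would be Xiamen, and so on
anjuke_second_main(url)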

  • get_different_area_wang_zhi()
    Gets the second-hand listing URL of every district of a city on Anjuke.
  • get_one_area_number()
    Gets the number of listing pages in one district's section. I found that Anjuke shows at most 50 pages per district, so this is probably an incomplete sample rather than every listing in the district.
  • anjuke_new()
    Scrapes all the housing information visible on one district's listing pages.
  • processing_data()
    Cleans up the scraped data, mainly by averaging the prices of listings in the same community to get the community average (see the groupby sketch after this list).
  • anjuke_second_main()
    Main driver.
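As an aside, the per-community averaging that processing_data does with explicit loops can be written more compactly with pandas groupby. A minimal sketch, assuming (as the r'^\d+' regex above does) that each 房屋均价 string starts with the price digits:

import re
import pandas as pd


def average_by_community(data, price_col):
    # Pull the leading digit run out of each 房屋均价 string...
    data = data.copy()
    data[price_col] = data['房屋均价'].map(lambda s: int(re.match(r'\d+', s).group()))
    # ...and average it per community.
    return data.groupby('小区名称', as_index=False)[price_col].mean()

The loop version above is kept in the script because it also merges each new scrape date into an existing CSV column by column.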

3. Geocoding with the Baidu Maps API and organizing the results

import os

import pandas as pd
import requests


# Collect every CSV file in the given directory
def getfilepath(path):
    list_name = []
    for file in os.listdir(path):
        file_path = os.path.join(path, file)
        if os.path.isfile(file_path) and file_path.endswith('.csv'):
            list_name.append(file_path)
    return list_name


# Geocode one community with the Baidu Maps Geocoding API and return its
# coordinates, or 0 when no usable result comes back
def getlnglat(address, city):
    url = 'http://api.map.baidu.com/geocoding/v3/'
    ak = 'sQMfKlPK4yCefIC28eb3Hn3QTO9MzEUV'  # replace with your own Baidu Maps AK
    # requests URL-encodes the parameters itself, so no manual quoting is
    # needed. The callback parameter must be left out: with it, the API
    # returns JSONP that JSON parsing would choke on.
    params = {'city': city, 'address': address, 'output': 'json', 'ak': ak}
    temp = None
    for _ in range(3):  # retry a few times rather than looping forever
        try:
            temp = requests.get(url, params=params).json()
        except Exception:
            print('Request failed for: ' + address)
            continue
        if temp['status'] == 0:
            break
    if temp is None or temp['status'] != 0:
        return 0

    # Results resolved only to district ('区县') or city ('城市') precision are
    # too coarse to be a residential community, so they are skipped.
    if temp['result']['level'] != '区县' and temp['result']['level'] != '城市':
        # lat = latitude, lng = longitude
        return {'小区名称': address,
                'lat': temp['result']['location']['lat'],
                'lng': temp['result']['location']['lng']}
    print('Address ' + address + ' is not a residential community: ' + temp['result']['level']
          + ' lat ' + str(temp['result']['location']['lat'])
          + ' lng ' + str(temp['result']['location']['lng']))
    return 0


# File-name slug -> Wuhan district name
AREA_MAP = {
    'caidianz': '蔡甸区',
    'dongxihu': '东西湖区',
    'hannanz': '汉南区',
    'hanyang': '汉阳区',
    'hongshana': '洪山区',
    'huangpiz': '黄陂区',
    'jiangan': '江岸区',
    'jiangxiat': '江夏区',
    'qiaokou': '硚口区',
    'qingshan': '青山区',
    'wuchanga': '武昌区',
    'xinzhouz': '新洲区',
    'zhuankouk': '沌口',
}


# Look up coordinates for every community in one CSV and write them back
def add_lnglat(path):
    city = '武汉市'
    area = ''
    for slug, name in AREA_MAP.items():
        if slug in path:
            area = name
            break

    data = pd.read_csv(path, encoding='utf_8_sig')
    for village_name in data['小区名称'].values:
        coordinate = getlnglat(village_name, city + area)
        if coordinate != 0:
            mask = data['小区名称'] == coordinate['小区名称']
            data.loc[mask, 'lat'] = coordinate['lat']
            data.loc[mask, 'lng'] = coordinate['lng']
    data.to_csv(path, index=False, encoding='utf_8_sig')


if __name__ == '__main__':
    list_name = getfilepath('./')
    for item in list_name:
        add_lnglat(item)

  • getfilepath()
    Collects all the CSV files in the current directory.
  • getlnglat()
    Gets the longitude and latitude of one community address and checks whether the result is plausible (see the sample response after this list).
  • add_lnglat()
    Writes the coordinates back into the original CSV files, ready for plotting.
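For reference, a successful Geocoding v3 response looks roughly like this (values are illustrative); getlnglat only reads status, result.location and result.level:

{
    "status": 0,
    "result": {
        "location": {"lng": 114.30, "lat": 30.59},
        "precise": 1,
        "confidence": 80,
        "comprehension": 100,
        "level": "地产小区"
    }
}

A level of '区县' or '城市' means the name could only be resolved to a whole district or city, which is why getlnglat discards those hits.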

4. Data processing with BDP

BDP lives at https://me.bdp.cn/index.html#/.
Upload the CSV files, set the latitude and longitude fields, and colour the points by average price to get the map.
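Each district CSV should end up looking roughly like this (the community names and figures below are made up for illustration):

小区名称,小区均价2021-8-1,lat,lng
某某花园,15230.0,30.5931,114.3054
某某公馆,18764.5,30.5812,114.2988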
