Xiamen Rental Market Analysis App02 Project

Requirements Analysis

1. Crawl rental listings from the Xiamen rental website (xmfish.com)
2. Analyze rental prices in each district of Xiamen
3. Visualize the data so that anyone can clearly see the rental price level of each Xiamen district

Architecture Selection

(figure: architecture diagram)

Development Environment and Required Plugins

(figure: development environment and plugins)

Data Source

Page URL: http://fangzi.xmfish.com/web/search_hire.html?h=&hf=&ca=59201&r=&s=&a=&rm=&f=&d=&tp=&l=0&tg=&hw=&o=&ot=0&tst=0&page=2
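Judging from the crawler below, the query parameter ca encodes the district (5920X, where X runs from 1 to 6) and page selects the result page. A minimal sketch of the URL scheme (build_url is a hypothetical helper, not part of the project code):

# A minimal sketch of the URL scheme used by the crawler; build_url is hypothetical.
URL_HEAD = "http://fangzi.xmfish.com/web/search_hire.html?h=&hf=&ca=5920"
URL_TAIL = "&r=&s=&a=&rm=&f=&d=&tp=&l=0&tg=&hw=&o=&ot=0&tst=0&page="

def build_url(district_code, page):
    # district_code: 1 Siming, 2 Huli, 3 Jimei, 4 Tong'an, 5 Xiang'an, 6 Haicang
    return URL_HEAD + str(district_code) + URL_TAIL + str(page)

print(build_url(1, 2))  # page 2 of the Siming listings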

Program structure diagram:
(figure: program structure diagram)

Crawler (Crawling_Data.py):

# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import csv

# Num: running record counter (currently unused)
Url_head = "http://fangzi.xmfish.com/web/search_hire.html?h=&hf=&ca=5920"
Url_tail = "&r=&s=&a=&rm=&f=&d=&tp=&l=0&tg=&hw=&o=&ot=0&tst=0&page="
Num = 0
Filename = "rent.csv"


# Append one page's records to the CSV file.
# No encoding is passed, so Python uses the platform default (typically GBK on
# a Chinese Windows machine), which is why SparkBatch_Data.py reads the file as GBK.
def write_csv(msg_list):
    with open(Filename, 'a', newline='') as out:
        csv_write = csv.writer(out, dialect='excel')
        for msg in msg_list:
            csv_write.writerow(msg)


# Fetch and parse a single listing page
def acc_page_msg(page_url):
    web_data = requests.get(page_url).content.decode('utf8')
    soup = BeautifulSoup(web_data, 'html.parser')
    address_list = []
    area_list = []
    num_address = 0
    num_area = 0
    msg_list = []

    # Build the address list and the district (area) list
    for tag in soup.find_all(attrs="list-addr"):
        for em in tag:
            count = 0
            for a in em:
                count += 1
                if count == 1 and a.string != "[":
                    address_list.append(a.string)
                elif count == 2:
                    area_list.append(a.string)
                    num_area += 1
                elif count == 4:
                    if a.string is not None:
                        address_list[num_address] = address_list[num_address] + "-" + a.string
                    else:
                        address_list[num_address] = address_list[num_address] + "-Null"
                    num_address += 1

    # Build the price list
    price_list = []
    for tag in soup.find_all(attrs="list-price"):
        price_list.append(tag.b.string)

    # Combine into a list of (address, area, price) tuples
    for i in range(len(price_list)):
        txt = (address_list[i], area_list[i], price_list[i])
        msg_list.append(txt)

    # Write this page's records to the CSV
    write_csv(msg_list)


# Build the URL list for all pages of all six districts
def get_pages_urls():
    urls = []
    # Siming: 134 accessible pages
    for i in range(134):
        urls.append(Url_head + "1" + Url_tail + str(i + 1))
    # Huli: 134 accessible pages
    for i in range(134):
        urls.append(Url_head + "2" + Url_tail + str(i + 1))
    # Jimei: 27 accessible pages
    for i in range(27):
        urls.append(Url_head + "3" + Url_tail + str(i + 1))
    # Tong'an: 41 accessible pages
    for i in range(41):
        urls.append(Url_head + "4" + Url_tail + str(i + 1))
    # Xiang'an: 76 accessible pages
    for i in range(76):
        urls.append(Url_head + "5" + Url_tail + str(i + 1))
    # Haicang: 6 accessible pages
    for i in range(6):
        urls.append(Url_head + "6" + Url_tail + str(i + 1))
    return urls


def run():
    print("Starting crawler")
    # Write the CSV header row first
    with open(Filename, 'a', newline='') as out:
        csv_write = csv.writer(out, dialect='excel')
        csv_write.writerow(("address", "area", "price"))
    url_list = get_pages_urls()
    for url in url_list:
        try:
            acc_page_msg(url)
        except Exception:
            print("Malformed page, skipping:", url)
    print("Crawler finished")

Data Storage

(figure: the stored rent.csv data)

Data Processing (the core of the project)

Spark data processing program (SparkBatch_Data.py)

# -*- coding: utf-8 -*-

from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType


def spark_analyse(filename):
    # Entry point of the Spark batch job: create a local SparkSession
    spark = SparkSession.builder.master("local").appName("DataBatch").getOrCreate()
    
    df = spark.read.csv(filename, header=True, encoding="GBK")
    # Preview the loaded data
    df.show()
    # Index order for every per-district list:
    # 0 Haicang, 1 Huli, 2 Jimei, 3 Siming, 4 Xiang'an, 5 Tong'an
    max_list = [0 for i in range(6)]   # per-district maximum
    mean_list = [0 for i in range(6)]  # per-district mean
    min_list = [0 for i in range(6)]   # per-district minimum
    mid_list = [0 for i in range(6)]   # per-district median (via approxQuantile)

    # Cast price to int -- essential so the price column is compared numerically
    # rather than as strings. Also drop implausible prices: listings marked
    # "面议" (price negotiable), fake 1-yuan placeholders, and extremely
    # expensive office rentals.
    df = df.filter(df.price != '面议').withColumn("price", df.price.cast(IntegerType()))
    df = df.filter(df.price >= 50).filter(df.price <= 40000)

    # Per-district means
    mean_list[0] = df.filter(df.area == "海沧").agg({"price": "mean"}).first()['avg(price)']
    mean_list[1] = df.filter(df.area == "湖里").agg({"price": "mean"}).first()['avg(price)']
    mean_list[2] = df.filter(df.area == "集美").agg({"price": "mean"}).first()['avg(price)']
    mean_list[3] = df.filter(df.area == "思明").agg({"price": "mean"}).first()['avg(price)']
    mean_list[4] = df.filter(df.area == "翔安").agg({"price": "mean"}).first()['avg(price)']
    mean_list[5] = df.filter(df.area == "同安").agg({"price": "mean"}).first()['avg(price)']

    # Per-district minimums
    min_list[0] = df.filter(df.area == "海沧").agg({"price": "min"}).first()['min(price)']
    min_list[1] = df.filter(df.area == "湖里").agg({"price": "min"}).first()['min(price)']
    min_list[2] = df.filter(df.area == "集美").agg({"price": "min"}).first()['min(price)']
    min_list[3] = df.filter(df.area == "思明").agg({"price": "min"}).first()['min(price)']
    min_list[4] = df.filter(df.area == "翔安").agg({"price": "min"}).first()['min(price)']
    min_list[5] = df.filter(df.area == "同安").agg({"price": "min"}).first()['min(price)']

    # Per-district maximums
    max_list[0] = df.filter(df.area == "海沧").agg({"price": "max"}).first()['max(price)']
    max_list[1] = df.filter(df.area == "湖里").agg({"price": "max"}).first()['max(price)']
    max_list[2] = df.filter(df.area == "集美").agg({"price": "max"}).first()['max(price)']
    max_list[3] = df.filter(df.area == "思明").agg({"price": "max"}).first()['max(price)']
    max_list[4] = df.filter(df.area == "翔安").agg({"price": "max"}).first()['max(price)']
    max_list[5] = df.filter(df.area == "同安").agg({"price": "max"}).first()['max(price)']
    # Per-district medians; approxQuantile returns a list, hence the trailing [0]
    mid_list[0] = df.filter(df.area == "海沧").approxQuantile("price", [0.5], 0.01)[0]
    mid_list[1] = df.filter(df.area == "湖里").approxQuantile("price", [0.5], 0.01)[0]
    mid_list[2] = df.filter(df.area == "集美").approxQuantile("price", [0.5], 0.01)[0]
    mid_list[3] = df.filter(df.area == "思明").approxQuantile("price", [0.5], 0.01)[0]
    mid_list[4] = df.filter(df.area == "翔安").approxQuantile("price", [0.5], 0.01)[0]
    mid_list[5] = df.filter(df.area == "同安").approxQuantile("price", [0.5], 0.01)[0]

    all_list = []
    all_list.append(min_list)
    all_list.append(max_list)
    all_list.append(mean_list)
    all_list.append(mid_list)

    print("结束spark分析")
    # print(all_list.printSchema())
    return all_list
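The six near-identical filter/agg lines per statistic can be collapsed. Below is a more compact sketch (not the project's code) that returns the same all_list layout, computing min, max, and mean for every district in one groupBy pass; approxQuantile has no grouped variant, so the medians keep one filter per district:

# Compact alternative sketch producing the same all_list layout as above.
from pyspark.sql import functions as F

DISTRICTS = ["海沧", "湖里", "集美", "思明", "翔安", "同安"]

def spark_analyse_compact(df):
    # One pass for min/max/mean of every district
    stats = (df.groupBy("area")
               .agg(F.min("price").alias("lo"),
                    F.max("price").alias("hi"),
                    F.mean("price").alias("avg")))
    by_area = {row["area"]: row for row in stats.collect()}
    min_list = [by_area[d]["lo"] for d in DISTRICTS]
    max_list = [by_area[d]["hi"] for d in DISTRICTS]
    mean_list = [by_area[d]["avg"] for d in DISTRICTS]
    # Medians still need one filtered approxQuantile call per district
    mid_list = [df.filter(df.area == d).approxQuantile("price", [0.5], 0.01)[0]
                for d in DISTRICTS]
    return [min_list, max_list, mean_list, mid_list]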

Problems Encountered During Data Processing

Data format (encoding) issue

(figure: mis-encoded rows in the source data)

The file's encoding caused the data loaded into the Spark DataFrame to be
garbled, and the filter operator then dropped every row, as the screenshot
below shows.

Spark's filter operator discards rows that do not match its predicate. If the
DataFrame's contents are not displayed normally but appear as mojibake like
the following, filter removes every row and the final result is None.

(figure: garbled DataFrame contents after reading without an encoding)

Passing an explicit encoding when reading the data avoids the problem;
decoded as GBK, the data displays correctly.

(figure: the read call with an explicit encoding, and the correctly decoded DataFrame)
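For reference, since the screenshots are not reproduced here, the fix is the read call already used in SparkBatch_Data.py:

# Read the CSV with an explicit GBK encoding so the Chinese text decodes correctly
df = spark.read.csv(filename, header=True, encoding="GBK")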

Data Visualization

Visualization program (Draw.py)

# -*- coding: utf-8 -*-
from pyecharts import Map


# Plot the per-district rent statistics on a map of Xiamen
def draw_bar(all_list):
    print("Starting to draw")
    attr = ["海沧", "湖里", "集美", "思明", "翔安", "同安"]

    # all_list layout (see SparkBatch_Data.spark_analyse):
    # v0 = minimums, v1 = maximums, v2 = means, v3 = medians
    v0 = all_list[0]
    v1 = all_list[1]
    v2 = all_list[2]
    v3 = all_list[3]

    # District names as they appear on the Xiamen map
    attrlist = ["海沧区", "湖里区", "集美区", "思明区", "翔安区", "同安区"]

    # Only the mean prices (v2) are plotted on the map
    map3 = Map("厦门地图", '厦门', width=1600, height=800)
    map3.add("厦门", attrlist, v2, visual_range=[1000, 4000], maptype='厦门', is_visualmap=True, visual_text_color='#000')
    map3.render("厦门租房价格分析地图.html")

(figure: rendered map of average rent by district)
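Only the district means make it onto the map above. A companion chart of all four statistics could look like the following sketch; it assumes the same legacy pyecharts 0.5.x API as the Map call, and draw_stats_bar plus the output filename are hypothetical:

# Sketch, assuming the legacy pyecharts 0.5.x Bar API; names are hypothetical.
from pyecharts import Bar

def draw_stats_bar(all_list):
    attr = ["海沧", "湖里", "集美", "思明", "翔安", "同安"]
    bar = Bar("厦门各区租房价格统计")
    bar.add("min", attr, all_list[0])
    bar.add("max", attr, all_list[1])
    bar.add("mean", attr, all_list[2])
    bar.add("median", attr, all_list[3])
    bar.render("厦门租房价格柱状图.html")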

Main entry point (main.py)

# -*- coding: utf-8 -*-
import Draw
import Crawling_Data
import SparkBatch_Data

if __name__ == '__main__':
    print("Starting main program")
    Filename = "rent.csv"
    # Crawling_Data.run()  # uncomment to re-crawl instead of reusing rent.csv
    all_list = SparkBatch_Data.spark_analyse(Filename)
    Draw.draw_bar(all_list)
    # print("Main program finished")

Project code repository: github

Project Members

More Information

(figure)
