论文复现:假热点的重分配分析(Visualizing patterns in spatially ambiguous point data)——空间模糊点数据中模式的可视化,基于 rasterio 和 geopandas

1.论文及算法简要说明
论文地址
===简要说明:论文提出了一种新算法,智能地重新分配那些原本会形成"假热点"的数据点,将它们移动到能够反映真实世界模式的同质尺度位置,从而在不受多尺度数据产生的"假热点"负面影响的情况下,创建更具代表性的可视化。
===大致意思就是,从论文的数据来讲,论文以人口热点为例,数据是一个地区内的一些 Twitter 用户在英国王妃婚礼这天发的动态。但直接根据这些点做热力分析是不对的:算法中考虑了区域对热力点的影响,使用该地区的人口作为权重栅格,将每个点重新定位到所在分区内权重栅格值最大的位置,再使用衰减公式进行热点值的分配;分区内的点只会分配到自己分区内,不会跑到另一个分区去。
在这里插入图片描述

2.代码


import math
from time import time

from numpy.random import uniform

from rasterio import open as rio_open
import numpy as np
import geopandas as gpd
from shapely.geometry import Point
from matplotlib import pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.cm import ScalarMappable
from rasterio.plot import show as rio_show
from matplotlib_scalebar.scalebar import ScaleBar
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.pyplot import subplots, savefig, get_cmap, colorbar


def calc_seed(w, r, x, y, polygon, ws_ds):
    """Relocate a point to the best-weighted nearby location ("seed").

    Samples up to ``w`` random candidate locations around ``(x, y)`` that fall
    inside ``polygon`` and returns the candidate with the highest value in the
    weighting-surface raster.

    :param w: max number of in-polygon candidates to keep (weighting influence)
    :param r: search radius around the original point (map units)
    :param x: x coordinate of the original point
    :param y: y coordinate of the original point
    :param polygon: administrative polygon the relocated point must stay inside
    :param ws_ds: open rasterio dataset of the weighting surface
    :return: (x, y, weight_value) of the chosen seed location
    """
    # BUG FIX: numpy trig functions expect radians; the original fed degrees,
    # which skewed the directional distribution of candidates.
    azimuths = np.radians(uniform(0, 360, 100))
    # Each candidate is drawn uniformly between the point and a spot up to
    # r away along the sampled azimuth (paper's sampling scheme).
    xs = uniform(x, x + r * np.sin(azimuths), 100)
    ys = uniform(y, y + r * np.cos(azimuths), 100)

    # Keep only candidates inside the parent polygon, capped at w.
    candidates = []
    for cx, cy in zip(xs, ys):
        p = Point(cx, cy)
        if p.within(polygon):
            candidates.append(p)
            if len(candidates) == w:
                break

    # PERF FIX: read the raster band once, not once per candidate.
    band = ws_ds.read(1)

    # ROBUSTNESS FIX: if no candidate landed inside the polygon (tiny or
    # oddly-shaped districts), keep the original point instead of crashing.
    if not candidates:
        return x, y, band[ws_ds.index(x, y)]

    # Look up the weighting-surface value under each candidate.
    # NOTE(review): assumes every candidate lies within the raster extent —
    # ws_ds.index can yield out-of-range rows/cols otherwise; confirm extents.
    values = [band[ws_ds.index(p.x, p.y)] for p in candidates]

    # First candidate with the greatest weight wins (matches the original
    # GeoDataFrame max-filter, which also took the first maximum).
    best = int(np.argmax(values))
    return candidates[best].x, candidates[best].y, values[best]


def calc_heat(w, s):
    """Redistribute ambiguous points over a weighting surface and plot the heat map.

    Implements the paper's "false hotspot" redistribution: every point is moved
    to the highest-weight location nearby (staying inside its administrative
    polygon, via ``calc_seed``) and its heat is spread around the new location
    with a linear distance-decay kernel. The result is rendered and saved to
    ``out/manchester_tweets1.png``.

    :param w: influence of the weighting surface (candidate samples per point)
    :param s: user-defined parameter controlling the influence radius
    """
    root = r'data/'
    # Weighting-surface raster (population counts).
    ws_tif = root + '100m_pop_2019.tif'
    # Administrative polygons for the relevant "level".
    ad_shp = root + 'gm-districts.shp'
    # The spatially ambiguous point data.
    poi_shp = root + 'level3-tweets-subset.shp'

    poi_layer = gpd.read_file(poi_shp)
    ads_layer = gpd.read_file(ad_shp)

    # FIX: manage the raster with a context manager instead of an explicit
    # close() issued *before* ws_ds.transform was still being used for plotting.
    with rio_open(ws_tif) as ws_ds:
        # NOTE(review): assumes the raster declares a nodata value — confirm,
        # np comparisons against None would silently misbehave otherwise.
        nodata = ws_ds.nodata
        ws_arr = ws_ds.read(1)
        res = ws_ds.res[0]  # pixel size; assumes square pixels — TODO confirm
        width = ws_ds.width
        height = ws_ds.height
        transform = ws_ds.transform  # captured for plotting after close

        # FIX: accumulate into a zero array and mask nodata at the end. The
        # original initialised with nodata and used "> 0" to detect previous
        # writes, which overwrote (rather than accumulated) cells whose prior
        # contribution was exactly 0 (the d == r kernel edge).
        out_arr = np.zeros((height, width), np.float32)

        # Main loop: one pass per administrative polygon.
        for _, district in ads_layer.iterrows():
            geom = district.geometry
            minx, miny, maxx, maxy = geom.bounds
            # Half-diagonal of the bounding box = candidate search radius.
            radius = (((maxx - minx) / 2) ** 2 + ((maxy - miny) / 2) ** 2) ** 0.5

            # Influence radius from polygon area A and user parameter s
            # (hoisted: invariant per district, the original recomputed it
            # for every point).
            A = geom.area
            r = (A * s / math.pi) ** 0.5

            # Points belonging to this district.
            within_points = [p.geometry for _, p in poi_layer.iterrows()
                             if p.geometry.within(geom)]

            for point in within_points:
                # Relocate the point to the best-weighted spot ("seed").
                mp_x, mp_y, max_value = calc_seed(
                    w, radius, point.x, point.y, geom, ws_ds)
                row, col = ws_ds.index(mp_x, mp_y)

                # Bounding square of the kernel, clipped to the raster.
                # rasterio.index returns (row, col); rows grow downwards,
                # so y - r maps to the *bottom* (largest) row.
                rect_bottom, rect_left = ws_ds.index(mp_x - r, mp_y - r)
                rect_top, rect_right = ws_ds.index(mp_x + r, mp_y + r)
                rect_left = max(rect_left, 0)
                rect_right = min(rect_right, width)
                rect_top = max(rect_top, 0)
                rect_bottom = min(rect_bottom, height)

                for yy in range(rect_top, rect_bottom):
                    for xx in range(rect_left, rect_right):
                        # Euclidean pixel distance scaled to map units.
                        # (The original's special-case "d = 0 at the seed"
                        # was redundant: the formula already yields 0 there.)
                        d = math.hypot(xx - col, yy - row) * res
                        if d <= r and ws_arr[yy, xx] != nodata and ws_arr[yy, xx] > 0:
                            # Linear distance-decay contribution.
                            out_arr[yy, xx] += (1 - d / r) * max_value

        # Hide cells where the weighting surface itself has no data.
        out_arr = np.ma.masked_array(out_arr, ws_arr == nodata)

    # Output image.
    fig, my_ax = subplots(1, 1, figsize=(16, 10))
    my_ax.set(title="manchester_tweets hot")
    rio_show(
        out_arr,
        ax=my_ax,
        transform=transform,
        cmap=get_cmap('gist_rainbow')
    )

    my_ax.add_artist(ScaleBar(dx=1, units="m", location="lower right"))
    fig.colorbar(ScalarMappable(
        norm=Normalize(vmin=np.floor(out_arr.min()), vmax=np.ceil(out_arr.max())),
        cmap=get_cmap('gist_rainbow')),
        ax=my_ax)
    savefig('out/manchester_tweets1.png', bbox_inches='tight')


if __name__ == '__main__':
    # Run one redistribution + plot and report wall-clock time.
    t0 = time()
    calc_heat(20, 0.01)
    elapsed = time() - t0
    print(f"completed in: {elapsed} seconds")

    # fig = plt.figure(figsize=(18, 18), dpi=80)
    # plt.xlabel('Influence of Weighting Surface')
    # plt.ylabel('Degree of Spatial Ambiguity')
    # ws = [5, 20, 50]
    #
    # ss = [0.01, 0.1, 0.5]
    # is_ylabels = [1, 4, 7]
    # is_xlabels = [7, 8, 9]
    # plt.xticks(ws)
    # plt.yticks(ss)
    # count = 0
    # for i in range(len(ss)):
    #     s = ss[i]
    #     for j in range(len(ss)):
    #         count += 1
    #         w = ws[j]
    #         print(w)
    #         print(s)
    #         print(count)
    #         ax_number = str(len(ss)) + str(len(ss)) + str(count)
    #         arr, ws_ds = calc_heat(w, s)
    #         ax = plt.subplot(int(ax_number))
    #         # add north arrow
    #         x, y, arrow_length = 0.97, 0.99, 0.1
    #         ax.annotate('N', xy=(x, y), xytext=(x, y - arrow_length),
    #                     arrowprops=dict(facecolor='black', width=5, headwidth=15),
    #                     ha='center', va='center', fontsize=20, xycoords=ax.transAxes)
    #         # ax.axis('off')
    #         ax.set_xticks([])
    #         ax.set_yticks([])
    #         colorbar = fig.colorbar(ScalarMappable(cmap=get_cmap('gist_rainbow')), ax=ax)
    #         colorbar.ax.set_yticklabels(["min", "", "", "", "", 'max'])
    #         # ax.get_xaxis().set_visible(False)
    #         # ax.get_yaxis().set_visible(False)
    #         if count in is_xlabels:
    #             ax.set_xlabel('w= ' + str(w))
    #         if count in is_ylabels:
    #             ax.set_ylabel('s= ' + str(s))
    #
    #         rio_show(
    #             arr,
    #             ax=ax,
    #             transform=ws_ds.transform,
    #             cmap=get_cmap('gist_rainbow'),
    #
    #         )
    # # colorbar = fig.colorbar(ScalarMappable(cmap=get_cmap('gist_rainbow')), ax=fig.axes)
    # # colorbar.ax.set_yticklabels(["min", "", "", "", "", 'max'])
    # savefig('out/manchester_tweets.png', bbox_inches='tight')


3.结果
在这里插入图片描述
4.数据
https://download.csdn.net/download/weixin_44265800/20721665

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值