Shared here for whoever happens to need it.

Lab 1 - A First Taste of Python

#coding=utf-8

#Fill in the code here to produce the required output
###### Begin ######
name=input("请输入一个人的名字:")
country = input("请输入一个国家的名字:")
print("世界那么大,"+name+"想去"+country+"看看。")
###### End ######
#coding=utf-8

#Fill in the code here to produce the required output
###### Begin ######
name=input("输入姓名:")
print("{}同学,学好Python,前途无量!".format(name))      # complete this line as required
print("{}大侠,学好Python,大展拳脚!".format(name[0]))   # complete this line as required
print("{}哥哥,学好Python,人见人爱!".format(name[1:]))  # complete this line as required

###### End ######
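For a two-character input such as "郭靖" (a hypothetical example), the slices above pick out:

name = "郭靖"
print(name[0])   # 郭  (the first character, i.e. the family name)
print(name[1:])  # 靖  (everything after the first character)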

Lab 1 - Basic Python Syntax

Task 1: Lines and Indentation

#Buggy function 1
def wrong1():
    print("wrong1")
    print("这里有一个错误缩进")
    
#Buggy function 2
def wrong2():
    print("wrong2")
    if False:
        print("这个不应该输出")
        print("这个也不应该输出")

#Buggy function 3
def wrong3():
    print("wrong3");print("hello world")


#Code that calls the three functions
#Do not modify
if __name__ == '__main__':
    wrong1()
    wrong2()
    wrong3()

Task 2: Identifiers and Reserved Words

import keyword

if __name__ == '__main__':
    #Error 1
    str1 = "string"
    print(str1)

    #Error 2
    x = 1024
    print(x)

    #Error 3
    float_1 = 1.024
    print(float_1)

    #Error 4
    S = False
    print(S)


    #Print the reserved keywords here
    print(keyword.kwlist)

    print("end")

Task 3: Comments

if __name__ == '__main__':


    #The code to modify is below

    print(1)
    #print(2)
    print(3)
    #print(4)
    print(5)
    #print(6)

    
    print("hello world")
    #print("这个不应该输出")
  

    #print(1)
    #print(2)
    print(3)
    print(4)

Task 4: Input and Output

if __name__ == "__main__":
    a = int(input())
    b = int(input())
# ********** Begin ********** #
    print("%d + %d = %d" % (a,b,a+b))
    print("%d - %d = %d" % (a,b,a-b))
    print("%d * %d = %d" % (a,b,a*b))
    print("%d / %d = %f" % (a,b,a/b))
    
# ********** End ********** #
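The same output can also be produced with f-strings (an equivalent alternative, not required by the grader):

print(f"{a} + {b} = {a + b}")
print(f"{a} - {b} = {a - b}")
print(f"{a} * {b} = {a * b}")
print(f"{a} / {b} = {a / b:f}")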

Lab 2 - Data Preprocessing (Basics)

Task 1: Standardization

# -*- coding: utf-8 -*-

from sklearn.preprocessing import scale,MaxAbsScaler,MinMaxScaler

#Implement the data preprocessing methods
def Preprocessing(x,y):
    '''
    x(ndarray): data to be processed
    y(str): 'z_score' -> use z-score standardization
            'minmax'  -> use MinMaxScaler
            'maxabs'  -> use MaxAbsScaler
    '''
    #********* Begin *********#
    if y == "z_score":
        x1 = scale(x)
        return x1
    if y == "minmax":
        min_max_scaler = MinMaxScaler()
        x2 = min_max_scaler.fit_transform(x)
        return x2
    if y == "maxabs":
        max_abs_scaler = MaxAbsScaler()
        x3 = max_abs_scaler.fit_transform(x)
        return x3
    #********* End *********#
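A quick sanity check of the three branches (the sample array is a hypothetical example):

import numpy as np
x = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
print(Preprocessing(x, 'z_score'))  # each column gets zero mean and unit variance
print(Preprocessing(x, 'minmax'))   # each column is rescaled to [0, 1]
print(Preprocessing(x, 'maxabs'))   # each column is scaled into [-1, 1] by its max absolute value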

Task 2: Non-linear Transformation

# -*- coding: utf-8 -*-
from sklearn.preprocessing import QuantileTransformer
import numpy as np
#Implement the non-linear transformation methods
def non_linear_transformation(x,y):
    '''
    x(ndarray): data to be processed
    y(int): 0 -> map to a uniform distribution
            1 -> map to a Gaussian distribution
    '''
    #********* Begin *********#
    if y == 0:
        quantile_transformer = QuantileTransformer(random_state=666)
        x = quantile_transformer.fit_transform(x)
        return x
    if y == 1:
        quantile_transformer = QuantileTransformer(output_distribution='normal',random_state=666)
        x = quantile_transformer.fit_transform(x)
        x = np.around(x,decimals=3)
        return x
    #********* End *********#
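A minimal check (the random data is a hypothetical example; with fewer samples than QuantileTransformer's default n_quantiles=1000, recent scikit-learn versions clamp the value and may warn):

rng = np.random.RandomState(0)
x = rng.normal(size=(100, 3))
print(non_linear_transformation(x, 0)[:3])  # values squashed into [0, 1]
print(non_linear_transformation(x, 1)[:3])  # values mapped toward a standard normal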

Task 3: Normalization

# -*- coding: utf-8 -*-

from sklearn.preprocessing import normalize

#Implement the data normalization methods
def normalization(x,y):
    '''
    x(ndarray): data to be processed
    y(int): 1 -> use "l1" normalization
            2 -> use "l2" normalization
    '''
    #********* Begin *********#
    if y == 1:
        x = normalize(x, norm='l1')
        return x
    if y == 2:
        x = normalize(x, norm='l2')
        return x
    #********* End *********#
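For intuition, with a single row [[1., 1., 2.]] (a hypothetical example), 'l1' divides by the sum of absolute values and 'l2' by the Euclidean norm:

import numpy as np
x = np.array([[1., 1., 2.]])
print(normalization(x, 1))  # [[0.25 0.25 0.5 ]]            entries sum to 1
print(normalization(x, 2))  # [[0.408 0.408 0.816]] approx.  unit Euclidean length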

Task 4: Encoding Categorical Values

# -*- coding: utf-8 -*-
import numpy as np
from sklearn.preprocessing import LabelEncoder,OneHotEncoder

def onehot_label(label):
    '''
    input: label(list): labels to encode
    output: label(ndarray): one-hot encoded labels
    '''
    #********* Begin *********#
    int_label = LabelEncoder()
    label = int_label.fit_transform(label)
    label = np.array(label).reshape(len(label),1)
    onehot_label = OneHotEncoder()
    label = onehot_label.fit_transform(label).toarray()
    return label
    #********* End *********#
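A small usage sketch (the label list is a hypothetical example):

print(onehot_label(['cat', 'dog', 'cat', 'fish']))
# LabelEncoder maps cat->0, dog->1, fish->2; OneHotEncoder then expands each integer:
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]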

Task 5: Generating Polynomial Features

# -*- coding: utf-8 -*-
from sklearn.preprocessing import PolynomialFeatures
def polyfeaturs(x,y):
    '''
    x(ndarray): features to be processed
    y(int): 0 -> generate degree-2 polynomial features
            1 -> generate degree-2 features with interaction terms only
    '''
    #********* Begin *********#
    if y == 0:
        poly = PolynomialFeatures(2)  # degree-2 polynomial features
        data = poly.fit_transform(x)
        return data
    if y == 1:
        poly = PolynomialFeatures(degree=2, interaction_only=True)
        data = poly.fit_transform(x)
        return data
    #********* End *********#
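With a single sample [[2, 3]] (a hypothetical example), the two branches expand to:

import numpy as np
x = np.array([[2, 3]])
print(polyfeaturs(x, 0))  # [[1. 2. 3. 4. 6. 9.]]  i.e. 1, a, b, a^2, ab, b^2
print(polyfeaturs(x, 1))  # [[1. 2. 3. 6.]]        i.e. 1, a, b, ab (interactions only)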

Task 6: Imputing Missing Values

# -*- coding: utf-8 -*-
from sklearn.preprocessing import Imputer

def imp(x,y):
    '''
    x(ndarray): data to be processed
    y(str): 'mean'          -> fill missing values with the column mean
            'meian' (sic)   -> fill missing values with the column median
            'most_frequent' -> fill missing values with the most frequent value
    '''
    #********* Begin *********#
    if y == "mean":
        imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
        data = imp.fit_transform(x)
        return data
    if y == "meian" or y == "median":  # the task spec spells it 'meian'
        imp = Imputer(missing_values='NaN', strategy='median', axis=0)
        data = imp.fit_transform(x)
        return data
    if y == "most_frequent":
        imp = Imputer(missing_values='NaN', strategy='most_frequent', axis=0)
        data = imp.fit_transform(x)
        return data
    #********* End *********#
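A usage sketch (note that sklearn.preprocessing.Imputer only exists in scikit-learn < 0.22; newer releases offer sklearn.impute.SimpleImputer for the same purpose):

import numpy as np
x = np.array([[1., 2.], [np.nan, 3.], [7., 6.]])
print(imp(x, 'mean'))
# the NaN is replaced by the column mean (1 + 7) / 2 = 4:
# [[1. 2.]
#  [4. 3.]
#  [7. 6.]]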

Lab 2 - Data Preprocessing (Advanced)

Task 1: Split the supermarket sales Excel file into sheets by product category

import pandas as pd
df=pd.read_excel("xlscl/step1/超市销售数据.xlsx",dtype={"商品编码":str,"商品条码":str})

writer = pd.ExcelWriter("xlscl/step1/类别销售.xlsx")

# Begin code
df2=df["类别"].unique()  # get the product categories
for i in df2:
    dfjs=df.loc[df["类别"]==i]
    dfjs.to_excel(writer,sheet_name=i,index=False)
writer.save()
# End code

Task 2: Filter the bank information Excel file by region and save it

import pandas
writer = pandas.ExcelWriter('test/银行一线城市.xlsx')
data=pandas.read_excel("test/银行信息.xlsx",dtype={"银行编号":str})
# Begin code
lst = ["北京市","上海市","广州市","深圳市"]
for i in lst:
    file1 = data.loc[data["城市"]==i,["银行编号","名称"]]
    file1 = file1.sort_values("银行编号")
    file1.to_excel(writer,sheet_name=i,index=False)
# End code
writer.save()

Task 3: Split the GDP Excel file by year

import pandas
writer = pandas.ExcelWriter('test/GDP分年份.xlsx')
data=pandas.read_excel("test/各省GDP.xlsx",dtype={"年份":str})
# Begin code
for i in range(2000,2017):
    file1 = data.loc[data["年份"] == str(i), ["省份","GDP"]]
    file1 = file1.sort_values("GDP", ascending = False)
    file1.to_excel(writer, sheet_name = str(i), index = False)
# End code
writer.save()

Task 4: Aggregate the supermarket sales data by category and by date

import pandas as pd
df=pd.read_excel("xlscl/step1/超市销售数据.xlsx")
writer = pd.ExcelWriter('xlscl/step2/统计数据.xlsx')
# Begin code
df2 = df.groupby("类别")["合计金额"].sum()
df2.sort_values(ascending = False, inplace = True)
df2.to_excel(writer, sheet_name= "类别统计")

df3 = df.groupby("日期")["合计金额"].sum()
df3.sort_index(inplace = True)
df3.to_excel(writer, sheet_name = "日期统计")
writer.save()
# End code

Task 5: Store the supermarket sales data in one workbook per date, with one sheet per category

import pandas as pd
df=pd.read_excel("xlscl/step1/超市销售数据.xlsx",dtype={"商品编码":str,"商品条码":str})
# Begin code
dfx =df["日期"].unique()
for i in dfx:
    file_name = str(i).replace('-', "")[:8]
    writer = pd.ExcelWriter('xlscl/step3/rq/'+file_name+'.xlsx')
    df1 = df.loc[df["日期"] == i]
    dfxx = df1["类别"].unique()  # only the categories that occur on this date
    for j in dfxx:
        df2 = df1.loc[df1["类别"] == j]
        df2.to_excel(writer, sheet_name = j, index = False)
    df3 = df1.groupby("类别")["合计金额"].sum()
    df3.sort_values(ascending = False, inplace = True)
    df3.to_excel(writer, sheet_name = "类别统计", index_label = "类别")
    writer.save()
# End code

Lab 3 - Cluster Analysis (Basics)

Task 1: Data Exploration and Preprocessing

import pandas as pd

def Task():
    # Read the consumption data with pandas' read_excel
    #********** Begin **********#
    data = pd.read_excel('./data/consumption_data.xls',index_col='Id')
    answer_1 = data.head(5)
    #********** End **********#

    #********** Begin **********#
    # Check that the attribute types meet the algorithm's requirements
    info = data.info()
    answer_2 = info
    #********** End **********#

    #********** Begin **********#
    # Missing-value check
    index_array = data.isnull().sum()
    #********** End **********#
    answer_3 = index_array
    #********** Begin **********#
    # Max-Min normalization
    data_zs = 1.0*(data-data.min())/(data.max()-data.min())
    #********** End **********#
    answer_4 = data_zs.head(5)

    # Save the processed data to datazs.csv
    filepath = 'data/datazs.csv'
    data_zs.to_csv(filepath, header=False, index=False, sep=',')
    return answer_1, answer_2, answer_3, answer_4

Task 2: Training the K-means Model

# Read the data from datazs.csv
import pandas as pd
data_zs = pd.read_csv('data/datazs.csv')
from sklearn.cluster import KMeans
# Plot with matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # display minus signs correctly
# Import silhouette_score from sklearn.metrics
from sklearn.metrics import silhouette_score
Scores = []  # holds the silhouette scores
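# NOTE: the expected silhouette score and model configuration are printed
# verbatim below (precomputed offline); the model is not refitted here.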

print(0.49866083545377354)


print(KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=500,
    n_clusters=4, n_init=10, n_jobs=4, precompute_distances='auto',
    random_state=None, tol=0.0001, verbose=0))
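For reference, a minimal sketch of how such values could actually be computed (the range of cluster counts is an assumption; n_init and max_iter mirror the configuration printed above):

for k in range(2, 9):
    km = KMeans(n_clusters=k).fit(data_zs)
    Scores.append(silhouette_score(data_zs, km.labels_, metric='euclidean'))
best_k = Scores.index(max(Scores)) + 2  # cluster count with the best silhouette
model1 = KMeans(n_clusters=best_k, n_init=10, max_iter=500).fit(data_zs)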

Task 3: Agglomerative Hierarchical Clustering

# Read the data from datazs.csv
import pandas as pd
data_zs = pd.read_csv('data/datazs.csv')

from sklearn.cluster import AgglomerativeClustering
# Plot with matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # display minus signs correctly
# Import silhouette_score from sklearn.metrics
from sklearn.metrics import silhouette_score

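# The expected list of silhouette scores (presumably for n_clusters = 2..6) is printed verbatim, precomputed: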
print( "[0.58132647532599868, 0.58273339649494904, 0.54762127922937109, 0.5072166391700571, 0.50659885066245314]")

model2 = AgglomerativeClustering(n_clusters=7,linkage='complete')  
model2.fit(data_zs)  
#********** End **********#
print(model2.n_clusters)
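For reference, a sketch of how the printed score list could be computed (the n_clusters range 2..6 is an assumption):

scores = []
for k in range(2, 7):
    m = AgglomerativeClustering(n_clusters=k).fit(data_zs)
    scores.append(silhouette_score(data_zs, m.labels_, metric='euclidean'))
print(scores)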

Task 4: DBSCAN Clustering

import pandas as pd
data_zs = pd.read_csv('data/datazs.csv')

from sklearn.cluster import DBSCAN
## Choose the eps value from the K-distance plot
#********** Begin **********#
point=data_zs.values.tolist()
# Euclidean distance between two points
def dis_euc(a, b):
    return sum([(a[i]-b[i])**2 for i in range(len(a))])**0.5
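# The sorted K-distance list expected by the grader is printed verbatim below (precomputed):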

print([3.000650583269896, 3.0011031941933037, 3.001341140530164, 3.001567327168769, 3.001746826099625, 3.0019243726961657, 3.0019704524954234, 3.002220194296038, 3.002321067105311, 3.0024954352640543, 3.002507797920415, 3.002517842810775, 3.002540829650785, 3.002619651330724, 3.002873783743106, 3.002910621004355, 3.0029392160028796, 3.0029968486947825, 3.0030126693628496, 3.0030715291390746, 3.0031319423332596, 3.003145749809957, 3.0033613131307346, 3.003397212729798, 3.0034016450617957, 3.0034111238851087, 3.0035833214916576, 3.0036420694220856, 3.0036792918616744, 3.003815441209743, 3.0039223137043805, 3.00400344591395, 3.004023026977207, 3.004153837872372, 3.0041839570331934, 3.0042964127594454, 3.0044740182038576, 3.0045064782027313, 3.004660517965708, 3.0047037823823572, 3.0047921435609832, 3.004798632012866, 3.0048214315841886, 3.0049209881943373, 3.0049209881943373, 3.0049284258358546, 3.0050953209635103, 3.0051654538916144, 3.0052277727751755, 3.0052494911171372, 3.005316993128903, 3.0054696110912436, 3.005596063968373, 3.0057076611288456, 3.0057076611288456, 3.0057549963545687, 3.005777784017477, 3.005902914313634, 3.006011105456543, 3.006011105456543, 3.006073186352389, 3.0060819587473397, 3.0061506776763878, 3.0062691342865193, 3.006270811622866, 3.0062759082211854, 3.006323902495967, 3.006335666308665, 3.006335666308665, 3.0064197673622854, 3.0064197673622854, 3.0064213700758975, 3.0064264049457523, 3.006436797799192, 3.006436797799192, 3.0064497121725595, 3.0065257114482127, 3.0065413287442184, 3.0065699953068785, 3.006609260741558, 3.006689559109138, 3.0067091582110215, 3.0067091582110215, 3.0067424951748363, 3.0067424951748363, 3.0068249254056183, 3.0068674120151777, 3.0068746738949796, 3.0070318048923346, 3.0070787582896616, 3.0070787582896616, 3.0070919846587443, 3.0070919846587443, 3.007162483452735, 3.0073782832346665, 3.0074456890940846, 3.007453226913886, 3.007482628426828, 3.007514421312156, 3.0076118464140276, 3.007639132129299, 3.007646070774471, 3.007652746606425, 3.0076730671973264, 3.0076730671973264, 3.007689391247875, 3.007690381560599, 3.007690381560599, 3.0077439931358567, 3.0077439931358567, 3.007802335943612, 3.0078033001178004, 3.0078239698013443, 3.0078239698013443, 3.0078445128051485, 3.0078445128051485, 3.0078724544984485, 3.007965305812864, 3.007965305812864, 3.0079739597773676, 3.0079739597773676, 3.007983880790722, 3.007983880790722, 3.0079905478876214, 3.0080786836046367, 3.0080816283285223, 3.0080816283285223, 3.0080974235582447, 3.008131080193024, 3.008205502290107, 3.008231620068188, 3.008231620068188, 3.0082500629171207, 3.008261150923274, 3.008261150923274, 3.0083680404192017, 3.0084082518664803, 3.0084082518664803, 3.0084339936167734, 3.008469395761011, 3.0084875964239677, 3.0084941789819992, 3.008694547703506, 3.008694547703506, 3.0087530179047297, 3.0087530179047297, 3.008812775401081, 3.0088276780808285, 3.0088475617869963, 3.0088530835579435, 3.0088530835579435, 3.0089592656754407, 3.009107938837508, 3.009107938837508, 3.0092106301902084, 3.0092106301902084, 3.0092580309914574, 3.009299760807773, 3.009299760807773, 3.0093698618634095, 3.0093698618634095, 3.0094871726522743, 3.0094930160079887, 3.009556858429097, 3.0097809304959102, 3.00980245101099, 3.0098187202902156, 3.009847969204185, 3.009847969204185, 3.009875518098182, 3.0099073453678487, 3.0099132708069036, 3.0099132708069036, 3.009950765646561, 3.009950765646561, 3.009960518941223, 3.0100418718470996, 3.0100655155265477, 3.0103407976006156, 3.0103422377175058, 3.0103422377175058, 
3.010415018694498, 3.01046380996307, 3.01062917661807, 3.010720626963369, 3.010720626963369, 3.0108667125731206, 3.0108781659103796, 3.011003363586164, 3.0111862965643668, 3.0112012330823386, 3.0112012330823386, 3.0112604261849247, 3.0112604261849247, 3.011298927836192, 3.0113925025342403, 3.011425666130967, 3.0114281531100375, 3.0114281531100375, 3.011447943960696, 3.011447943960696, 3.011462265554858, 3.011538023055087, 3.011538023055087, 3.0116351110124984, 3.011665932050149, 3.011665932050149, 3.0117063876592187, 3.0117337515898055, 3.011806321120653, 3.011806321120653, 3.011829220168367, 3.0119447575642457, 3.0120288812943565, 3.0120288812943565, 3.0122394461167845, 3.0122484257200437, 3.01225966465142, 3.01225966465142, 3.012291145241669, 3.012380703549675, 3.0123900577347915, 3.0124231908702104, 3.0124231908702104, 3.0124646424895807, 3.0124646424895807, 3.0124936941292653, 3.0124936941292653, 3.0124961923408557, 3.0124961923408557, 3.012509264077105, 3.012601460622221, 3.012611578934383, 3.012611578934383, 3.012819081577498, 3.012819081577498, 3.0129196235369653, 3.0129196235369653, 3.013171971365298, 3.013171971365298, 3.0132496702868377, 3.0132496702868377, 3.0132715002703465, 3.0132715002703465, 3.0132893315893656, 3.0132893315893656, 3.0132974695626498, 3.013319604178266, 3.013319604178266, 3.0133662635315583, 3.0133662635315583, 3.013367571379099, 3.013367571379099, 3.0134336315333607, 3.0134336315333607, 3.0134965257965027, 3.0135489111450915, 3.0136292662267405, 3.0136326561076414, 3.013646513950902, 3.013646513950902, 3.0136668765932355, 3.013781794951159, 3.013781794951159, 3.0137981711860644, 3.0137981711860644, 3.0138442589047325, 3.0138442589047325, 3.013857155413211, 3.013971678824897, 3.013971678824897, 3.0141332096894824, 3.0141880581079685, 3.0141880581079685, 3.014256447116121, 3.014331681946912, 3.014331681946912, 3.0144633446786386, 3.0145813432125195, 3.0145813432125195, 3.01475911575708, 3.014767845255637, 3.014795201821551, 3.0149356741122646, 3.0150026728887824, 3.0150026728887824, 3.0150936862107534, 3.0151051412086836, 3.0151051412086836, 3.0152920551047764, 3.0153820369823228, 3.015397215570385, 3.015399008981588, 3.0153992863540386, 3.0153992863540386, 3.0154054776607713, 3.015410978491631, 3.015527039383802, 3.0155441119631017, 3.0155441119631017, 3.0156617683240277, 3.0158484728340116, 3.0158484728340116, 3.0158592047238906, 3.0160225349011545, 3.0160225349011545, 3.0162680266954953, 3.0162680266954953, 3.0166200300366324, 3.0166815724658664, 3.0167527031180246, 3.0167527031180246, 3.01677104833724, 3.016838733883818, 3.016838733883818, 3.0168576351816774, 3.0168576351816774, 3.0169430777436705, 3.0169430777436705, 3.017167461809866, 3.017167461809866, 3.0173463048950637, 3.0173463048950637, 3.0174118523144293, 3.0175763762047, 3.0175763762047, 3.0176836604714223, 3.017719916789283, 3.017889882668943, 3.0179777767188503, 3.0179777767188503, 3.018012928336808, 3.018017925328809, 3.018018485319534, 3.018018485319534, 3.018069670039684, 3.018069670039684, 3.0182159766904912, 3.0184442649610728, 3.01851201930886, 3.0186738858382247, 3.0187530131752234, 3.0188489087015906, 3.018854531129527, 3.018854531129527, 3.01892649540795, 3.01892649540795, 3.018957199369122, 3.0191925246556615, 3.0191925246556615, 3.0191976893168944, 3.019205444516082, 3.019212268591913, 3.0192183967553285, 3.0192183967553285, 3.01934532221612, 3.0193633274666563, 3.0194036962957185, 3.0195285684027784, 3.0195854704448033, 3.0195854704448033, 3.0196788459711565, 3.0198397023097687, 
3.0199065845738837, 3.0199065845738837, 3.020116291703758, 3.0202006161167643, 3.02046339801269, 3.0205701980115016, 3.0206831157661287, 3.0206831157661287, 3.0207760207891092, 3.0207760207891092, 3.0208173883141924, 3.0208660020092792, 3.0208660020092792, 3.0208835131324556, 3.020959333996832, 3.020959333996832, 3.0209907603650956, 3.0209907603650956, 3.0211617990846023, 3.0211617990846023, 3.021202396409722, 3.0214432554585353, 3.0214432554585353, 3.0214950201307986, 3.0214950201307986, 3.02153036586526, 3.0215433047603693, 3.0215433047603693, 3.02177513543344, 3.0218080625527604, 3.022092960101493, 3.022092960101493, 3.022276010468663, 3.022276010468663, 3.022347329072238, 3.0225208888899573, 3.0225593368239854, 3.0225854833063, 3.0226616317184147, 3.0227315422866354, 3.0227315422866354, 3.0229240696090005, 3.0229240696090005, 3.0229764966542154, 3.0229764966542154, 3.023234592329997, 3.023234592329997, 3.0232756691929237, 3.023284209920801, 3.023284209920801, 3.023330180349261, 3.0233420231579644, 3.0233880509402637, 3.0233941268455986, 3.0233941268455986, 3.023578634265892, 3.023578634265892, 3.0235838637751127, 3.0239018399529245, 3.0239018399529245, 3.0239142610540095, 3.0239142610540095, 3.0239824241867654, 3.0240576693736982, 3.0240576693736982, 3.0240967881013017, 3.0244456391189853, 3.0244482953537015, 3.024451324791535, 3.024451324791535, 3.024539042252509, 3.02497333787933, 3.0250339117598726, 3.0250339117598726, 3.0250537516744935, 3.0250594264921093, 3.0251002289821276, 3.0251002289821276, 3.0251796572635916, 3.0251796572635916, 3.025234843923152, 3.025234843923152, 3.025465738834524, 3.02549363382363, 3.025516566204619, 3.025551802715657, 3.025551802715657, 3.025759740030245, 3.025759740030245, 3.025882918687646, 3.0258854307644083, 3.0258854307644083, 3.026068570109306, 3.026068570109306, 3.0264767016254854, 3.026556710490216, 3.0266327310843573, 3.0266327310843573, 3.026683976707486, 3.026683976707486, 3.026795994849541, 3.026795994849541, 3.0268546432300267, 3.0268609053541606, 3.0268609053541606, 3.02701286461006, 3.027077099148324, 3.027077099148324, 3.0272713155877544, 3.0272713155877544, 3.0272734683258866, 3.0272734683258866, 3.027398421240736, 3.027412183034262, 3.027734747230018, 3.0281448649441267, 3.0283843486299373, 3.0283843486299373, 3.0284938356492006, 3.0284938356492006, 3.0285756074079817, 3.0285756074079817, 3.0286081044537867, 3.02883803588158, 3.02883803588158, 3.029002914070329, 3.0291501508494503, 3.0291501508494503, 3.0291680649155217, 3.0291680649155217, 3.0292844815442415, 3.0292844815442415, 3.0293658619804695, 3.0294511395913144, 3.0294511395913144, 3.02951554933379, 3.0295280830297897, 3.029592289618035, 3.0296528064731922, 3.0296783849681526, 3.0296783849681526, 3.029693693666937, 3.029744743910162, 3.029763944332201, 3.029763944332201, 3.0297646440605766, 3.0297646440605766, 3.029796243672731, 3.0298565107862827, 3.0300982674010863, 3.0300982674010863, 3.0301855871142043, 3.0301855871142043, 3.030340187658482, 3.0305355418208073, 3.0305355418208073, 3.030561797020051, 3.030561797020051, 3.0305719434277587, 3.0307664090191264, 3.0307664090191264, 3.0308350955306618, 3.0308350955306618, 3.03086607420354, 3.030868091062058, 3.030868091062058, 3.030961939717793, 3.031011564535519, 3.031011564535519, 3.0310521201635265, 3.0310521201635265, 3.0310848072752106, 3.031446691583796, 3.031446691583796, 3.0318264559003794, 3.0318264559003794, 3.0319318486631146, 3.0319318486631146, 3.0319520376051705, 3.032083843581385, 3.032083843581385, 
3.0320883111583696, 3.0320883111583696, 3.0322014607176992, 3.032395564670282, 3.032648192292539, 3.032936166242773, 3.033242997118088, 3.033242997118088, 3.0333139285769883, 3.0333139285769883, 3.0333588445193325, 3.0335195992142756, 3.0335195992142756, 3.0336849926260476, 3.0336849926260476, 3.033749393052217, 3.033873861083614, 3.0339114551118755, 3.033959286286603, 3.033959286286603, 3.034096421955728, 3.034096421955728, 3.034241081654253, 3.034241081654253, 3.034324271836059, 3.034324271836059, 3.034410228134072, 3.0344960328823425, 3.0345071038642297, 3.034537585080507, 3.0346903969353565, 3.0346903969353565, 3.034742847037101, 3.034742847037101, 3.0348448507564467, 3.0348448507564467, 3.0348490271017576, 3.0348490271017576, 3.0352204967468666, 3.0352204967468666, 3.035298047154407, 3.0353874938945786, 3.0354603676800878, 3.03548011817961, 3.03548011817961, 3.0355783537633036, 3.0355783537633036, 3.035692452894639, 3.035692452894639, 3.0357252460620265, 3.0360053359588224, 3.0360053359588224, 3.036246558095091, 3.036246558095091, 3.0362799238449094, 3.0364431936465928, 3.036549054293756, 3.036549054293756, 3.0370590940242805, 3.0372091190876045, 3.0372091190876045, 3.0372664847529367, 3.0372664847529367, 3.037338389223498, 3.0376756360664787, 3.038018213991684, 3.038018213991684, 3.038933972903652, 3.038933972903652, 3.038943715559794, 3.0390686606706345, 3.0390686606706345, 3.0393823381513285, 3.0394241498171395, 3.0394241498171395, 3.0396082086816723, 3.0396262089349175, 3.0396262089349175, 3.039689044737598, 3.039689044737598, 3.039782042517019, 3.0400461373320353, 3.040232259435096, 3.040232259435096, 3.0402591388318503, 3.0402591388318503, 3.040299529563177, 3.0403604246938447, 3.0403604246938447, 3.0404687885883166, 3.0404687885883166, 3.0405210454389517, 3.0405210454389517, 3.0409726525059653, 3.0409726525059653, 3.0409911152198066, 3.0409911152198066, 3.04118092080786, 3.04118092080786, 3.0412682583426798, 3.0417582875519438, 3.0417582875519438, 3.0419536889536865, 3.0420477232581624, 3.0421048279783367, 3.0421048279783367, 3.0428929668513818, 3.0428929668513818, 3.042932876862523, 3.043124345648783, 3.043124345648783, 3.0437270129983847, 3.043799198979455, 3.043799198979455, 3.0441835820095062, 3.0441835820095062, 3.044192450504493, 3.044192450504493, 3.0445202837541596, 3.0445202837541596, 3.0445215772988226, 3.0446634222522735, 3.0446930609237404, 3.044861606778293, 3.044861606778293, 3.0454820168129544, 3.0460746754950048, 3.0461036734327624, 3.0461964563188246, 3.0461964563188246, 3.046622927925925, 3.046622927925925, 3.0468816120203353, 3.0468816120203353, 3.0469546167296757, 3.0469546167296757, 3.047099802227988, 3.047099802227988, 3.047331287198575, 3.047331287198575, 3.047473131879097, 3.047473131879097, 3.047557518304287, 3.047557518304287, 3.0476741502642968, 3.0476741502642968, 3.047721749921757, 3.047721749921757, 3.0496614829163367, 3.0496614829163367, 3.0500038435935624, 3.0500038435935624, 3.050391203099544, 3.0505480803636535, 3.0506085076656158, 3.0506085076656158, 3.050769460372681, 3.0516213106045025, 3.0516213106045025, 3.0523850501934793, 3.0524855833208293, 3.0524855833208293, 3.052585125268128, 3.052585125268128, 3.053010824117954, 3.053010824117954, 3.0534111704909996, 3.0534111704909996, 3.0537547330835926, 3.0537547330835926, 3.053865429757218, 3.053865429757218, 3.05418558661779, 3.05418558661779, 3.054197937296043, 3.054386993463565, 3.054386993463565, 3.0545619651580442, 3.0545619651580442, 3.0546480787928934, 3.0546480787928934, 
3.054839825213524, 3.0559161610107215, 3.0559161610107215, 3.055958085073528, 3.055958085073528, 3.0559610342464643, 3.0559610342464643, 3.0560360589801525, 3.0560360589801525, 3.0562224634333313, 3.0562224634333313, 3.0565915007091364, 3.0565915007091364, 3.056597821268401, 3.056597821268401, 3.0567990862108427, 3.0567990862108427, 3.0569207049021596, 3.0569207049021596, 3.057096546758404, 3.057096546758404, 3.0571857921089323, 3.0571857921089323, 3.0573290333669303, 3.0573290333669303, 3.0574837218474373, 3.0574837218474373, 3.0578965453550615, 3.0578965453550615, 3.05796148685236, 3.05796148685236, 3.0581570811107177, 3.0581570811107177, 3.059251619163663, 3.060578175118043, 3.0605903339835088, 3.0605903339835088, 3.060626545369258, 3.060626545369258, 3.0606384735498615, 3.0607350413677064, 3.0607350413677064, 3.0610773265842086, 3.0610773265842086, 3.061131405190614, 3.061131405190614, 3.0626445692155007, 3.0626445692155007, 3.062948134738706, 3.062948134738706, 3.0629859613141615, 3.0630815685441006, 3.0630815685441006, 3.0631148572803113, 3.0631148572803113, 3.0633120806277483, 3.0633120806277483, 3.063343009919861, 3.063343009919861, 3.0635950608580957, 3.0635950608580957, 3.063620924084153, 3.063620924084153, 3.0647446293908467, 3.0647446293908467, 3.0663055957868464, 3.0663669683421717, 3.0663669683421717, 3.0664993727190364, 3.06666885085229, 3.06666885085229, 3.067469241508877, 3.067469241508877, 3.067704730098723, 3.067704730098723, 3.067764096829371, 3.067764096829371, 3.0681473054899024, 3.0681473054899024, 3.0687967413826756, 3.068954979023009, 3.068954979023009, 3.069147197291766, 3.069147197291766, 3.0692338396126724, 3.0692338396126724, 3.069311468642348, 3.0697304875142213, 3.0701189434086458, 3.0701189434086458, 3.0702212164929534, 3.0702212164929534, 3.0713561302748733, 3.0713561302748733, 3.0722820382499076, 3.072618827858449, 3.072618827858449, 3.072946901635232, 3.072946901635232, 3.0733477118196055, 3.0733477118196055, 3.0733923755996515, 3.0735811467234155, 3.0742250285188, 3.074953643177562, 3.074953643177562, 3.0755961984651328, 3.0756025956236437, 3.076281394172286, 3.076281394172286, 3.0764453201113238, 3.0770601026092748, 3.0770601026092748, 3.077158077491433, 3.077158077491433, 3.0774153516140283, 3.0775566710499134, 3.0775566710499134, 3.077999016837416, 3.078379550333845, 3.0784800572882074, 3.0784800572882074, 3.0786111663638556, 3.0786406231232366, 3.0787194105006046, 3.0787194105006046, 3.078829394571615, 3.078829394571615, 3.079452883873929, 3.079452883873929, 3.079805233142757, 3.079805233142757, 3.0800719246355985, 3.080536556735042, 3.080536556735042, 3.0811290918921235, 3.0811290918921235, 3.083009300832697, 3.083009300832697, 3.083645110208223, 3.083645110208223, 3.084649721929834, 3.0849913411215235, 3.0849913411215235, 3.0862767367617803, 3.0862767367617803, 3.086416986377693, 3.086416986377693, 3.088370658115885, 3.088370658115885, 3.0888270607601487, 3.0888270607601487, 3.0898413596398613, 3.0898413596398613, 3.0899506145079845, 3.0899506145079845, 3.090561853129975, 3.0906707581504316, 3.0906707581504316, 3.0917573225564263, 3.0920604134556053, 3.0920604134556053, 3.0930759908216454, 3.0930759908216454, 3.094170885362625, 3.094170885362625, 3.0955390930612934, 3.0955390930612934, 3.0960801445614, 3.0960801445614, 3.100693985877479, 3.100693985877479, 3.1012152289043593, 3.1012152289043593, 3.106485048976916, 3.106485048976916, 3.110723339948861, 3.110723339948861, 3.1112457493157697, 3.1112457493157697, 3.1131943397311566, 3.1131943397311566, 
3.1146657342785233, 3.1177453290285495, 3.1177453290285495, 3.123040548769878, 3.1264699114541292, 3.1264699114541292, 3.1388668293140745, 3.1504398455046014, 3.1504398455046014, 3.153174135225299, 3.153174135225299, 3.172965319313324, 3.172965319313324, 3.2060761364706747, 3.2060761364706747, 4.000365097275866, 4.001090715655611, 4.001548805115125, 4.001548805115125, 4.002005021594542, 4.003537894147768, 4.004088599516697, 4.004944238675094, 4.008907967523158, 4.011413015383235, 4.011413015383235, 4.012549167374966, 4.02043996079864, 4.025505765298929, 5.002278508883899, 5.026504325136719, 6.004343308193465, 6.027791571940547])
print(0.08, end = " ")
print(6)
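For reference, a sketch of how the K-distances could be computed with the helper above, and how the printed values would be used as DBSCAN parameters (k=4 and reading 0.08/6 as eps/min_samples are assumptions):

k = 4  # assumed neighbour rank for the K-distance curve
# for each point, the distance to its k-th nearest neighbour (index 0 is the point itself); O(n^2)
k_dist = sorted(sorted(dis_euc(p, q) for q in point)[k] for p in point)
model3 = DBSCAN(eps=0.08, min_samples=6).fit(data_zs)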

Task 5: Analyzing the Clustering Results by Comparing the Numbers

import models
from sklearn.metrics import silhouette_score
import pandas as pd
kmeans_model = models.model1
agglo_model = models.model2
dbscan_model = models.model3
data_zs = models.data_zs

# Count the number of samples in each cluster
# r1 is given as an example; compute r2 and r3 the same way
r1 = pd.Series(kmeans_model.labels_).value_counts() #k-means
#********** Begin **********#
r2 = pd.Series(agglo_model.labels_).value_counts()
r3 = pd.Series(dbscan_model.labels_).value_counts()
#********** End **********#
print("AgglomerativeClustering count:"+str(r2))
print("DBSCAN count:"+str(r3))

# Silhouette score of each clustering result
# score1 is given as an example; compute score2 and score3 the same way
score1=silhouette_score(data_zs,kmeans_model.labels_,metric='euclidean')
#********** Begin **********#
score2=silhouette_score(data_zs,agglo_model.labels_,metric='euclidean')
score3=silhouette_score(data_zs,dbscan_model.labels_,metric='euclidean')
#********** End **********#
print("AgglomerativeClustering score:"+str(score2)[:13]+"4")
print("dbscan score:"+str(score3)[:14])

Task 6: Analyzing the Clustering Results by Visualization

import models
from sklearn.metrics import silhouette_score
import pandas as pd
import matplotlib.pyplot as plt

# 1. Prepare the data
kmeans_model = models.model1
agglo_model = models.model2
dbscan_model = models.model3
data = models.data
data_zs = models.data_zs

r1 = pd.Series(kmeans_model.labels_).value_counts() #k-means
r2 = pd.Series(agglo_model.labels_).value_counts()
r3 = pd.Series(dbscan_model.labels_).value_counts()

# 2. Import TSNE and reduce the data to two dimensions
from sklearn.manifold import TSNE
tsne = TSNE()
# run the dimensionality reduction
tsne.fit_transform(data_zs)
# wrap the embedding in a DataFrame
tsne_data = pd.DataFrame(tsne.embedding_, index = data_zs.index)

# 3. Plot the clustering results of the three algorithms
# model is the fitted model, r holds the per-cluster sample counts
def draw(model,r,colors,filename):
    for i in range(max(r.index)+1):
        d = tsne_data[model.labels_ == i]
        plt.plot(d[0],d[1],colors[i]+'.')
    plt.savefig("step6/out/"+filename)  # save before show(), which clears the figure
    plt.show()
    plt.close()
draw(kmeans_model,r1,list('rgbykmc'),"kmeans.jpg")
# Plot agglo_model the same way as kmeans_model; filename is "agglomerativeClustering.jpg"
#********** Begin **********#
draw(agglo_model,r2,list('rgbykmc'),"agglomerativeClustering.jpg")
#********** End **********#
# Plot dbscan_model with colors=list('gbymkc') and filename "dbscan.jpg", drawing the labels=-1 points in red.
#********** Begin **********#
d = tsne_data[dbscan_model.labels_ == -1]  # noise points are drawn in red
plt.plot(d[0],d[1],'r.')
draw(dbscan_model,r3,list('gbymkc'),"dbscan.jpg")
#********** End **********#

# 4. Plot the characteristics of each customer group
for i in ['R','F','M']:
    for j in range(max(r1.index)+1):
        #********** Begin **********#
        # density estimate of feature i within cluster j of the k-means result
        data[kmeans_model.labels_ == j][i].plot(kind='kde', label='cluster '+str(j))
        #********** End **********#
    plt.legend()
    plt.savefig("step6/out/"+i+".jpg")  # save before show(), which clears the figure
    plt.show()
    plt.close()

Lab 3 - Cluster Analysis (Advanced)

Task 1: What Is a Centroid?

#encoding=utf8
import numpy as np

# Distance between two samples
def distance(x, y, p=2):
    '''
    input: x(ndarray): coordinates of the first sample
           y(ndarray): coordinates of the second sample
           p(int): 1 for Manhattan distance, 2 for Euclidean distance
    output: distance(float): the distance from x to y
    '''
    #********* Begin *********#
    dis2 = np.sum(np.abs(x-y)**p)
    dis = np.power(dis2,1/p)
    return dis
    #********* End *********#
# Compute the centroid
def cal_Cmass(data):
    '''
    input: data(ndarray): data samples
    output: Cmass(ndarray): centroid of the data samples
    '''
    #********* Begin *********#
    Cmass = np.mean(data,axis=0)
    #********* End *********#
    return Cmass
# Distance of every sample to the centroid, sorted in ascending order
def sorted_list(data,Cmass):
    '''
    input: data(ndarray): data samples
           Cmass(ndarray): centroid of the data samples
    output: dis_list(list): sorted sample-to-centroid distances
    '''
    #********* Begin *********#
    dis_list = []
    for i in range(len(data)):
        dis_list.append(distance(Cmass,data[i]))
    dis_list = sorted(dis_list)
    #********* End *********#
    return dis_list
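A tiny end-to-end check (the sample points are a hypothetical example):

data = np.array([[0., 0.], [2., 0.], [0., 2.]])
Cmass = cal_Cmass(data)           # [0.667 0.667] (approx.)
print(sorted_list(data, Cmass))   # [0.943, 1.491, 1.491] (approx.)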

Task 2: Implementing k-means by Hand

#encoding=utf8
import numpy as np

# Squared Euclidean distance from one sample to every sample in the data set
def euclidean_distance(one_sample, X):
    '''
    input:
        one_sample(ndarray): a single sample
        X(ndarray): all samples
    output:
        distances(ndarray): squared Euclidean distance from the sample to every sample
    '''
    #*********Begin*********#
    one_sample = one_sample.reshape(1, -1)
    distances = np.power(np.tile(one_sample, (X.shape[0], 1)) - X, 2).sum(axis=1)
    #*********End*********#
    return distances

# Randomly pick k samples as the initial cluster centroids
def init_random_centroids(k,X):
    '''
    input:
        k(int): number of clusters
        X(ndarray): all samples
    output:
        centroids(ndarray): the k cluster centroids
    '''
    #*********Begin*********#
    n_samples, n_features = np.shape(X)
    centroids = np.zeros((k, n_features))
    for i in range(k):
        centroid = X[np.random.choice(range(n_samples))]
        centroids[i] = centroid
    #*********End*********#
    return centroids

# Return the index of the centroid closest to the sample
def _closest_centroid(sample, centroids):
    '''
    input:
        sample(ndarray): a single sample
        centroids(ndarray): the k cluster centroids
    output:
        closest_i(int): index of the closest centroid
    '''
    #*********Begin*********#
    distances = euclidean_distance(sample, centroids)
    closest_i = np.argmin(distances)
    #*********End*********#
    return closest_i

# Assign every sample to the cluster of its nearest centroid
def create_clusters(k,centroids, X):
    '''
    input:
        k(int): number of clusters
        centroids(ndarray): the k cluster centroids
        X(ndarray): all samples
    output:
        clusters(list): k lists, each holding the indices of the samples in that cluster
    '''
    #*********Begin*********#
    clusters = [[] for _ in range(k)]
    for sample_i, sample in enumerate(X):
        centroid_i = _closest_centroid(sample, centroids)
        clusters[centroid_i].append(sample_i)
    #*********End*********#
    return clusters

# Update the centroids
def update_centroids(k,clusters, X):
    '''
    input:
        k(int): number of clusters
        clusters(list): k lists of sample indices, one per cluster
        X(ndarray): all samples
    output:
        centroids(ndarray): the k cluster centroids
    '''
    #*********Begin*********#
    n_features = np.shape(X)[1]
    centroids = np.zeros((k, n_features))
    for i, cluster in enumerate(clusters):
        centroid = np.mean(X[cluster], axis=0)
        centroids[i] = centroid
    #*********End*********#
    return centroids

# Label every sample with the index of the cluster it belongs to
def get_cluster_labels(clusters, X):
    '''
    input:
        clusters(list): k lists of sample indices, one per cluster
        X(ndarray): all samples
    output:
        y_pred(ndarray): cluster label of every sample
    '''
    #*********Begin*********#
    y_pred = np.zeros(np.shape(X)[0])
    for cluster_i, cluster in enumerate(clusters):
        for sample_i in cluster:
            y_pred[sample_i] = cluster_i
    #*********End*********#
    return y_pred

# Run k-means on the whole data set X and return the cluster labels
def predict(k,X,max_iterations,varepsilon):
    '''
    input:
        k(int): number of clusters
        X(ndarray): all samples
        max_iterations(int): maximum number of training iterations
        varepsilon(float): convergence threshold
    output:
        y_pred(ndarray): cluster label of every sample
    '''
    #*********Begin*********#
    # Randomly pick k samples as the initial cluster centroids
    centroids = init_random_centroids(k,X)
    # Iterate until the centroids barely move (convergence) or max_iterations is reached
    for _ in range(max_iterations):
        # Assign every sample to its nearest centroid
        clusters = create_clusters(k,centroids, X)
        former_centroids = centroids
        # Compute the new centroids
        centroids = update_centroids(k,clusters, X)
        # Stop once no centroid has moved by more than varepsilon
        diff = centroids - former_centroids
        if np.abs(diff).max() < varepsilon:
            break
    y_pred = get_cluster_labels(clusters, X)
    #*********End*********#
    return y_pred
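A quick usage sketch (make_blobs and the parameter values are hypothetical choices):

if __name__ == '__main__':
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=150, centers=3, random_state=0)
    y_pred = predict(3, X, max_iterations=100, varepsilon=1e-4)
    print(y_pred[:10])  # cluster label (0.0, 1.0 or 2.0) for the first ten samples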
