Data Mining, Weeks 1-2

1. Normal distribution (also called the Gaussian distribution)
Probability density: f(x) = exp(-(x-u)^2 / (2*sig^2)) / (sqrt(2*pi)*sig), where u is the mean and sig the standard deviation.

import numpy as np
import matplotlib.pyplot as plt
import math

u = 0                   # mean
sig = math.sqrt(0.2)    # standard deviation (variance 0.2)

x = np.linspace(u - 3 * sig, u + 3 * sig, 50)   # 50 evenly spaced points in [u-3*sig, u+3*sig]
y_sig = np.exp(-(x - u) ** 2 / (2 * sig ** 2)) / (math.sqrt(2 * math.pi) * sig)   # normal probability density
plt.plot(x, y_sig, "r-", linewidth=2)   # line style and width
plt.grid(True)                          # show grid
plt.show()

2. Poisson distribution
Probability mass function: P(X=k) = lam^k * e^(-lam) / k!, for k = 0, 1, 2, ...

import numpy as np
import matplotlib.pyplot as plt

x = np.random.poisson(lam=5, size=10000)   # lam is λ (the mean); size is the number of samples drawn, not k
pillar = 15                                # number of histogram bins
a = plt.hist(x, pillar, color='g')         # a[0] holds the bin counts, a[1] the bin edges
plt.plot(a[1][0:pillar], a[0], 'r')        # connect the bin left edges to the counts
plt.grid()
plt.show()
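As a cross-check, a minimal sketch (reusing lam=5 from above) that overlays the theoretical pmf from scipy.stats.poisson on the sampled histogram:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson

lam = 5
samples = np.random.poisson(lam=lam, size=10000)
k = np.arange(0, 16)
plt.hist(samples, bins=k, density=True, color='g', alpha=0.5)   # empirical frequencies
plt.plot(k, poisson.pmf(k, lam), 'ro-')                         # theoretical P(X=k)
plt.grid(True)
plt.show()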

3. Two-point distribution: the Bernoulli trial
Concept: a single trial; the random variable takes the value 1 on success, with probability p, and 0 on failure, with probability 1-p.
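Since this section has no code of its own, here is a minimal sampling sketch (p=0.3 is an assumed example value):

from scipy.stats import bernoulli

p = 0.3
samples = bernoulli.rvs(p, size=10000)   # 10000 independent Bernoulli trials
print(bernoulli.pmf([0, 1], p))          # probabilities of failure and success: [1-p, p]
print(samples.mean())                    # empirical frequency of 1, close to p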

4. Binomial distribution (n repeated Bernoulli trials): for a known number of trials n, we care about the probability of exactly k successes.
Probability mass function: P(X=k) = C(n,k) * p^k * (1-p)^(n-k), for k = 0, 1, ..., n
For the coin-tossing problem, run n = 100 trials and look at the probability mass function:

from scipy.stats import binom
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)
n = 100
p = 0.5
# ppf: the inverse of the cumulative distribution function. For q=0.01, ppf returns the x such that P(X<x)=0.01.
x = np.arange(binom.ppf(0.01, n, p), binom.ppf(0.99, n, p))
ax.plot(x, binom.pmf(x, n, p), 'o')
plt.show()

5. Geometric distribution
In a sequence of Bernoulli trials, the probability that the first success occurs on trial k.
Here P(k) = (1-p)^(k-1) * p

from scipy.stats import geom
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(1, 1)
p = 0.5
# ppf: the inverse of the cumulative distribution function. For q=0.01, ppf returns the x such that P(X<x)=0.01.
x = np.arange(geom.ppf(0.01, p),geom.ppf(0.99, p))
ax.plot(x, geom.pmf(x, p),'o')
plt.show()
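As a quick check that geom.pmf matches the formula above (k=3 and p=0.5 are arbitrary example values):

from scipy.stats import geom

p, k = 0.5, 3
print(geom.pmf(k, p))           # scipy's value
print((1 - p) ** (k - 1) * p)   # P(k) = (1-p)^(k-1) * p, same result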

6. Uniform distribution
If the random variable X has probability density
f(x) = 1/(b-a) for a <= x <= b, and f(x) = 0 otherwise,
then X is said to follow the uniform distribution on the interval [a, b].

import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import uniform

fig, ax = plt.subplots(1, 1)
loc = 1      # left endpoint a of the interval
scale = 1    # interval length b - a, so X is uniform on [1, 2]
# ppf: the inverse of the cumulative distribution function. For q=0.01, ppf returns the x such that P(X<x)=0.01.
x = np.linspace(uniform.ppf(0.01, loc, scale), uniform.ppf(0.99, loc, scale), 100)
ax.plot(x, uniform.pdf(x, loc, scale), 'b-', label='uniform')
plt.show()

7. Exponential distribution
Probability density: f(x) = lam * e^(-lam*x) for x >= 0, and 0 otherwise.

import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import expon
fig, ax = plt.subplots(1, 1)
lambdaUse = 2               # rate parameter λ
loc = 0
scale = 1.0 / lambdaUse     # scipy's scale parameter is 1/λ

# ppf: the inverse of the cumulative distribution function. For q=0.01, ppf returns the x such that P(X<x)=0.01.
x = np.linspace(expon.ppf(0.01, loc, scale), expon.ppf(0.99, loc, scale), 100)
ax.plot(x, expon.pdf(x, loc, scale), 'b-', label='expon')
plt.show()

Example 1: Count how often each leading digit appears among factorial results

import matplotlib.pyplot as plt

n = 1
frequency = [0] * 9     # counts for leading digits 1..9

for i in range(1, 2000):
    n = n * i           # n is now i!
    d = n               # work on a copy so the full factorial is preserved
    while d >= 10:
        d = d // 10     # drop the last digit until only the leading digit remains
    frequency[d - 1] += 1
print(frequency)
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
plt.plot(x, frequency, "r-", lw=2)
plt.plot(x, frequency, 'go', markersize=8)
plt.xticks(x)
plt.grid(True)
plt.show()

Compute the mean, median, and mode, and sort an array

from numpy import mean, median, sort
from scipy.stats import mode

a = [1, 2, 3, 4, 5, 5, 6]
print(mean(a))      # arithmetic mean
print(median(a))    # median
print(mode(a)[0])   # most frequent value
print(sort(a))      # sorted copy of the array

Create a matrix

import numpy as np
b = np.arange(3, 15)      # 1-D array containing 3, 4, ..., 14
print(b)
print(b.reshape(3, 4))    # reshape returns a new 3x4 array; b itself is unchanged

Matrix multiplication

import numpy as np
a = np.arange(0, 12).reshape([3, 4])     # 3x4 matrix
b = np.arange(12, 24).reshape([4, 3])    # 4x3 matrix
p = np.dot(a, b)                         # matrix product, equivalent to a @ b; result is 3x3
print(p)

Example 2: Regression: the relationship between daughters' and mothers' heights
This problem needs no training step; we simply draw the fitted straight line.
The closer |r| is to 1, the stronger the linear correlation between the two variables: r = 1 means perfect correlation, r = 0 means no linear correlation; a positive r means the variables are positively correlated, and a negative r means they are negatively correlated.
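A minimal sketch of computing r with np.corrcoef, using the same height data as the regression code below:

import numpy as np

x = [154, 157, 158, 159, 160, 161, 162, 163]
y = [155, 156, 159, 162, 161, 164, 165, 166]
print(np.corrcoef(x, y)[0, 1])   # Pearson correlation coefficient r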

import numpy as np
import matplotlib.pyplot as plt

x = [154, 157, 158, 159, 160, 161, 162, 163]   # mothers' heights
y = [155, 156, 159, 162, 161, 164, 165, 166]   # daughters' heights

n = len(x)
mx = np.mean(x)
my = np.mean(y)
bxy = 0
bxx = 0
for i in range(n):
    bxy += x[i] * y[i]
    bxx += x[i] * x[i]

# least-squares slope and intercept
b = (bxy - n * mx * my) / (bxx - n * mx * mx)
a = my - b * mx
x1 = np.linspace(150,175, 100)
y1=b*x1+a
plt.plot(x,y,'go',markersize=8)
plt.plot(x1, y1, c='orange')
plt.grid(True)
plt.show()

Example 3: Predicting house prices

import pandas as pd
from io import StringIO
import matplotlib.pyplot as plt
from sklearn import linear_model
csv_data='square_feet,price\n150,6450\n200,7450\n250,8450\n300,9450\n350,11450\n400,15450\n600,18450\n'
df = pd.read_csv(StringIO(csv_data))
print(df)
regr = linear_model.LinearRegression()
regr.fit(df['square_feet'].values.reshape(-1,1),df['price'])
# a is the slope, b is the intercept
a, b = regr.coef_, regr.intercept_
plt.scatter(df['square_feet'], df['price'], color='blue')   # scatter plot of the data
plt.plot(df['square_feet'], regr.predict(df['square_feet'].values.reshape(-1, 1)), color='red', linewidth=4)   # fitted line
plt.show()
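As a quick usage note for the fitted model above, a prediction for a hypothetical 500-square-foot house (500 is an assumed value, not part of the data):

print("slope:", a, "intercept:", b)
print(regr.predict([[500]]))    # predicted price for 500 square feet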

Example 4: Predicting exam scores from study time

import matplotlib.pyplot as plt
from pandas import DataFrame
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

examDict = {'学习时间': [0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50],
            '分数': [10, 22, 13, 43, 20, 22, 33, 50, 62, 48, 55, 75, 62, 73, 81, 76, 64, 82, 90, 93]}
examDf = DataFrame(examDict)
plt.scatter(examDf.学习时间, examDf.分数, color='b', label="Exam Data")   # study time on the x-axis, score on the y-axis
plt.xlabel("Hours")
plt.ylabel("Score")
plt.show()

# Scatter plots of the training and test sets
rDf = examDf.corr()         # correlation matrix between study time and score
print(rDf)
exam_X = examDf.学习时间     # feature: study time
exam_Y = examDf.分数         # target: exam score
X_train, X_test, Y_train, Y_test = train_test_split(exam_X, exam_Y, train_size=0.8)
plt.scatter(X_train, Y_train, color="blue", label="train data") 
plt.scatter(X_test, Y_test, color="red", label="test data")
plt.show()

# Fit a linear regression model
model = LinearRegression()
X_train = X_train.values.reshape(-1, 1)
X_test = X_test.values.reshape(-1, 1)
model.fit(X_train, Y_train)
a = model.intercept_    # intercept
b = model.coef_         # regression coefficient (slope)
print("Best-fit line: intercept", a, ", coefficient:", b)

# Plot the fitted line together with the training and test data
y_train_pred = model.predict(X_train)
plt.plot(X_train, y_train_pred, color="yellow", linewidth=3, label="best line")
plt.scatter(X_train, Y_train, color="blue", label="train data")
plt.scatter(X_test, Y_test, color="red", label="test data")
plt.show()
score = model.score(X_test, Y_test)   # R² on the test set
print(score)

Example 5: Multiple regression

import pandas as pd
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Read the data set with read_csv
adv_data = pd.read_csv("test4.csv")
# Drop the columns we do not need (here all columns are kept)
new_adv_data = adv_data.iloc[:, :]
# Inspect the data set
print('head:', new_adv_data.head(), '\nShape:', new_adv_data.shape)

print(new_adv_data.describe())
# Check for missing values
print(new_adv_data.isnull().sum())
new_adv_data.boxplot()
plt.show()

print(new_adv_data.corr())
X_train,X_test,Y_train,Y_test = train_test_split(new_adv_data.iloc[:,:3],new_adv_data.sales,train_size=.80)
print("原始数据特征:",new_adv_data.iloc[:,:3].shape, ",训练数据特征:",X_train.shape,",测试数据特征:",X_test.shape)
print("原始数据标签:",new_adv_data.sales.shape,",训练数据标签:",Y_train.shape,",测试数据标签:",Y_test.shape)

model = LinearRegression()
model.fit(X_train,Y_train)
a  = model.intercept_
b = model.coef_
print("最佳拟合线:截距",a,",回归系数:",b)

score = model.score(X_test, Y_test)   # R² on the test set
print(score)

Y_pred = model.predict(X_test)
print(Y_pred)
plt.plot(range(len(Y_pred)),Y_pred,'b',label="predict")
plt.show()

# Y_pred and Y_test already come from the split and prediction above; no need to predict or re-split here.
plt.figure()
plt.plot(range(len(Y_pred)), Y_pred, 'b', label="predict")
plt.plot(range(len(Y_test)), Y_test, 'r', label="test")
plt.legend(loc="upper right")
plt.xlabel("the number of sales")
plt.ylabel('value of sales')
plt.show()