Machine Learning Homework 5

# Machine Learning Exercise 5: Bias and Variance
import numpy as np
import scipy.io as sio
import scipy.optimize as opt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def load_data():  # load the data and flatten each array into 1-D
    d = sio.loadmat(r'E:\PyCharm\数据\ex5data1.mat')  # raw string so the backslashes are not treated as escapes
    return map(np.ravel, [d['X'], d['y'], d['Xval'], d['yval'], d['Xtest'], d['ytest']])
X, y, Xval, yval, Xtest, ytest = load_data()
df = pd.DataFrame({'water_level': X, 'flow': y})
sns.lmplot(x='water_level', y='flow', data=df, fit_reg=False, height=7)  # 'size' was renamed to 'height' in seaborn 0.9
# plt.show()
# reshape each 1-D array to a column and prepend a column of ones (the intercept term)
X, Xval, Xtest = [np.insert(x.reshape(x.shape[0], 1), 0, np.ones(x.shape[0]), axis=1) for x in (X, Xval, Xtest)]
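# A quick shape check (a sanity sketch, not part of the assignment): after the
# insert, each design matrix should be (m, 2) - the intercept column plus the feature.
print(X.shape, Xval.shape, Xtest.shape)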
def cost(theta, X, y):  # unregularized squared-error cost
    m = X.shape[0]
    inner = X @ theta - y
    square_sum = inner.T @ inner
    return square_sum / (2 * m)
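# The function above implements the least-squares objective
#   J(theta) = (1/(2m)) * sum_i (theta^T x_i - y_i)^2.
# Sanity check at theta = [1, 1] (the ex5 write-up reports roughly 303.99 for this;
# quoted from memory, so treat it as a rough reference):
print(cost(np.ones(X.shape[1]), X, y))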
def gradient(theta, X, y):  # vectorized gradient of the unregularized cost
    m = X.shape[0]
    inner = X.T @ (X @ theta - y)
    return inner / m
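# This is the vectorized derivative dJ/dtheta = (1/m) * X^T (X theta - y).
# Same kind of sanity check at theta = [1, 1] (ex5 reports roughly [-15.30, 598.25];
# again quoted from memory):
print(gradient(np.ones(X.shape[1]), X, y))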
def regularized_gradient(theta, X, y, l=1):
    m = X.shape[0]
    regularized_term = theta.copy()
    regularized_term[0] = 0  # do not penalize the intercept
    regularized_term = (l / m) * regularized_term  # scale by l, not 1
    return gradient(theta, X, y) + regularized_term
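# The penalty derivative is (l/m) * theta_j for j >= 1; setting regularized_term[0] = 0
# keeps the intercept unpenalized, matching the course's convention.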
def linear_regression_np(X, y, l=1):
    theta = np.ones(X.shape[1])
    res = opt.minimize(fun=regularized_cost, x0=theta, args=(X, y, l),  # pass l through, not the literal 1
                       method='TNC', jac=regularized_gradient, options={'disp': True})
    return res
def regularized_cost(theta, X, y, l=1):
    m = X.shape[0]
    regularized_term = (l / (2 * m)) * np.power(theta[1:], 2).sum()  # scale by l; skip theta[0]
    return cost(theta, X, y) + regularized_term
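# Full objective: J(theta) = (1/(2m)) * sum (X theta - y)^2 + (l/(2m)) * sum_{j>=1} theta_j^2.
# theta[0] is excluded from the penalty here, consistent with regularized_gradient above.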
theta = np.ones(X.shape[1])  # one parameter per column (intercept + feature), not per row
final_theta = linear_regression_np(X, y, l=0).get('x')
b = final_theta[0]  # intercept
m = final_theta[1]  # slope
plt.scatter(X[:, 1], y, label="Training data")
plt.plot(X[:, 1], X[:, 1] * m + b, label="Prediction")
plt.legend(loc=2)
# plt.show()
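# The scatter is clearly nonlinear, so the straight line underfits; the learning
# curve below shows what this high-bias behaviour looks like as m grows.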

training_cost, cv_cost = [], []
m = X.shape[0]
for i in range(1, m + 1):  # train on the first i examples only
    res = linear_regression_np(X[:i, :], y[:i], l=0)
    tc = regularized_cost(res.x, X[:i, :], y[:i], l=0)
    cv = regularized_cost(res.x, Xval, yval, l=0)
    training_cost.append(tc)
    cv_cost.append(cv)
plt.plot(np.arange(1, m+1), training_cost, label='training cost')
plt.plot(np.arange(1, m+1), cv_cost, label='cv cost')
plt.legend(loc=1)
plt.show()
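# Reading the plot: training and cv cost converge to a similarly high value, and
# adding more data does not help - the classic signature of underfitting (high bias).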

def prepare_poly_data(*args, power):
    def prepare(x):
        df = poly_features(x, power=power)
        ndarr = normalize_feature(df).values  # as_matrix() was removed from pandas; use .values
        return np.insert(ndarr, 0, np.ones(ndarr.shape[0]), axis=1)
    return [prepare(x) for x in args]
def poly_features(x, power, as_ndarray=False):
    data = {'f{}'.format(i): np.power(x, i) for i in range(1, power + 1)}
    df = pd.DataFrame(data)  # build a DataFrame with columns f1..f{power}
    return df.values if as_ndarray else df
X, y, Xval, yval, Xtest, ytest = load_data()  # reload the raw 1-D data for the polynomial pipeline
print(poly_features(X, power=3))

def normalize_feature(df):  # z-score normalize every column
    return df.apply(lambda column: (column - column.mean()) / column.std())
X_poly, Xval_poly, Xtest_poly = prepare_poly_data(X, Xval, Xtest, power=8)
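# A short sketch (reusing the helpers above) of the learning curve for the
# degree-8 polynomial with l=0: training cost near zero with a large gap to the
# cv cost is the signature of overfitting (high variance), motivating the
# lambda search below.
training_cost, cv_cost = [], []
m = X_poly.shape[0]
for i in range(1, m + 1):
    res = linear_regression_np(X_poly[:i, :], y[:i], l=0)
    training_cost.append(cost(res.x, X_poly[:i, :], y[:i]))
    cv_cost.append(cost(res.x, Xval_poly, yval))
plt.plot(np.arange(1, m + 1), training_cost, label='training cost')
plt.plot(np.arange(1, m + 1), cv_cost, label='cv cost')
plt.legend(loc=1)
plt.show()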

l_candidate = [0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10]
training_cost, cv_cost = [], []
for l in l_candidate:
    res = linear_regression_np(X_poly, y, l)
    tc = cost(res.x, X_poly, y)  # evaluate without the penalty term
    cv = cost(res.x, Xval_poly, yval)
    training_cost.append(tc)
    cv_cost.append(cv)
plt.plot(l_candidate, training_cost, label='training')
plt.plot(l_candidate, cv_cost, label='cross validation')
plt.legend(loc=2)
plt.xlabel('lambda')
plt.ylabel('cost')
plt.show()
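# Reading the plot: training cost rises monotonically with lambda (the fit is
# increasingly constrained), while cv cost dips and then rises; the lambda at
# the cv minimum is the one to pick, which the next line extracts.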
print(l_candidate[np.argmin(cv_cost)])  # lambda with the lowest cross-validation cost
for l in l_candidate:
    theta = linear_regression_np(X_poly, y, l).x  # use l, not the literal 1
    print('test cost(l={}) = {}'.format(l, cost(theta, Xtest_poly, ytest)))
