Andrew Ng's Machine Learning: ex2

Introduction

This post is a Python implementation of Exercise 2 from Andrew Ng's Machine Learning course. Exercise 2 deals with binary classification and comes with two datasets: a student admission dataset and a chip production dataset.
The datasets used in this post can be downloaded from the Baidu Netdisk link at the end of the article.

Student Admission Part: Code

Suppose you are the administrator of a university department and want to decide whether each applicant is admitted based on their scores on two exams.
You have data from previous applicants that can serve as a training set for logistic regression: for each training example you have the applicant's scores on the two exams and the final admission decision.

# import libs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# read original data
data = pd.read_csv('../ex2/data/ex2data1.txt',header=None,names=['score1','score2','result'])
data.head()
# outputs
#	score1		score2		result
#0	34.623660	78.024693	0
#1	30.286711	43.894998	0
#2	35.847409	72.902198	0
#3	60.182599	86.308552	1
#4	79.032736	75.344376	1

# split data 
is_in = data[data['result'] == 1]
not_in = data[data['result'].isin([0])]

# plot our data
fig,ax = plt.subplots(figsize=(20,10))
ax.scatter(is_in['score1'],is_in['score2'],s=50,c='b',marker='o',label='true')
ax.scatter(not_in['score1'],not_in['score2'],s=50,c='r',marker='x',label='false')
ax.legend()
ax.set_xlabel('score1')
ax.set_ylabel('score2')
plt.show()
# outputs

(Figure: data layout)

# use logistic regression
# define activation function; use the sigmoid here
def sigmoid(x):
    return 1/(1 + np.exp(-x))

# define cost function without regularization term
def cost_function(theta,X,y):
    X = np.matrix(X)
    y = np.matrix(y)
    theta = np.matrix(theta)
    y_pre = sigmoid(X * theta.T)
    first = np.multiply(-y,np.log(y_pre))
    second = np.multiply(1-y,np.log(1- y_pre))
    return np.sum(first-second)/len(X)
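
For reference, the cost implemented above is the standard (unregularized) logistic regression cost from the exercise, with the sigmoid as hypothesis:

$$h_\theta(x) = g(\theta^T x) = \frac{1}{1 + e^{-\theta^T x}}$$

$$J(\theta) = \frac{1}{m} \sum_{i=1}^{m} \left[ -y^{(i)} \log\left(h_\theta(x^{(i)})\right) - \left(1 - y^{(i)}\right) \log\left(1 - h_\theta(x^{(i)})\right) \right]$$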

# define the gradient of the cost, non-vectorized implementation (the descent step is left to the optimizer below)
def gradient(theta,X,y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    res = np.zeros(theta.ravel().shape[1])
    for i in range(int(theta.ravel().shape[1])):
        res[i] = np.sum(np.multiply(sigmoid(X * theta.T) - y, X[:,i]))/len(X)
    return res

# vectorized implementation
def gradient_vectorized(theta,X,y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    res = (sigmoid(X * theta.T) - y).T * X / len(X)
    return res
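
Both functions compute the same gradient of the cost; the element-wise form and its vectorized equivalent are:

$$\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)}, \qquad \nabla J(\theta) = \frac{1}{m} X^T \left( g(X\theta) - y \right)$$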

# get params ready
data.insert(0,'ones',1)
X = data.iloc[:,:-1]
cols = data.shape[1]
y = data.iloc[:,cols-1:cols]
theta = np.zeros(3)

# import lib
import scipy.optimize as opt
result = opt.fmin_tnc(func=cost_function, x0=theta, fprime=gradient, args=(X, y), approx_grad=False)
result
# outputs
# (array([-25.16131869,   0.20623159,   0.20147149]), 36, 0)
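
A side note (not in the original post): SciPy's documentation now recommends the generic minimize interface over the legacy fmin_* functions. A sketch of the equivalent call, reusing the same cost and gradient, would be:

# equivalent fit through scipy.optimize.minimize ('TNC' is the same solver)
res = opt.minimize(fun=cost_function, x0=theta, args=(X, y), method='TNC', jac=gradient)
res.x
# should be close to the theta returned by fmin_tnc above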

# plot decision boundary
plotting_x1 = np.linspace(30, 100, 100)
# X*theta.T = 0 is decision boundary. [[1,x1,x2]] * np.matrix(result[0]).T = 0
plotting_h1 = ( - result[0][0] - result[0][1] * plotting_x1) / result[0][2]

fig, ax = plt.subplots(figsize=(16,16))
ax.plot(plotting_x1, plotting_h1, 'y', label='Prediction')
ax.scatter(is_in['score1'],is_in['score2'],s=50,c='b',marker='o',label='true')
ax.scatter(not_in['score1'],not_in['score2'],s=50,c='r',marker='x',label='false')
ax.legend()
ax.set_xlabel('score1')
ax.set_ylabel('score2')
plt.show()
# outputs

(Figure: data1 decision boundary)

# define prediction function
def predict(theta, X):
    probability = sigmoid(X * theta.T)
    return [1 if x >= 0.5 else 0 for x in probability]
# test model
theta_min = np.matrix(result[0])
predictions = predict(theta_min, np.matrix(X.values))
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y.values.ravel())]
accuracy = sum(map(int, correct)) / len(correct) * 100
print ('accuracy = {0}%'.format(accuracy))
# accuracy = 89.0%
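
With the fitted parameters we can also score a single applicant. The short sketch below (not part of the original post; it reuses theta_min from above) computes the admission probability for exam scores of 45 and 85; the original exercise handout quotes a probability of about 0.776 for this input.

# admission probability for one hypothetical applicant with scores 45 and 85
prob = sigmoid(np.matrix([1, 45, 85]) * theta_min.T)
print(prob)
# expected to be close to 0.776 according to the original exercise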

Chip Production Part: Code

Suppose you are the production manager of a factory and you have the results of two tests for a batch of microchips; these test results determine whether each chip is accepted or rejected. You have some historical data to help you build a logistic regression model.

# read data
path =  '../ex2/data/ex2data2.txt'
data_init = pd.read_csv(path, header=None, names=['Test 1', 'Test 2', 'Accepted'])

# plot data
positive2 = data_init[data_init['Accepted'].isin([1])]
negative2 = data_init[data_init['Accepted'].isin([0])]

fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive2['Test 1'], positive2['Test 2'], s=50, c='b', marker='o', label='Accepted')
ax.scatter(negative2['Test 1'], negative2['Test 2'], s=50, c='r', marker='x', label='Rejected')
ax.legend()
ax.set_xlabel('Test 1 Score')
ax.set_ylabel('Test 2 Score')
plt.show()

(Figure: data2 layout)

# the original data cannot be separated by a linear classifier, so we need more features
degree = 6
data2 = data_init
x1 = data2['Test 1']
x2 = data2['Test 2']

data2.insert(3, 'Ones', 1)

for i in range(1, degree+1):
    for j in range(0, i+1):
        data2['F' + str(i-j) + str(j)] = np.power(x1, i-j) * np.power(x2, j)

data2.drop('Test 1', axis=1, inplace=True)
data2.drop('Test 2', axis=1, inplace=True)
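
The loop above reproduces the feature mapping from the exercise: every monomial of the two test scores up to total degree 6, which gives 28 features including the bias column:

$$\mathrm{mapFeature}(x_1, x_2) = \left[\, 1,\ x_1,\ x_2,\ x_1^2,\ x_1 x_2,\ x_2^2,\ x_1^3,\ \dots,\ x_1 x_2^5,\ x_2^6 \,\right]$$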

# define cost function with regularization term
def costReg(theta, X, y, lambd):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    reg = (lambd / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg
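
This matches the regularized cost from the exercise; note that theta_0 (the bias term) is excluded from the penalty:

$$J(\theta) = \frac{1}{m} \sum_{i=1}^{m} \left[ -y^{(i)} \log\left(h_\theta(x^{(i)})\right) - \left(1 - y^{(i)}\right) \log\left(1 - h_\theta(x^{(i)})\right) \right] + \frac{\lambda}{2m} \sum_{j=1}^{n} \theta_j^2$$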

# define gradient with regularization term
def gradientReg(theta, X, y, lambd):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    error = sigmoid(X * theta.T) - y
    for i in range(parameters):
        term = np.multiply(error, X[:,i])
        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((lambd / len(X)) * theta[0,i])
    return grad
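
Again theta_0 is left unpenalized, which is why the loop treats i == 0 separately:

$$\frac{\partial J(\theta)}{\partial \theta_0} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_0^{(i)}$$

$$\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)} + \frac{\lambda}{m} \theta_j \qquad (j \ge 1)$$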

# get params ready
cols = data2.shape[1]
X2 = data2.iloc[:,1:cols]
y2 = data2.iloc[:,0:1]
theta2 = np.zeros(cols-1)
X2 = np.array(X2.values)
y2 = np.array(y2.values)
# lambd controls the weight of the regularization term: a small lambd tends to overfit, a large one tends to underfit
lambd = 1
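
As a quick sanity check (a sketch, not part of the original post): with theta2 all zeros every prediction is 0.5, so the regularized cost should come out as -log(0.5), about 0.693, regardless of lambd, since the penalty term vanishes.

# cost at the all-zeros starting point; expected to be about 0.693 (= ln 2)
costReg(theta2, X2, y2, lambd)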

# fit model
result2 = opt.fmin_tnc(func=costReg, x0=theta2, fprime=gradientReg, args=(X2, y2, lambd))
result2
# outputs
#	(array([ 1.27271027,  0.62529965,  1.18111686, -2.01987399, -0.91743189,
#       	 -1.43166928,  0.12393227, -0.36553118, -0.35725403, -0.17516292,
#       	 -1.4581701 , -0.05098418, -0.61558552, -0.27469165, -1.19271298,
#       	 -0.2421784 , -0.20603297, -0.04466179, -0.27778952, -0.29539514,
#       	 -0.45645981, -1.04319155,  0.02779373, -0.29244872,  0.01555761,
#       	 -0.32742406, -0.1438915 , -0.92467487]), 32, 1)
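
The predict helper from the first part can be reused to check training accuracy with the regularized parameters (a sketch added here; the original MATLAB exercise reports about 83% train accuracy with lambda = 1, and this Python variant may differ slightly):

# training accuracy of the regularized model
theta_min2 = np.matrix(result2[0])
predictions2 = predict(theta_min2, np.matrix(X2))
correct2 = [1 if a == b else 0 for (a, b) in zip(predictions2, y2.ravel())]
print('train accuracy = {0}%'.format(100 * sum(correct2) / len(correct2)))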

# plot decision boundary
# since we expanded the features to fit the model, we need to apply the same mapping when searching for points that lie on the boundary
def hfunc2(theta, x1, x2):
    temp = theta[0][0]
    place = 0
    for i in range(1, degree+1):
        for j in range(0, i+1):
            temp+= np.power(x1, i-j) * np.power(x2, j) * theta[0][place+1]
            place+=1
    return temp

def find_decision_boundary(theta):
    t1 = np.linspace(-1, 1.5, 1000)
    t2 = np.linspace(-1, 1.5, 1000)

    coordinates = [(x, y) for x in t1 for y in t2]
    x_cord, y_cord = zip(*coordinates)
    h_val = pd.DataFrame({'x1':x_cord, 'x2':y_cord})
    h_val['hval'] = hfunc2(theta, h_val['x1'], h_val['x2'])

    decision = h_val[np.abs(h_val['hval']) < 2 * 10**-3]
    return decision.x1, decision.x2

fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive2['Test 1'], positive2['Test 2'], s=50, c='b', marker='o', label='Accepted')
ax.scatter(negative2['Test 1'], negative2['Test 2'], s=50, c='r', marker='x', label='Rejected')
ax.set_xlabel('Test 1 Score')
ax.set_ylabel('Test 2 Score')

x, y = find_decision_boundary(result2)
plt.scatter(x, y, c='y', s=10, label='Prediction')
ax.legend()
plt.show()

(Figure: data2 decision boundary)

Get the Dataset

Link: https://pan.baidu.com/s/1zteJBsMJ0GRwqRb5opOgwg  Extraction code: 78ah
