Today begins my journey of catching up on the Logistic Regression assignment~
1. Logistic Regression
Data preparation
# Machine Learning Exercise 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Use a raw string so the backslashes in the Windows path are not treated as escapes
path = r'E:\PyCharm\数据\ex2data1.txt'
data = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
print(data.head())

# Split the samples by label so the two classes can be plotted separately
positive = data[data['Admitted'].isin([1])]
negative = data[data['Admitted'].isin([0])]
fig, ax = plt.subplots(figsize=(12, 8))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=50, c='b', marker='o', label='Admitted')
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=50, c='r', marker='x', label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 Score')
ax.set_ylabel('Exam 2 Score')
plt.show()
2. Sigmoid and the Cost Function: Model and Implementation
Cost function
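For reference, the hypothesis and cost implemented in the code below are the standard logistic regression model and cross-entropy cost:

$$h_\theta(x) = g(\theta^T x), \qquad g(z) = \frac{1}{1+e^{-z}}$$

$$J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\left[-y^{(i)}\log\big(h_\theta(x^{(i)})\big) - \big(1-y^{(i)}\big)\log\big(1-h_\theta(x^{(i)})\big)\right]$$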
Implementation
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Cost function (unregularized)
def cost(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    # Cross-entropy terms for the positive and negative labels
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / len(X)
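As a quick visual sanity check of the sigmoid (optional; a minimal sketch):

nums = np.arange(-10, 10, step=1)
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(nums, sigmoid(nums), 'r')
plt.show()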
# Add a column of ones so theta[0] acts as the intercept term
data.insert(0, 'Ones', 1)
cols = data.shape[1]
X = data.iloc[:, 0:cols-1]
y = data.iloc[:, cols-1:cols]
X = np.array(X.values)
y = np.array(y.values)
theta = np.zeros(3)
print(cost(theta, X, y))  # output: 0.69314718056
With theta initialized to all zeros, every prediction is sigmoid(0) = 0.5, so the cost is -ln(0.5) = ln 2 ≈ 0.6931, which matches the output above.
3. Gradient Descent
Strictly speaking, the function below only computes the gradient of the cost; the actual parameter search is delegated to SciPy's TNC optimizer in the next step.
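Each component of the gradient, which the loop below computes one parameter at a time, is:

$$\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)}$$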
def gradient(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    # Prediction error for every sample
    error = sigmoid(X * theta.T) - y
    # One partial derivative per parameter
    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        grad[i] = np.sum(term) / len(X)
    return grad
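The loop can also be collapsed into a single matrix product. A vectorized sketch, assuming plain NumPy arrays (1-D theta, 2-D X) rather than np.matrix:

def gradient_vectorized(theta, X, y):
    # X.T @ error sums (h(x) - y) * x_j over all samples for every j at once
    error = sigmoid(X @ theta) - y.ravel()
    return (X.T @ error) / len(X)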
# Find the optimal parameters with SciPy's truncated Newton (TNC) solver
import scipy.optimize as opt
result = opt.fmin_tnc(func=cost, x0=theta, fprime=gradient, args=(X, y))
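fmin_tnc belongs to SciPy's legacy interface; on recent SciPy versions the same fit can be written with scipy.optimize.minimize (a sketch using the same cost and gradient as above):

res = opt.minimize(fun=cost, x0=theta, args=(X, y), jac=gradient, method='TNC')
theta_opt = res.x  # optimized parameters, comparable to result[0]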
# Evaluate the training accuracy
def predict(theta, X):
    probability = sigmoid(X * theta.T)
    return [1 if x >= 0.5 else 0 for x in probability]

theta_min = np.matrix(result[0])
predictions = predict(theta_min, X)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
# Fraction of correct predictions, expressed as a percentage
accuracy = sum(correct) / len(correct) * 100
print('accuracy = {0}%'.format(accuracy))
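Because the model is linear in the two exam scores, the decision boundary is the line theta0 + theta1*x1 + theta2*x2 = 0. A minimal sketch for overlaying it on the scatter plot from earlier (only result, data, positive, and negative come from the code above):

t = np.array(result[0])
x1 = np.linspace(data['Exam 1'].min(), data['Exam 1'].max(), 100)
# Solve t[0] + t[1]*x1 + t[2]*x2 = 0 for x2
x2 = -(t[0] + t[1] * x1) / t[2]
fig, ax = plt.subplots(figsize=(12, 8))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=50, c='b', marker='o', label='Admitted')
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=50, c='r', marker='x', label='Not Admitted')
ax.plot(x1, x2, 'g-', label='Decision boundary')
ax.legend()
plt.show()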
4. Regularized Cost Function
Note that the parameter named learningRate in the code below actually plays the role of the regularization strength λ, not a gradient-descent step size.
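The regularized cost adds an L2 penalty over every parameter except the intercept θ0:

$$J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\left[-y^{(i)}\log\big(h_\theta(x^{(i)})\big) - \big(1-y^{(i)}\big)\log\big(1-h_\theta(x^{(i)})\big)\right] + \frac{\lambda}{2m}\sum_{j=1}^{n}\theta_j^2$$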
def cost(theta, X, y, learningRate):
    # learningRate plays the role of lambda, the regularization strength
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    # Penalize every parameter except the intercept theta[0]
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg

def gradientReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    error = sigmoid(X * theta.T) - y
    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        if i == 0:
            # The intercept term is not regularized
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[0, i])
    return grad
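A sketch of how these would be fitted; X2, y2, and theta2 are assumed here to come from the second dataset (ex2data2.txt) after polynomial feature mapping, which the code above does not show:

learningRate = 1  # assumed value of lambda for illustration
theta2 = np.zeros(X2.shape[1])  # one parameter per mapped feature column
result2 = opt.fmin_tnc(func=cost, x0=theta2, fprime=gradientReg, args=(X2, y2, learningRate))

This is also where the X2 and y2 used with scikit-learn in the next section come from.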
5. Using the sklearn linear_model Package
from sklearn import linear_model
# penalty='l2' selects L2 regularization (the letter l, not the digit 1);
# C is the inverse of the regularization strength
model = linear_model.LogisticRegression(penalty='l2', C=1.0)
model.fit(X2, y2.ravel())
model.score(X2, y2)  # output: 0.66101694915