My First Machine Learning Algorithm: Logistic Regression
Following this algorithm requires a grasp of matrix differentiation and the related basic operations, maximum-likelihood estimation from probability theory, partial derivatives from calculus, and basic use of the Python packages pandas, numpy, and matplotlib. (Many thanks to Tang Yudi's lectures on Bilibili, even though I stumbled along while following them.) On to the code:
# # Logistic Regression (Binary Classification)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline  # uncomment when running inside a Jupyter notebook
pdData = pd.read_csv('LogiReg_data.txt', header=None, names=['Exam1',
'Exam2', 'Admitted'])
pdData.head()
positive = pdData[pdData['Admitted'] == 1]
negative = pdData[pdData['Admitted'] == 0]
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(positive['Exam1'], positive['Exam2'], s=30, c='b',
marker='o', label='Admitted')
ax.scatter(negative['Exam1'], negative['Exam2'], s=30, c='r',
marker='x', label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 Score')
ax.set_ylabel('Exam 2 Score')
def sigmoid(z):
return 1 / (1 + np.exp(-z))
nums = np.arange(-10, 10, step=1)
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(nums, sigmoid(nums), 'r')
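# A quick sanity check of what the plot shows (these asserts are added here,
# not part of the original notebook): sigmoid(0) = 0.5, all outputs squashed
# into (0, 1), and the symmetry sigmoid(-z) = 1 - sigmoid(z):
assert sigmoid(0) == 0.5
assert 0 < sigmoid(-10) < sigmoid(10) < 1
assert np.isclose(sigmoid(-3), 1 - sigmoid(3))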
def model(X, theta):
return sigmoid(np.dot(X, theta.T))
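# In matrix form, `model` evaluates the hypothesis for every sample at once:
# $$h_\theta(x) = \sigma(\theta^T x) = \frac{1}{1 + e^{-\theta^T x}}$$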
# Insert a column of ones for the bias (intercept) term
pdData.insert(0, 'Ones', 1)
pdData.head()
# Convert the pandas DataFrame to a plain NumPy array
orig_data = pdData.values
cols = orig_data.shape[1]
# Split into X (features, including the ones column) and y (labels)
X = orig_data[:, 0:cols-1]
y = orig_data[:, cols-1:cols]
# Initialize the three parameters (bias plus one weight per exam)
theta = np.zeros([1, 3])
# Cost function: the negative of the average log-likelihood
def cost(X, y, theta):
left = np.multiply(-y, np.log(model(X, theta)))
right = np.multiply(1 - y, np.log(1 - model(X, theta)))
return np.sum(left - right) / (len(X))
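# What `cost` computes is the average negative log-likelihood (cross-entropy):
# $$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y^{(i)}\log h_\theta(x^{(i)}) + (1-y^{(i)})\log\big(1-h_\theta(x^{(i)})\big)\Big]$$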
# Negating the log-likelihood turns gradient ascent into gradient descent
print(cost(X, y, theta))
# # Computing the gradient
def gradient(X, y, theta):
grad = np.zeros(theta.shape)
error = (model(X, theta) - y).ravel()
for j in range(len(theta.ravel())):
term = np.multiply(error, X[:, j])
grad[0, j] = np.sum(term) / len(X)
return grad
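# The loop above fills in the partial derivatives
# $$\frac{\partial J}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)}$$
# An equivalent vectorized version (a sketch of my own, not from the course
# code) gets all the partials from a single matrix product:
def gradient_vectorized(X, y, theta):
    error = model(X, theta) - y       # shape (m, 1)
    return (X.T @ error).T / len(X)   # shape (1, 3), same as theta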
# # Gradient descent
STOP_ITER = 0  # stop after a fixed number of iterations
STOP_COST = 1  # stop when the cost barely changes
STOP_GRAD = 2  # stop when the gradient norm is small
def stopCriterion(stopType, value, threshold):
    # Three different stopping strategies
    if stopType == STOP_ITER:
        return value > threshold
    elif stopType == STOP_COST:
        return abs(value[-1] - value[-2]) < threshold
    elif stopType == STOP_GRAD:
        return np.linalg.norm(value) < threshold
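# Illustrative calls (the numbers are made up for demonstration):
print(stopCriterion(STOP_ITER, 5001, 5000))                            # True: budget used up
print(stopCriterion(STOP_COST, [0.6300000, 0.6299999], 1e-6))          # True: cost barely moved
print(stopCriterion(STOP_GRAD, np.array([[0.01, 0.02, 0.01]]), 0.05))  # True: tiny gradient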
# Shuffle the rows so batches are drawn in random order
def shuffleData(data):
np.random.shuffle(data)
cols = data.shape[1]
X = data[:, 0:cols-1]
y = data[:, cols-1:]
return X, y
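# Note: np.random.shuffle reorders `data` in place, so each call to shuffleData
# also permutes the caller's array (orig_data here). Row order never matters in
# this notebook, but a non-mutating variant is easy (a sketch of my own;
# shuffleDataCopy is not in the original code):
def shuffleDataCopy(data):
    shuffled = np.random.permutation(data)  # shuffled copy; the original stays intact
    cols = shuffled.shape[1]
    return shuffled[:, 0:cols-1], shuffled[:, cols-1:]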
import time
def descent(data, theta, batchSize, stopType, thresh, alpha):
    # Gradient descent solver
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # position within the current batch
    X, y = shuffleData(data)
    grad = np.zeros(theta.shape)  # the computed gradient
    costs = [cost(X, y, theta)]  # cost history
while True:
        grad = gradient(X[k:k+batchSize], y[k:k+batchSize], theta)
        k += batchSize  # advance by one batch of samples
        if k >= data.shape[0]:  # a full pass through the data
            k = 0
            X, y = shuffleData(data)  # reshuffle
        theta = theta - alpha * grad  # parameter update
        costs.append(cost(X, y, theta))  # record the new cost
        i += 1
if stopType == STOP_ITER:
value = i
elif stopType == STOP_COST:
value = costs
elif stopType == STOP_GRAD:
value = grad
        if stopCriterion(stopType, value, thresh):
            break
return theta, i-1, costs, grad, time.time() - init_time
def runExpe(data, theta, batchSize, stopType, thresh, alpha):
    theta, n_iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)
    # Crude check: raw exam scores exceed 2, standardized features do not
    name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
name += " data - learning rate: {} - ".format(alpha)
if batchSize==n: strDescType = "Gradient"
elif batchSize==1: strDescType = "Stochastic"
else: strDescType = "Mini-batch ({})".format(batchSize)
name += strDescType + " descent - Stop: "
if stopType == STOP_ITER: strStop = "{} iterations".format(thresh)
elif stopType == STOP_COST: strStop = "costs change < {}".format(thresh)
else: strStop = "gradient norm < {}".format(thresh)
name += strStop
    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
        name, theta, n_iter, costs[-1], dur))
fig, ax = plt.subplots(figsize=(12,4))
ax.plot(np.arange(len(costs)), costs, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title(name.upper() + ' - Error vs. Iteration')
return theta
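# runExpe labels a run "Scaled" when the feature columns no longer look like raw
# exam scores; the code shown here never builds such data. One way to produce it
# is standardization (a sketch -- assumes scikit-learn is installed, and the
# name scaled_data is mine):
from sklearn import preprocessing as pp
scaled_data = orig_data.copy()                     # leave orig_data untouched
scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])  # zero mean, unit variance per exam column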
# # Different stopping strategies
# ## Stop after a fixed number of iterations
# Full-batch gradient descent: every update uses all samples
n = 100  # number of training samples; batchSize == n means full batch
runExpe(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001)
# ## Stop based on the change in cost
# With a threshold of 1e-6, this takes roughly 110,000 iterations
runExpe(orig_data, theta, n, STOP_COST, thresh=0.000001, alpha=0.001)
# ## Stop based on the gradient norm
# With a threshold of 0.05, this takes roughly 40,000 iterations
runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001)
# # Different gradient descent strategies
# ## Stochastic gradient descent (batchSize = 1)
runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001)
# Lower the learning rate and increase the iteration count to fix the instability above
runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002)
# ## Mini-batch gradient descent (batchSize = 16)
runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001)
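# Finally, to actually classify with a trained theta, threshold the predicted
# probability at 0.5 (a sketch; `predict` and the accuracy check are mine, not
# from the original notebook):
def predict(X, theta):
    return [1 if p >= 0.5 else 0 for p in model(X, theta).ravel()]

fitted_theta = runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001)
predictions = predict(X, fitted_theta)
accuracy = np.mean(np.array(predictions) == y.ravel())
print('accuracy = {:.0%}'.format(accuracy))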