[Machine Learning] Implementing Logistic Regression in Python

Importing packages

import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing as pp
%matplotlib inline

Reading the data

import os
path = './data' + os.sep + 'LogiReg_data.txt'
pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
pdData.head()
   Exam 1     Exam 2  Admitted
34.623660  78.024693         0
30.286711  43.894998         0
35.847409  72.902198         0
60.182599  86.308552         1
79.032736  75.344376         1
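A quick shape check (an illustrative snippet, not in the original post) confirms the file holds 100 samples with two exam scores and an admission label:

print(pdData.shape)  # (100, 3)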

Preprocessing the data

positive = pdData[pdData['Admitted'] == 1]  # admitted samples (useful for plotting)
negative = pdData[pdData['Admitted'] == 0]  # rejected samples

pdData.insert(0, 'Ones', 1)     # add a bias column of ones
orig_data = pdData.values       # .as_matrix() was removed in pandas 1.0; use .values
cols = orig_data.shape[1]
X = orig_data[:,0:cols-1]
y = orig_data[:,cols-1:cols]
theta = np.zeros([1, cols - 1])

scaled_data = orig_data.copy()
scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])  # standardize the two exam scores
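As a small side check (not part of the original walkthrough, reusing the np/pp imports above), pp.scale standardizes each column to zero mean and unit variance:

demo = np.array([[1.0, 10.0],
                 [2.0, 20.0],
                 [3.0, 30.0]])
demo_scaled = pp.scale(demo)
print(demo_scaled.mean(axis=0))  # ≈ [0. 0.]
print(demo_scaled.std(axis=0))   # [1. 1.]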

Logistic regression

Goal: build a classifier, i.e. solve for the three parameters $\theta_0$, $\theta_1$, $\theta_2$.

Then set a decision threshold and use it to predict the admission result.

Modules to implement

  • sigmoid: maps values to probabilities

  • model: returns the predicted values

  • cost: computes the loss for the given parameters

  • gradient: computes the gradient direction for each parameter

  • descent: performs the parameter updates

  • accuracy: computes the accuracy

The sigmoid function

$$g(z) = \frac{1}{1+e^{-z}}$$

def sigmoid(z):
    return 1 / (1 + np.exp(-z))
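A quick sanity check of its range (an illustrative snippet, not from the original post): large negative inputs map near 0, zero maps to exactly 0.5, and large positive inputs map near 1.

print(sigmoid(np.array([-10, 0, 10])))
# [4.53978687e-05 5.00000000e-01 9.99954602e-01]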

The model function

$$\begin{pmatrix}1 & x_{1} & x_{2}\end{pmatrix} \times \begin{pmatrix}\theta_{0}\\ \theta_{1}\\ \theta_{2}\end{pmatrix} = \theta_{0} + \theta_{1}x_{1} + \theta_{2}x_{2}$$

def model(X, theta):
    return sigmoid(np.dot(X, theta.T))
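With the bias column, X has shape (100, 3) and theta has shape (1, 3), so np.dot(X, theta.T) is (100, 1); while theta is still all zeros, every prediction is 0.5 (an illustrative check, not from the original post):

print(model(X, theta).shape)  # (100, 1)
print(model(X, theta)[:3])    # three rows, each [0.5]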

The cost function

Negating the log-likelihood gives the per-sample loss

$$D(h_\theta(x), y) = -y\log(h_\theta(x)) - (1-y)\log(1-h_\theta(x))$$

and averaging over the samples gives the cost

$$J(\theta) = \frac{1}{n}\sum_{i=1}^{n} D(h_\theta(x_i), y_i)$$

def cost(X, y, theta):
    left = - np.multiply(y, np.log(model(X, theta)))
    right = np.multiply(1-y, np.log(1 - model(X, theta)))
    return np.sum(left - right) / len(X)
cost(X, y, theta)

0.6931471805599453

With θ initialized to zeros, every prediction is sigmoid(0) = 0.5, so the initial cost is −log(0.5) = log 2 ≈ 0.6931, as expected.

The gradient function

$$\frac{\partial J}{\partial \theta_j} = -\frac{1}{m}\sum_{i=1}^m \left(y_i - h_\theta(x_i)\right)x_{ij}$$

def gradient(X, y, theta):
    grad = np.zeros(theta.shape)
    error = (model(X, theta)- y).ravel()
    for j in range(len(theta.ravel())):
        term = np.multiply(error, X[:,j])
        grad[0,j] = np.sum(term)/len(X)
    return grad
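The loop above mirrors the formula term by term. A vectorized equivalent (a sketch under the same shapes, not the author's code) computes all partial derivatives in one matrix product and should match the looped version:

def gradient_vectorized(X, y, theta):
    # (1/m) * X^T (h - y), transposed to theta's (1, n_features) layout
    error = model(X, theta) - y        # shape (m, 1)
    return (X.T @ error).T / len(X)    # shape (1, n_features)

assert np.allclose(gradient(X, y, theta), gradient_vectorized(X, y, theta))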

The descent function

STOP_ITER = 0
STOP_COST = 1
STOP_GRAD = 2

def stopCriterion(type, value, threshold):
    # three different stopping strategies
    if type == STOP_ITER:        return value > threshold
    elif type == STOP_COST:      return abs(value[-1]-value[-2]) < threshold
    elif type == STOP_GRAD:      return np.linalg.norm(value) < threshold

# shuffle the rows (np.random.shuffle permutes the array in place,
# so the caller's array is reordered as a side effect)
def shuffleData(data):
    np.random.shuffle(data)
    cols = data.shape[1]
    X = data[:, 0:cols-1]
    y = data[:, cols-1:]
    return X, y

def descent(data, theta, batchSize, stopType, thresh, alpha):
    # gradient descent; batchSize selects batch / mini-batch / stochastic updates
#     init_time = time.time()          # uncomment to time the run
    i = 0                              # iteration counter
    k = 0                              # start index of the current batch
    X, y = shuffleData(data)
    grad = np.zeros(theta.shape)
    costs = [cost(X, y, theta)]
    
    while True:
        grad = gradient(X[k:k+batchSize], y[k:k+batchSize], theta)
        k += batchSize
        if k >= n:                     # n (the sample count) is a global defined below
            k = 0
            X, y = shuffleData(data)   # reshuffle after a full pass over the data
        theta = theta - alpha*grad     # update the parameters
        costs.append(cost(X, y, theta))
        i += 1
        
        if stopType == STOP_ITER:       value = i
        elif stopType == STOP_COST:     value = costs
        elif stopType == STOP_GRAD:     value = grad
        if stopCriterion(stopType, value, thresh): break
            
#     return theta, i-1, costs, grad, time.time() - init_time   # richer return, if timing
    return theta
n = 100  # batch size; n = 1 gives stochastic gradient descent (100 = full batch here)
theta = descent(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001)
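The same driver accepts the other stopping strategies and batch sizes. The calls below are illustrative only; the thresholds and learning rates are hypothetical, not tuned values from the post:

# stop once the cost change between iterations drops below 1e-6
# theta = descent(orig_data, theta, 100, STOP_COST, thresh=0.000001, alpha=0.001)

# mini-batches of 16, stop once the gradient norm drops below 0.05
# theta = descent(orig_data, theta, 16, STOP_GRAD, thresh=0.05, alpha=0.001)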

The accuracy function

# apply the 0.5 decision threshold
def predict(X, theta):
    return [1 if x >= 0.5 else 0 for x in model(X, theta)]

# note: theta above was fitted on the unscaled orig_data, so evaluating it on the
# scaled features is only consistent if descent is rerun on scaled_data
scaled_X = scaled_data[:, :3]
y = scaled_data[:, 3]
predictions = predict(scaled_X, theta)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
accuracy = 100 * sum(correct) // len(correct)  # the original `% len(correct)` (modulo) only happened to give the right number
print ('accuracy = {0}%'.format(accuracy))

accuracy = 82%
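As an optional cross-check (not part of the original post), scikit-learn's built-in LogisticRegression can be fit on the same standardized features to give an independent baseline for the hand-rolled implementation:

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()
clf.fit(scaled_data[:, 1:3], scaled_data[:, 3])            # two standardized exam scores vs. the label
print(clf.score(scaled_data[:, 1:3], scaled_data[:, 3]))   # mean accuracy on the training data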
