A Python Implementation of Logistic Regression (LR)

LR is implemented here in two ways, IRLS and GradDes. The former updates the parameters with the Newton–Raphson algorithm; the latter uses gradient descent.
The code is on GitHub.

IRLS:

Iteratively reweighted least squares, which uses the Newton–Raphson algorithm to update the weights.

Implemented following PRML Section 4.3.3.
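
For reference, the update rule the code below implements is the Newton–Raphson step rewritten as a weighted least-squares problem, as derived in PRML Section 4.3.3 (restated here in the book's notation):

% IRLS update for logistic regression (PRML, Section 4.3.3)
\begin{aligned}
\mathbf{w}^{\mathrm{new}} &= \mathbf{w}^{\mathrm{old}} - (\Phi^{\top} R \Phi)^{-1} \Phi^{\top} (\mathbf{y} - \mathbf{t}) \\
                          &= (\Phi^{\top} R \Phi)^{-1} \Phi^{\top} R \mathbf{z}, \\
\mathbf{z} &= \Phi \mathbf{w}^{\mathrm{old}} - R^{-1} (\mathbf{y} - \mathbf{t}), \qquad R_{nn} = y_n (1 - y_n)
\end{aligned}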
Code:

# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import csv
from sklearn import preprocessing

def load_data(filename, add_one=True):
    # The first CSV row holds n_samples and n_features; each remaining row
    # holds the feature values followed by the label. If add_one is True,
    # a bias column of ones is prepended to the data matrix.
    with open(filename) as f:
        csv_file = csv.reader(f)
        temp = next(csv_file)
        n_samples = int(temp[0]); n_features = int(temp[1])
        add = 1 if add_one else 0
        datamat = np.empty((n_samples, n_features + add), dtype=np.float64)
        labelmat = np.empty((n_samples,), dtype=int)  # np.int was removed from NumPy
        if add_one:
            datamat[:, 0] = 1
        for i, line in enumerate(csv_file):
            datamat[i, add:] = line[:-1]
            labelmat[i] = line[-1]
    return datamat, labelmat

def sigmoid(inX):
    return 1.0/(1+np.exp(-inX))

def fit(X, t):
    # IRLS: repeatedly solve the weighted least-squares problem
    # w = (X^T R X)^-1 X^T R z  (PRML, Section 4.3.3)
    n_samples = X.shape[0]; n_feature = X.shape[1]
    X = np.mat(X); t = t.reshape(n_samples, 1)
    w = np.zeros((n_feature, 1))
    iter_num = 50
    for i in range(iter_num):
        y = sigmoid(X * w)
        R = np.mat(np.eye(n_samples))       # weighting matrix, R_nn = y_n(1 - y_n)
        for j in range(n_samples):
            R[j, j] = y[j] * (1 - y[j])
        z = X * w - R.I * (y - t)           # working response z
        w = (X.T * R * X).I * X.T * R * z   # Newton–Raphson step
    return w

if __name__ == '__main__':
    X, y = load_data('../../datasets/data/LRSet.csv')
    X[:, 1:] = preprocessing.scale(X[:, 1:])   # standardize features, keep the bias column
    plt.figure(1, figsize=(8, 6))
    color = np.array(['b'] * X.shape[0])
    color[y == 1] = 'r'
    plt.scatter(X[:, 1], X[:, 2], c=color)

    w = fit(X, y)
    x_min = X[:, 1].min() - 0.5; x_max = X[:, 1].max() + 0.5
    print(x_min, x_max)

    def line(x0):
        # decision boundary: w0 + w1*x + w2*y = 0  =>  y = (-w0 - w1*x) / w2
        return (-w[0, 0] - w[1, 0] * x0) / w[2, 0]

    print(line(x_min))
    plt.plot([x_min, x_max], [line(x_min), line(x_max)])

    plt.show()
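
As a quick sanity check on the fitted weights (an illustrative addition, not part of the original script), the training accuracy can be computed from the same X, y, and w:

# Hypothetical sanity check: threshold sigmoid(Xw) at 0.5 and compare
# against the labels. Assumes X, y, w from the script above.
probs = np.asarray(sigmoid(np.mat(X) * w)).ravel()
preds = (probs >= 0.5).astype(int)
print('training accuracy:', (preds == y).mean())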

GradDes:

Gradient descent: w(τ+1) = w(τ) − α·∇E(w(τ)).

Implemented following PRML Section 4.3.3.
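
For the cross-entropy error of logistic regression, PRML Section 4.3.3 gives the gradient in closed form; this is exactly the quantity the fit below steps along:

% Gradient of the cross-entropy error, y = sigma(Phi w), t the targets
\nabla E(\mathbf{w}) = \Phi^{\top} (\mathbf{y} - \mathbf{t}), \qquad
\mathbf{w}^{(\tau+1)} = \mathbf{w}^{(\tau)} - \alpha \, \nabla E(\mathbf{w}^{(\tau)})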

Code:

# -*- coding:utf-8 -*-
# color reference: http://www.cnblogs.com/darkknightzh/p/6117528.html

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn import preprocessing

def get_data():
    # Two Gaussian blobs, sheared by a linear transformation and jittered
    # with uniform noise so the classes overlap slightly.
    centers = [[-5, 3], [4, 1.5]]
    X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
    transformation = [[0.4, 0.2], [-0.4, 1.2]]
    X = np.dot(X, transformation) + np.random.rand(1000, 2) * 2.8
    return X, y

def sigmoid(inX):
    return 1.0/(1+np.exp(-inX))

def fit(X, t):
    # Batch gradient descent on the cross-entropy error:
    # w <- w - alpha * X^T (y - t)
    n_samples = X.shape[0]; n_features = X.shape[1]
    w = np.zeros(n_features)
    iter_num = 50; alpha = 0.05
    for i in range(iter_num):
        y = sigmoid(np.dot(X, w))
        w -= alpha * np.dot(X.T, y - t)   # gradient step, scaled by the learning rate
    return w

if __name__ == '__main__':
    X, y = get_data()
    X = preprocessing.scale(X)                   # standardize both features
    X = np.column_stack([[1] * X.shape[0], X])   # prepend a bias column of ones
    color = np.array(['r'] * y.shape[0])
    color[y == 1] = 'g'
    plt.figure(1, figsize=(8, 6))
    plt.scatter(X[:, 1], X[:, 2], c=color)

    w = fit(X, y)
    y_min = X[:, 2].min(); y_max = X[:, 2].max()

    def line(y0):
        # decision boundary: w0 + w1*x + w2*y = 0  =>  x = (-w0 - w2*y) / w1
        return (-w[0] - w[2] * y0) / w[1]

    plt.plot([line(y_min), line(y_max)], [y_min, y_max])

    plt.show()
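
To cross-check the hand-rolled fit (an illustrative addition, not part of the original post), the same data can be passed to scikit-learn's LogisticRegression; its boundary should be close to the one plotted above:

# Hypothetical cross-check with scikit-learn; assumes X (with bias column)
# and y from the script above. sklearn fits its own intercept, so the
# manually added bias column is dropped here.
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()
clf.fit(X[:, 1:], y)
print('sklearn training accuracy:', clf.score(X[:, 1:], y))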