[Machine Learning] Logistic Regression Source Code (Python)

For the finer details of logistic regression and its sklearn usage, see the earlier article: the sklearn implementation.
This post first derives where logistic regression comes from and works out its gradient, then walks through a complete from-scratch implementation with fairly detailed comments.
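Since the derivation figure from the original post is not recoverable here, what follows is a sketch of the standard derivation that the code below implements (notation: X is the design matrix with a leading column of ones, w the weight vector with the bias as its first entry).

The model passes a linear score through the sigmoid to get a probability:

$$P(y=1 \mid x) = \hat{y} = \sigma(w^\top x) = \frac{1}{1 + e^{-w^\top x}}$$

Maximizing the Bernoulli log-likelihood is equivalent to minimizing the cross-entropy loss:

$$J(w) = -\sum_{i=1}^{n} \Big[\, y_i \log \hat{y}_i + (1 - y_i) \log (1 - \hat{y}_i) \,\Big]$$

Using $\sigma'(z) = \sigma(z)\,(1 - \sigma(z))$, the gradient simplifies to the same form as linear regression:

$$\nabla_w J = X^\top (\hat{y} - y), \qquad w \leftarrow w - \eta\, X^\top (\hat{y} - y)$$

which is exactly the w_grad and weight update used in fit below.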

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize

def sigmoid(x):
    return 1 / (1 + np.exp(-x))
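
# Note (not in the original post): for large negative x, np.exp(-x) overflows
# and NumPy emits a RuntimeWarning, although the result still evaluates to 0.0.
# If that matters, scipy.special.expit is a numerically stable drop-in:
#     from scipy.special import expit   # expit(x) == 1 / (1 + np.exp(-x))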

class LogisticRegression():
    """
        Parameters:
        -----------
        n_iterations: int
            梯度下降的轮数
        learning_rate: float
            梯度下降学习率
    """
    def __init__(self, learning_rate=.1, n_iterations=4000):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations
        
    def initialize_weights(self, n_features):
        # Initialize weights uniformly in [-1/sqrt(n_features), 1/sqrt(n_features)]
        limit = np.sqrt(1 / n_features)
        w = np.random.uniform(-limit, limit, (n_features, 1))
        # Prepend the bias b as the first entry of the weight vector
        b = 0
        self.w = np.insert(w, 0, b, axis=0)
        
    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.initialize_weights(n_features)
        # Prepend a column of ones to X so the bias is absorbed into w
        X = np.insert(X, 0, 1, axis=1)
        y = np.reshape(y, (n_samples, 1))

        for i in range(self.n_iterations):
            h = X.dot(self.w)
            y_pred = sigmoid(h)
            # Gradient of J(w) from the derivation above; it has the same
            # form as the linear-regression gradient.
            # If you write it as (y - y_pred) instead, the update flips sign:
            #     w_grad = X.T.dot(y - y_pred)
            #     self.w = self.w + self.learning_rate * w_grad
            w_grad = X.T.dot(y_pred - y)
            self.w = self.w - self.learning_rate * w_grad
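            # Optional variant (not in the original post): dividing by
            # n_samples uses the mean gradient instead of the sum, which
            # makes learning_rate less sensitive to the dataset size:
            #     w_grad = X.T.dot(y_pred - y) / n_samples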
        
        
    def predict(self, X):
        X = np.insert(X, 0, 1, axis=1)
        # np.round thresholds at 0.5: probabilities below 0.5 become class 0,
        # probabilities above 0.5 become class 1
        y_pred = np.round(sigmoid(X.dot(self.w)))
        return y_pred.astype(int)
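
    def predict_proba(self, X):
        # Hypothetical helper (not in the original post): return the raw
        # sigmoid output P(y=1|x) instead of the rounded 0/1 label.
        X = np.insert(X, 0, 1, axis=1)
        return sigmoid(X.dot(self.w))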
    
    
if __name__ == '__main__':
    # Keep only iris classes 1 and 2 and relabel them 0/1 (binary task);
    # normalize scales each sample to unit length
    data = datasets.load_iris()
    X = normalize(data.data[data.target != 0])
    y = data.target[data.target != 0]
    y[y == 1] = 0
    y[y == 2] = 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)

    clf = LogisticRegression()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    y_pred = np.reshape(y_pred, y_test.shape)

    accuracy = accuracy_score(y_test, y_pred)
    print("Accuracy:", accuracy)
    
