【Logistic Regression】

Building on linear regression, we use gradient descent to minimize the loss and find the optimal parameters; in addition, the sigmoid function is applied to the linear output at the end to map it to a probability.
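In symbols, with m training samples and the augmented matrix X_b (a leading column of ones for the intercept), the model, the cross-entropy loss, its gradient, and the update rule implemented by the code below are:

$$\hat{y} = \sigma(X_b \theta), \qquad \sigma(t) = \frac{1}{1 + e^{-t}}$$

$$J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log \hat{y}^{(i)} + (1 - y^{(i)}) \log(1 - \hat{y}^{(i)}) \right]$$

$$\nabla_{\theta} J(\theta) = \frac{1}{m} X_b^{T} (\hat{y} - y), \qquad \theta \leftarrow \theta - \eta \, \nabla_{\theta} J(\theta)$$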

import numpy as np

class LogisticRegression():
    def __init__(self):
        self._theta = None
        self._coef = None
        self._intercept = None

    def _sigmoid(self, t):
        return 1. / (1 + np.exp(-t))

    def fit(self, X_train, y_train, eta=0.01, n_iters=10000):
        def J(theta, X_b, y):  # cross-entropy loss
            y_hat = self._sigmoid(X_b.dot(theta))
            # clip to avoid log(0): np.log(0) emits a warning and returns -inf
            # rather than raising, so a bare try/except would not catch it
            y_hat = np.clip(y_hat, 1e-15, 1 - 1e-15)
            return -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
        
        def dJ(theta, X_b, y):  # vectorized gradient: X_b^T (y_hat - y) / m
            return (self._sigmoid(X_b.dot(theta)) - y).dot(X_b) / len(y)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # stop early once the loss changes by less than epsilon
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) <= epsilon:
                    break
                cur_iter += 1
            return theta
        
        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])  # prepend a column of ones for the intercept
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self._coef = self._theta[1:]
        self._intercept = self._theta[0]
        return self

    def predict_probability(self, X_predict):
        # returns P(y=1 | x) for each row of X_predict
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return self._sigmoid(X_b.dot(self._theta))

    def predict(self, X_predict):
        probability = self.predict_probability(X_predict)
        # classify as 1 when the predicted probability is at least 0.5
        return np.array(probability >= 0.5, dtype='int')
    
    def score(self, y_true, y_predict):
        # accuracy: fraction of correct predictions
        return np.sum(y_true == y_predict) / len(y_true)
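
A minimal usage sketch on synthetic data (everything below, including the variable names and the toy labeling rule x0 + x1 > 1, is illustrative rather than part of the original post):

import numpy as np

np.random.seed(0)

# toy binary data: label is 1 when the two features sum to more than 1
X = np.random.rand(200, 2)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)

clf = LogisticRegression()
clf.fit(X, y, eta=0.1, n_iters=10000)

y_pred = clf.predict(X)
print("accuracy:", clf.score(y, y_pred))   # near 1.0 on this easily separable toy set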