[Machine Learning] Logistic Regression


1. What Is Logistic Regression?

An algorithm is not better simply because it is more complex; the right choice depends on the actual use case.
CNNs and RNNs, for example, are demanding: they require very large amounts of data and considerable compute.


Taken by itself, logistic regression is a regression method: it uses a sample's features to fit the probability that an event occurs. If we stop at the probability, it is a regression problem; once we threshold that probability into a prediction, it can be viewed as a classification problem.
With some extensions (such as one-vs-rest), logistic regression can also handle multi-class problems.
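Concretely (this is what the implementation in section 4 computes), the model feeds a linear combination of the features through the sigmoid function, where x_b denotes the feature vector with a leading 1 appended for the intercept:

$$\hat{p} = \sigma(\theta^T x_b) = \frac{1}{1 + e^{-\theta^T x_b}}, \qquad \hat{y} = \begin{cases} 1, & \hat{p} \ge 0.5 \\ 0, & \hat{p} < 0.5 \end{cases}$$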

# The Sigmoid function

import numpy as np
import matplotlib.pyplot as plt

def sigmoid(t):
    """Map any real number into (0, 1); sigmoid(0) = 0.5."""
    return 1. / (1. + np.exp(-t))

x = np.linspace(-10, 10, 500)
y = sigmoid(x)
plt.plot(x, y)
plt.show()


2. The Logistic Regression Loss Function

If the true label is 1, then the smaller p̂ is, the closer the prediction is to 0 and the further it is from the truth; symmetrically, if the true label is 0, a large p̂ should be penalized just as heavily.

We need to find a function whose shape matches this behavior.

That gives us the structure of the final loss function.
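The function with exactly this behavior is the cross-entropy loss, which the J() helper in section 4 implements: the -log(p̂) term blows up as p̂ → 0 when y = 1, and -log(1 - p̂) blows up as p̂ → 1 when y = 0.

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\left[\,y^{(i)}\log\hat{p}^{(i)} + (1-y^{(i)})\log(1-\hat{p}^{(i)})\,\right],\qquad \hat{p}^{(i)} = \sigma\!\left(\theta^T x_b^{(i)}\right)$$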

3. The Gradient of the Logistic Regression Loss Function
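Differentiating J(θ) yields a gradient with the same vectorized shape as linear regression's, with σ(X_bθ) taking the place of X_bθ; this is exactly what the dJ() helper in section 4 computes:

$$\nabla J(\theta) = \frac{1}{m}\, X_b^T\left(\sigma(X_b\theta) - y\right)$$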

4. Implementing the Logistic Regression Algorithm

### Implementing logistic regression

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()
# The dataset holds 150 records in 3 classes, 50 per class.
# Each record has 4 features: sepal length, sepal width,
# petal length and petal width. From these 4 features we can
# predict which of the species
# (iris-setosa, iris-versicolour, iris-virginica) a flower belongs to.

X = iris.data
y = iris.target

# Keep only the samples with y < 2 (two classes, for binary
# classification) and only their first two features, so the
# data can be plotted in 2D.
X = X[y<2,:2]
y = y[y<2]

# For each class, plot the two feature values of every sample
# as its x and y coordinates: y == 0 in red, y == 1 in blue.
plt.scatter(X[y==0, 0], X[y==0, 1], color="red")
plt.scatter(X[y==1, 0], X[y==1, 1], color="blue")
plt.show()

For this classification test, both the horizontal and the vertical axis represent features.

# from playML.model_selection import train_test_split

import numpy as np

def train_test_split(X, y, test_ratio=0.2, seed=None):
    """Split X and y into X_train, X_test, y_train, y_test according to test_ratio"""
    assert X.shape[0] == y.shape[0], \
        "the size of X must be equal to the size of y"
    assert 0.0 <= test_ratio <= 1.0, \
        "test_ratio must be valid"

    if seed is not None:
        np.random.seed(seed)

    shuffled_indexes = np.random.permutation(len(X))

    test_size = int(len(X) * test_ratio)
    test_indexes = shuffled_indexes[:test_size]
    train_indexes = shuffled_indexes[test_size:]

    X_train = X[train_indexes]
    y_train = y[train_indexes]

    X_test = X[test_indexes]
    y_test = y[test_indexes]

    return X_train, X_test, y_train, y_test


X_train, X_test, y_train, y_test = train_test_split(X, y, seed=666)


###########################################################################
import numpy as np

class LogisticRegression:
    
    def __init__(self):
        """Initialize the Logistic Regression model"""
        self.coef_ = None
        self.intercept_ = None
        self._theta = None

    def _accuracy_score(self, y_true, y_predict):
        """Compute the accuracy of y_predict against y_true"""
        assert len(y_true) == len(y_predict), \
            "the size of y_true must be equal to the size of y_predict"
        return np.sum(y_true == y_predict) / len(y_true)

    def _sigmoid(self, t):
        return 1. / (1. + np.exp(-t))

    def fit(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """Train the Logistic Regression model on X_train, y_train using gradient descent"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        def J(theta, X_b, y):
            """Cross-entropy loss; falls back to inf if log(0) occurs."""
            y_hat = self._sigmoid(X_b.dot(theta))
            try:
                return -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
            except Exception:
                return float('inf')

        def dJ(theta, X_b, y):
            """Vectorized gradient: X_b^T (sigmoid(X_b . theta) - y) / m."""
            return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(X_b)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):

            theta = initial_theta
            cur_iter = 0

            while cur_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # Stop early once the loss barely changes between iterations
                if abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break

                cur_iter += 1

            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])  # prepend a column of 1s for the intercept
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        
        return self

    def predict_proba(self, X_predict):
        """给定待预测数据集X_predict,返回表示X_predict的结果概率向量"""
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"

        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return self._sigmoid(X_b.dot(self._theta))

    def predict(self, X_predict):
        """给定待预测数据集X_predict,返回表示X_predict的结果向量"""
        assert self.intercept_ is not None and self.coef_ is not None, \
        "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
        "the feature number of X_predict must be equal to X_train"
        
        proba = self.predict_proba(X_predict)
        return np.array(proba >= 0.5, dtype = 'int')

    def score(self, X_test, y_test):
        """根据测试数据集 X_test 和 y_test 确定当前模型的准确度"""
        
        y_predict = self.predict(X_test)
        return self._accuracy_score(y_test, y_predict)
    
    def __repr__(self):
        return "LogisticRegression()"
    
    
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
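
A quick check of the fitted model (the exact numbers depend on the random split, so none are quoted here):

log_reg.score(X_test, y_test)        # classification accuracy on the test set
log_reg.predict_proba(X_test)        # predicted probabilities, one per test sample
log_reg.coef_, log_reg.intercept_    # the fitted parameters theta[1:] and theta[0]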

5. The Decision Boundary
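The decision boundary is the set of points where the predicted probability is exactly 0.5, i.e. where θᵀx_b = 0. With two features this is a straight line:

$$\theta_0 + \theta_1 x_1 + \theta_2 x_2 = 0 \;\Rightarrow\; x_2 = \frac{-\theta_0 - \theta_1 x_1}{\theta_2}$$

This is exactly what the x2() helper below computes.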

def x2(x1):
    """Solve theta0 + theta1*x1 + theta2*x2 = 0 for x2 (the decision boundary line)."""
    return (-log_reg.intercept_-log_reg.coef_[0]*x1) / log_reg.coef_[1]

x1_plot = np.linspace(4,8,1000)
x2_plot = x2(x1_plot)

plt.scatter(X[y==0, 0], X[y==0, 1], color = "red")
plt.scatter(X[y==1, 0], X[y==1, 1], color = "blue")
plt.plot(x1_plot, x2_plot)
plt.show()        # note: the plotted points are the training-stage samples, not the test set
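
As an optional sanity check (not part of the original walkthrough), the same split can be fed to scikit-learn's own LogisticRegression. Its default L2 regularization (C=1.0) means the coefficients will not match this unregularized implementation exactly, but on this linearly separable subset the accuracies should agree:

from sklearn.linear_model import LogisticRegression as SkLogisticRegression

sk_log_reg = SkLogisticRegression()   # defaults to L2 regularization with C=1.0
sk_log_reg.fit(X_train, y_train)
sk_log_reg.score(X_test, y_test)      # accuracy on the same test split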
