Logistic Regression with Regularization: Implementing a Machine Learning Algorithm in Python with numpy and pandas

import pandas as pd
import numpy as np
import random


class LogisticRegression:

"""

逻辑回归:

1.随机生成梯度

2.计算损失值: z=X.W+b-->p_array=1/(1+e^-z)-->loss=-ylog(p)-(1-y)log(1-p)+regular_item

3.梯度优化

"""

    def __init__(self, alpha=1, C=1.0, diff=1e-4):
        self.alpha = alpha  # learning rate
        self.C = C          # regularization coefficient
        self.diff = diff    # loss change between iterations (decides when gradient descent stops)
        self.mean_cost = None
        self.weight_bias = None
        self.coef = None
        self.intercept = None
        self.m = None
        self.n = None

    def preprocess_data(self, X, y, test_size=0.35):
        """
        1. Split the data into training and test sets
        2. Standardize the data
        :param X: feature values
        :param y: target values
        :param test_size: fraction of the data set used for testing
        :return: X_train, X_test, y_train, y_test
        """
        # 1. Split the data set
        X_train, X_test, y_train, y_test = self.train_test_split(X, y, test_size=test_size)
        # 2. Standardize the input space
        X_train, X_test = self.standardization([X_train, X_test])
        return X_train, X_test, y_train, y_test

    def standardization(self, two_arrays):
        """
        std_result = (x - mean) / std
        Note: each array here is scaled with its own statistics; standard
        practice is to reuse the training set's mean and std for the test set.
        :param two_arrays: a single 2-D array or a list of 2-D arrays
        :return: a list of standardized arrays
        """
        arrays = [two_arrays] if isinstance(two_arrays, np.ndarray) else two_arrays
        return [(array - np.mean(array, axis=0)) / np.std(array, axis=0) for array in arrays]

    def train_test_split(self, X, y, test_size=0.2):
        # Split the data set according to the given test_size fraction
        m, n = X.shape
        x_test_number = int(m * test_size)
        # Randomly sample the test indices
        test_index = random.sample(range(m), x_test_number)
        train_index = list(set(range(m)) - set(test_index))
        # Split into training and test sets
        return X[train_index], X[test_index], y[train_index], y[test_index]

    def transform_X(self, X):
        """Append a column of ones to X so the bias joins the matrix product: X*W + b ---> [X, 1]*[W, b]"""
        m = X.shape[0]
        bias_array = np.array([1] * m).reshape((-1, 1))
        X = np.concatenate((X, bias_array), axis=1)
        return X
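
    # Illustrative note on transform_X above (values are made up): with
    # X = [[2., 3.], [4., 5.]] it returns [[2., 3., 1.], [4., 5., 1.]], so for
    # weight_bias = [[w1], [w2], [b]] the product np.dot(X, weight_bias)
    # computes w1*x1 + w2*x2 + b for every row in one matrix multiplication.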

    def fit(self, X, y):
        """
        1. Further process X and y (1.1 append a column of ones to X, corresponding to the bias,
           to simplify the matrix product  1.2 reshape y into a column vector)
        2. Solve by gradient descent (2.1 randomly initialize the coefficients and compute the loss
           2.2 compute the gradient  2.3 update the coefficients and recompute the loss
           2.4 repeat steps 2.2-2.3  2.5 stop when the termination condition is met)
        :param X: preprocessed feature values
        :param y: preprocessed target values
        :return: the instance itself (self)
        """
        # 1. Transform X and y (X --> (m, n), y --> (m, 1))
        X = self.transform_X(X)
        y = np.array(y).reshape(-1, 1)
        self.m, self.n = X.shape
        # 2. Gradient descent towards the minimal loss ---> optimal coefficients (weights and bias)
        self.gradient_descent_optimization(X, y)
        return self

    def gradient_descent_optimization(self, X, y):
        """
        1. Randomly initialize the coefficients (weights and bias)
        2. Compute the loss (2.1 predictions  2.2 sigmoid probabilities  2.3 log loss)
        3. Gradient descent (3.1 compute the gradient  3.2 update the coefficients
           weight_bias = weight_bias - alpha * grad  3.3 recompute the loss
           3.4 loop until the termination condition is met)
        4. Update the instance attributes (coef, intercept, mean_cost)
        :param X: preprocessed feature values
        :param y: preprocessed target values
        :return: the instance itself (self)
        """
        # 1. Randomly initialize the coefficients (weight_bias --> (n, 1))
        weight_bias = np.random.randn(self.n, 1)
        # 2. Compute the loss (predictions --> sigmoid probabilities --> log loss)
        mean_cost = self.calc_cost(X, y, weight_bias)
        # 3. Gradient descent: iterate until the change in loss drops below self.diff
        pre_mean_cost = 0
        cur_mean_cost = mean_cost
        while abs(pre_mean_cost - cur_mean_cost) > self.diff:
            # 3.1 Compute the gradient, i.e. the partial derivatives of the loss w.r.t. w1, w2, ...
            grad = self.calc_gradient(X, y, weight_bias)
            # 3.2 Update the coefficients; the shrinkage factor implements the L2 penalty
            #     and is applied to the weights only, since the bias is not regularized
            weight_bias[:-1] *= 1 - self.C * self.alpha / self.m
            weight_bias = weight_bias - self.alpha * grad
            # 3.3 Recompute the loss (predictions --> sigmoid probabilities --> log loss)
            mean_cost = self.calc_cost(X, y, weight_bias)
            # 3.4 Shift the loss values: previous = current, current = new
            #     (their difference decides whether to keep iterating)
            pre_mean_cost = cur_mean_cost
            cur_mean_cost = mean_cost
        # 4. Update the instance attributes
        self.weight_bias = weight_bias
        self.coef, self.intercept = self.weight_bias[:-1].flatten(), self.weight_bias[-1].flatten()
        self.mean_cost = mean_cost
        return self

    def calc_cost(self, X, y, weight_bias):
        # 1. Compute the sigmoid: p = 1 / (1 + e^(-z))
        p_array = self.calc_sigmoid(X, weight_bias)
        # 2. Compute the loss: -y*log(p) - (1-y)*log(1-p) + regular_item
        # 2.1 L2 regularization term C/(2m) * W.T*W to reduce overfitting; the bias
        #     (last entry of weight_bias) is excluded
        regular_item = (self.C / (2 * self.m)) * np.dot(weight_bias[:-1].T, weight_bias[:-1])
        # 2.2 Flatten y and p_array to 1-D so they can be indexed element-wise
        y_array, p_array = y.flatten(), p_array.flatten()
        # 2.3 Mean loss cost = 1/m * sum(-y*log(p) - (1-y)*log(1-p)); 1e-5 keeps
        #     log away from zero when p or 1-p vanishes
        cost = lambda y, p: -y * np.log(p + 1e-5) - (1 - y) * np.log(1 - p + 1e-5)
        mean_cost = 1 / self.m * sum([cost(y_array[i], p_array[i]) for i in range(self.m)]) + float(regular_item)
        return mean_cost
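
    # An equivalent vectorized form of the loop in calc_cost above (a rewrite
    # note; same math, no Python-level loop):
    #     mean_cost = np.mean(-y_array * np.log(p_array + 1e-5)
    #                         - (1 - y_array) * np.log(1 - p_array + 1e-5)) + float(regular_item)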

    def calc_sigmoid(self, X, weight_bias):
        # 1. Matrix product: features (m, n) * coefficients (n, 1) = z --> (m, 1)
        z_array = np.dot(X, weight_bias)
        # 2. Apply the sigmoid element-wise
        return 1 / (1 + np.exp(-z_array))
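
    # A numerical-stability aside on calc_sigmoid above (an optional guard,
    # not in the original): np.exp(-z) overflows for very negative z. A common
    # stable form, should it ever matter on other data:
    #     p = np.where(z >= 0, 1 / (1 + np.exp(-z)), np.exp(z) / (1 + np.exp(z)))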

    def calc_gradient(self, X, y, weight_bias):
        """Gradient: grad = 1/m * X.T*(p_array - y) ---> (m, n).T * (m, 1)"""
        # 1. Compute the sigmoid p = 1 / (1 + e^-z)
        p_array = self.calc_sigmoid(X, weight_bias)
        # 2. Compute the gradient grad = (n, m) * (m, 1) ---> (n, 1)
        grad = np.dot(X.T, p_array - y) * (1 / self.m)
        return grad

    def predict(self, X):
        """
        1. Compute z = X * weight_bias
        2. Compute the sigmoid (label 1 if the probability is at least 0.5, otherwise 0)
        :param X: test-set feature values
        :return: the predicted class labels
        """
        X = self.transform_X(X)
        p_array = self.calc_sigmoid(X, self.weight_bias)
        y_predict = (p_array >= 0.5).astype(int)
        return y_predict.reshape(-1, 1)

    def score(self, X, y):
        """
        Accuracy of the model: correct predictions / total predictions
        1. Compute the predicted labels
        2. Compare the predicted labels with the actual ones
        :param X: feature values
        :param y: target values
        :return: accuracy
        """
        # 1. Compute the predicted labels
        y_predict = self.predict(X)
        # 2. Count the correct predictions
        y, y_predict = y.flatten(), y_predict.flatten()
        m = y_predict.shape[0]
        correct_number = int(np.sum(y == y_predict))
        # 3. Accuracy = correct / total (rounded to four decimal places)
        accuracy = round(correct_number / m, ndigits=4)
        return accuracy
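
# A finite-difference check for calc_gradient (a verification sketch added
# here, not part of the original post). It assumes the model has been fit, so
# that model.m is set, and that X already carries the bias column produced by
# transform_X. Because calc_cost includes the L2 term while calc_gradient does
# not, the difference on the weight rows should be close to (C/m) * W.
def numerical_gradient_check(model, X, y, weight_bias, eps=1e-6):
    approx = np.zeros_like(weight_bias, dtype=float)
    for i in range(weight_bias.shape[0]):
        plus = weight_bias.astype(float)
        minus = weight_bias.astype(float)
        plus[i] += eps
        minus[i] -= eps
        # Central difference of the smoothed loss around weight_bias[i]
        approx[i] = (model.calc_cost(X, y, plus) - model.calc_cost(X, y, minus)) / (2 * eps)
    return approx
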

if __name__ == '__main__':
    # data = pd.read_csv('./data/iris_data.csv')
    # X, y = data.iloc[:, 0:-1].values, data.iloc[:, -1:].values
    # Breast cancer data set: https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/
    # wdbc.data ships without a header row, so pass header=None to keep the first sample
    data = pd.read_csv('./data/wdbc.data', header=None)
    X, y = data.iloc[:, 2:].values, data.iloc[:, 1]
    y = y.replace({'M': 1, 'B': 0}).values
    # X and y now have shapes (m, n) and (m,)
    # Instantiate LogisticRegression
    logistic = LogisticRegression()
    # Preprocess the input data: split into train/test sets and standardize
    X_train, X_test, y_train, y_test = logistic.preprocess_data(X, y)
    # Train the model
    logistic.fit(X_train, y_train)
    # Inspect the log loss and the fitted coefficients
    print(logistic.mean_cost)
    print(logistic.coef)
    print(logistic.intercept)
    y_predict = logistic.predict(X_test)
    print('Predicted labels:', y_predict.flatten())
    print('Actual labels:', y_test.flatten())
    print('Accuracy:', logistic.score(X_test, y_test))
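
    # Optional sanity check against scikit-learn (a sketch that assumes
    # scikit-learn is installed; note that sklearn's C is the *inverse*
    # regularization strength, so the two models only match qualitatively).
    from sklearn.linear_model import LogisticRegression as SkLogisticRegression
    sk_model = SkLogisticRegression().fit(X_train, y_train)
    print('sklearn accuracy:', sk_model.score(X_test, y_test))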
