import numpy as np
# Sigmoid function (original author spelled it "sigmod"; name kept for callers).
def sigmod(x):
    """Return the logistic function 1 / (1 + e^-x), element-wise.

    Accepts scalars, numpy arrays or pandas Series; pandas input keeps its
    type (the training code iterates the result with ``.items()``).

    The argument is clipped to [-500, 500] before exponentiation so that
    ``np.exp`` never overflows for very negative inputs (the naive formula
    raises a RuntimeWarning and produces inf there); within the clipped
    range the result is identical to the naive formula.
    """
    return 1 / (1 + np.exp(-np.clip(x, -500, 500)))
# Logistic regression trained with batch gradient descent.
def logistic_regression(feature, target):
    """Fit a logistic-regression classifier and return training-set predictions.

    Parameters
    ----------
    feature : pandas.DataFrame
        One row per sample, one column per feature.  The caller's frame is
        NOT modified (the intercept column is added to a copy; the original
        wrote it into `feature` in place).
    target : pandas.DataFrame
        Must contain a 'target' column of 0/1 labels aligned with `feature`.

    Returns
    -------
    list[int]
        Predicted 0/1 label (probability thresholded at 0.5) for each sample.
    """
    X = feature.copy()
    X['b'] = 1  # intercept term
    X = X.to_numpy(dtype=float)
    y = np.asarray(target['target'], dtype=float)
    m, n = X.shape  # m samples, n features (including intercept)

    def _sigmoid(z):
        # Clipped so np.exp never overflows for large negative z.
        return 1.0 / (1.0 + np.exp(-np.clip(z, -500.0, 500.0)))

    rate = 0.01        # learning rate
    max_iter = 200000  # hard cap so training always terminates
                       # (the original `while True` could spin forever)

    weight = np.random.randn(n)  # random weight initialisation
    for _ in range(max_iter):
        p = _sigmoid(X @ weight)
        # Clip probabilities so log() never sees exactly 0 or 1.
        p_safe = np.clip(p, 1e-12, 1.0 - 1e-12)
        # Mean binary cross-entropy loss.
        loss = -(y * np.log(p_safe) + (1.0 - y) * np.log(1.0 - p_safe)).sum() / m
        if loss < 0.3:  # same stopping criterion as the original
            break
        # Gradient of the mean cross-entropy: X^T (p - y) / m.
        # (The original divided by n, which merely rescales the effective
        # learning rate; /m is the correct mean gradient.)
        grad = ((p - y)[:, None] * X).sum(axis=0) / m
        # Gradient DESCENT: subtract the gradient.  The original ADDED it,
        # i.e. performed gradient ascent on the loss, which diverges.
        weight = weight - rate * grad

    # Threshold the fitted probabilities at 0.5.
    probs = _sigmoid(X @ weight)
    return [1 if v >= 0.5 else 0 for v in probs]
# LR (logistic regression): a minimal NumPy implementation.
# First published 2023-11-06 18:26:50.