逻辑回归 实现

逻辑回归

导入包

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

人工数据集

# Synthetic 1-D binary-classification data set (two Gaussian clusters).
n = 100  # total sample count, 50 per class
# Rows 0..49: feature drawn from N(mean=3, std=2) -> labelled True below.
X = np.array(np.random.normal(3,2,size=(n//2,1)), dtype = np.float32)
# Insert 50 more rows from N(mean=-3, std=2) at index 50 -> labelled False.
X = np.insert(X, 50,np.random.normal(-3,2,size=(n//2,1)), 0)
# Labels as an (n, 1) boolean column: True for the first 50 rows.
y = np.array([[i<50] for i in range(n)])
# Prepend a bias column of ones: X becomes (n, 2) = [1, feature].
X = np.insert(X, 0, 1, 1)
plt.scatter(X[:,1], y)  # visualize feature value vs. class label
<matplotlib.collections.PathCollection at 0x7f4efaccd120>

image

sigmoid函数

def sigmoid(x):
    """Elementwise logistic sigmoid, 1 / (1 + exp(-x)), numerically stable.

    Implemented as exp(-logaddexp(0, -x)): logaddexp(0, -x) computes
    log(1 + exp(-x)) without ever overflowing, so large-magnitude inputs
    (where the naive exp(-x) overflows and raises a RuntimeWarning)
    saturate cleanly to 0.0 / 1.0. Works on scalars and arrays alike.
    """
    return np.exp(-np.logaddexp(0, -x))
sigmoid(np.array([[0], [2], [3], [4]]))
array([[0.5       ],
       [0.88079708],
       [0.95257413],
       [0.98201379]])

损失函数

def compterCost(X, theta, y):
    """Summed (not averaged) cross-entropy cost for logistic regression.

    Parameters: X is the (n, d) design matrix with a leading bias column,
    theta the (d, 1) parameter vector, y the (n, 1) 0/1 label column.
    Returns the cost as a plain Python float.
    """
    h = sigmoid(X @ theta)
    # Clip predictions away from 0 and 1 so log() never produces -inf
    # when the sigmoid saturates.
    eps = np.finfo(float).eps
    h = np.clip(h, eps, 1 - eps)
    # np.sum + float() returns a true scalar; the original used builtin
    # sum(), yielding a shape-(1,) array whose assignment into a scalar
    # slot (cost[i] in gradientDescent) relies on NumPy's deprecated
    # size-1 coercion.
    return float(-np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)))
compterCost(X, np.array([[0], [0]]), y)
array([69.31471806])

梯度

def getGradient(X, theta, y):
    """Gradient of the summed cross-entropy cost w.r.t. theta.

    Returns X^T (h - y) as a (d, 1) column, where h = sigmoid(X @ theta)
    — algebraically identical to ((h - y)^T X)^T.
    """
    residual = sigmoid(X @ theta) - y
    return X.T @ residual
getGradient(X, np.array([[0], [0]]), y)
array([[   0.        ],
       [-146.22103123]])

梯度下降

def gradientDescent(X, theta, y, alpha, iters):
    """Run `iters` full-batch gradient-descent steps on the logistic cost.

    Parameters
    ----------
    X : (n, d) design matrix with leading bias column.
    theta : (d, 1) initial parameter vector (not mutated; rebound locally).
    y : (n, 1) label column.
    alpha : learning rate.
    iters : number of update steps.

    Side effects: prints the cost after every step and plots the
    cost-vs-iteration curve with matplotlib. Returns the final theta.
    """
    # cost[k] = cost after k update steps; cost[0] is the initial cost.
    cost = np.zeros(iters + 1)
    # NOTE(review): compterCost returns a 1-element array; this scalar-slot
    # assignment relies on NumPy's deprecated size-1 coercion — confirm on
    # newer NumPy versions.
    cost[0] = compterCost(X, theta, y)
    print(f"loop {0}'s cost is {cost[0]}")
    for i in range(iters):
        # One full-batch step; rebinding keeps the caller's array intact.
        theta = theta - getGradient(X, theta, y)*alpha
        cost[i+1] = compterCost(X, theta, y)
        print(f"loop {i+1}'s cost is {cost[i+1]}")
    plt.plot(range(iters+1), cost)  # learning-curve plot
    #print(cost)
    return theta
# Train from theta = 0 for 20 steps with learning rate 0.01.
theta = np.zeros([2, 1])
theta = gradientDescent(X, theta, y, 0.01, 20)
#range(20+1),cost
loop 0's cost is 69.31471805599459
loop 1's cost is 12.92710178128891
loop 2's cost is 12.7791523011902
loop 3's cost is 12.654822204609484
loop 4's cost is 12.548907073302571
loop 5's cost is 12.457644533810031
loop 6's cost is 12.378241881883877
loop 7's cost is 12.308579481987612
loop 8's cost is 12.247017618288151
loop 9's cost is 12.192266756969792
loop 10's cost is 12.143298046907665
loop 11's cost is 12.099280129089411
loop 12's cost is 12.059533602048644
loop 13's cost is 12.023497611864162
loop 14's cost is 11.990704939861654
loop 15's cost is 11.960763155726909
loop 16's cost is 11.933340171581314
loop 17's cost is 11.90815303709544
loop 18's cost is 11.884959153871923
loop 19's cost is 11.863549318093545
loop 20's cost is 11.843742160520215

image

预测函数

def getMy(x):
    """Predict P(y=1) for a raw feature column using the trained global theta.

    Prepends a bias column of ones to `x`, prints the resulting design
    matrix (debug output preserved from the original), and returns the
    sigmoid of its product with `theta`.
    """
    design = np.insert(x, 0, 1, 1)
    print(design,)
    return sigmoid(design @ theta)
getMy(np.array([[1],[2]]))
[[1 1]
 [1 2]]
array([[0.86486794],
       [0.97715609]])

拟合图像

# Plot the fitted sigmoid curve over the training data.
plt.scatter(X[:,1], y)  # training points: feature vs. label
k = np.arange(-3,3,0.01)  # dense feature grid for the fitted curve
plt.plot(k, getMy(k.reshape(-1,1)))  # predicted P(y=1) along the grid
plt.scatter(0, .5, marker = '+')  # reference marker at (0, 0.5)
[[ 1.   -3.  ]
 [ 1.   -2.99]
 [ 1.   -2.98]
 ...
 [ 1.    2.97]
 [ 1.    2.98]
 [ 1.    2.99]]





<matplotlib.collections.PathCollection at 0x7f4ef8a76a70>

image

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值