Machine Learning: Logistic Regression and Regularization — Code Implementation and Formula Derivation

import numpy as np

# Sigmoid function
def Sigmoid(z):
    g = 1 / (1 + np.exp(-z))
    return g

# Hypothesis: plug z = X * theta into the Sigmoid (X, theta are np.matrix)
def model(X, theta):
    return Sigmoid(X * theta)

# Cost function J(theta)
def computeCost(X, y, theta):
    m = X.shape[0]
    inner = float(y.T * np.log(model(X, theta)) + (1 - y).T * np.log(1 - model(X, theta)))
    return (-1 / m) * inner

# Partial derivative of J(theta) with respect to each theta_j
def gradient(X, y, theta):
    m = X.shape[0]
    J = theta.shape[0]
    grad = np.zeros(theta.shape)
    for j in range(J):
        term = (model(X, theta) - y).T * X[:, j]
        grad[j, 0] = term / m
    return grad

# Batch gradient descent
def BGD(X, y, theta, alpha, iters):
    cost = [computeCost(X, y, theta)]
    for k in range(iters):
        theta = theta - alpha * gradient(X, y, theta)
        cost.append(computeCost(X, y, theta))
    return theta, cost


With this setup, batch gradient descent needs roughly 110,000 iterations to converge.
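The pieces above chain together roughly as follows. This is a minimal sketch, assuming a headerless CSV with two feature columns and a 0/1 label; the file name, column names, alpha and iteration count are illustrative, not taken from the original post.

import numpy as np
import pandas as pd

# hypothetical data file: two feature columns and a 0/1 label, no header row
data = pd.read_csv('ex2data1.txt', names=['exam1', 'exam2', 'admitted'])
data.insert(0, 'ones', 1)                          # intercept column x0 = 1

X = np.matrix(data.iloc[:, :-1].values)            # m x (n+1) design matrix
y = np.matrix(data.iloc[:, -1:].values)            # m x 1 label vector
theta = np.matrix(np.zeros((X.shape[1], 1)))       # start from theta = 0

theta, cost = BGD(X, y, theta, alpha=0.001, iters=110000)
print(cost[-1])                                    # final value of J(theta)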

Multi-class classification:

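A common way to extend this binary model to K classes is one-vs-all: train one logistic regression classifier per class i and predict the class whose hypothesis gives the highest probability. In formulas:

h_\theta^{(i)}(x) = P(y = i \mid x;\, \theta^{(i)}), \qquad i = 1, 2, \dots, K

\hat{y} = \arg\max_{i}\; h_\theta^{(i)}(x)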

Differentiating the cost function:

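Written out, with h_\theta(x) = g(\theta^T x) = \frac{1}{1 + e^{-\theta^T x}} and the sigmoid identity g'(z) = g(z)\,(1 - g(z)), the cost and its partial derivatives (the standard derivation, and exactly what computeCost() and gradient() compute above) are:

J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log h_\theta(x^{(i)}) + \left(1 - y^{(i)}\right) \log\left(1 - h_\theta(x^{(i)})\right) \right]

\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)}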

The cost function is non-convex if squared error is used:

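For comparison, if the squared-error cost of linear regression is reused with the sigmoid hypothesis, J(\theta) is no longer convex — the nonlinearity of g produces multiple local minima — which is why the log cost above is used instead:

J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right)^2, \qquad h_\theta(x) = \frac{1}{1 + e^{-\theta^T x}}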

import pandas as pd

# Feature mapping: expand (X1, X2) into all polynomial terms X1^(i-j) * X2^j up to the given power
def feature_mapping(X1, X2, power):
    data = {}
    for i in range(power + 1):
        for j in range(i + 1):
            data['F{}{}'.format(i - j, j)] = np.power(X1, i - j) * np.power(X2, j)
    return pd.DataFrame(data)
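As a quick illustration of the mapping (output shown approximately; column order follows the double loop), power = 2 yields 1, X1, X2, X1^2, X1*X2, X2^2:

>>> feature_mapping(pd.Series([2.0]), pd.Series([3.0]), power=2)
   F00  F10  F01  F20  F11  F02
0  1.0  2.0  3.0  4.0  6.0  9.0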
# Regularized logistic regression cost J(theta); the bias term theta_0 is not penalized
def computeCost(X, y, theta, lamda):
    m = X.shape[0]
    left = (-1 / m) * float(y.T * np.log(model(X, theta)) + (1 - y).T * np.log(1 - model(X, theta)))
    right = np.sum(np.power(theta[1:], 2)) * (lamda / (2 * m))
    return left + right

# gradient(X, y, theta) — the partial derivatives of the unregularized cost — is the same function as above

# Regularized batch gradient descent
# update: theta_j := theta_j - alpha * (grad_j + (lamda / m) * theta_j), with theta_0 left unregularized
def BGD(X, y, theta, alpha, iters, lamda):
    cost = [computeCost(X, y, theta, lamda)]
    m = X.shape[0]
    for k in range(iters):
        reg = theta[1:] * (lamda / m)
        reg = np.insert(reg, 0, values=0, axis=0)      # no penalty on the bias term
        theta = theta - alpha * (gradient(X, y, theta) + reg)
        cost.append(computeCost(X, y, theta, lamda))
    return theta, cost
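Putting the regularized pieces together; again a minimal sketch, with a hypothetical data file and illustrative values for power, alpha, iters and lamda:

# hypothetical data file: two test scores and a 0/1 label, no header row
data = pd.read_csv('ex2data2.txt', names=['test1', 'test2', 'accepted'])

# feature_mapping already produces the bias column F00 = 1
X = np.matrix(feature_mapping(data['test1'], data['test2'], power=6).values)
y = np.matrix(data[['accepted']].values)
theta = np.matrix(np.zeros((X.shape[1], 1)))

theta, cost = BGD(X, y, theta, alpha=0.001, iters=200000, lamda=1)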


# Regularized linear regression cost
def regularized_Cost(X, y, theta, lamda):
    m = X.shape[0]
    cost = np.power((X * theta - y), 2)
    reg = lamda * np.power(theta[1:], 2)
    return (np.sum(cost) + np.sum(reg)) / (2 * m)

# Gradient of the unregularized linear regression cost
def gradient(X, y, theta):
    m = X.shape[0]
    J = theta.shape[0]
    grad = np.matrix(np.zeros(theta.shape))
    for j in range(J):
        term = ((X * theta) - y).T * X[:, j]
        grad[j, 0] = term / m
    return grad

# Regularized gradient: add (lamda / m) * theta_j for j >= 1; the bias term is not regularized
def regularized_gradient(X, y, theta, lamda):
    m = X.shape[0]
    regularized_term = theta.copy()
    regularized_term[0] = 0
    regularized_term = (lamda / m) * regularized_term
    return gradient(X, y, theta) + regularized_term
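In formula form, with h_\theta(x) = \theta^T x and the bias term \theta_0 left unregularized, the cost and gradient that this code computes are:

J(\theta) = \frac{1}{2m} \left[ \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right)^2 + \lambda \sum_{j=1}^{n} \theta_j^2 \right]

\frac{\partial J(\theta)}{\partial \theta_0} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_0^{(i)}

\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)} + \frac{\lambda}{m} \theta_j, \qquad j \ge 1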