# Logistic regression trained with stochastic gradient descent.
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
ld = load_iris()
x_data = ld.data
y_target = ld.target
y_train = y_target[y_target != 2]
a = []
for i, j in enumerate(y_train):
a.append(x_data[i])
x_train = pd.DataFrame(a)
class logistic_regression():
    """Binary logistic regression trained with stochastic gradient descent.

    Usage: call ``fit`` to register the training data and epoch count,
    then ``logistic_regression_model`` to run SGD and obtain the weights.
    """

    def fit(self, x_train, y_train, k=200):
        """Store the training data and the number of SGD epochs.

        Parameters
        ----------
        x_train : array-like of shape (n_samples, n_features)
            Feature matrix.
        y_train : array-like of shape (n_samples,)
            Binary labels (0 or 1).
        k : int, optional
            Number of full passes over the data during training.
        """
        self.x_train = x_train
        self.y_train = y_train
        self.k = k

    def sigmoid(self, inx):
        """Logistic function 1 / (1 + exp(-inx)).

        NOTE(review): np.exp(-inx) can overflow for large negative
        inputs; acceptable for the small feature scales used here.
        """
        return 1 / (1 + np.exp(-inx))

    def logistic_regression_model(self, alpha=0.0001):
        """Train by SGD and return the learned weight vector.

        Fix: the original computed ``weights`` but neither stored nor
        returned them, so the training result was silently discarded.
        The learning rate is now a parameter (default unchanged).

        Parameters
        ----------
        alpha : float, optional
            SGD learning rate (was a hard-coded constant).

        Returns
        -------
        numpy.ndarray
            Weight vector of length n_features; also stored on
            ``self.weights``.
        """
        x = np.array(self.x_train)
        y = np.array(self.y_train)
        weights = np.ones(np.shape(x)[1])  # start from all-ones, as before
        for _ in range(self.k):                  # epochs
            for i in range(np.shape(x)[0]):      # one update per sample (SGD)
                h = self.sigmoid(np.sum(x[i] * weights))
                error = h - y[i]
                weights = weights - alpha * error * x[i]
        self.weights = weights
        return weights