from sklearn import datasets
from sklearn.model_selection import train_test_split
import numpy as np
data = datasets.load_iris()
X = data['data']
y = data['target']
# n: number of features, k: number of classes
n = X.shape[1]
label = np.unique(y)
k = len(label)
X = np.insert(X, 0, 1, axis=1) # prepend a bias column of ones -> shape (m, n+1)
# NOTE(review): train_size=0.75 + test_size=0.15 leaves 10% of the samples
# unused by the split — confirm this is intentional.
X_train, X_test, Y_train, Y_test = train_test_split(X, y, train_size=0.75, test_size=0.15, random_state=1)
# m: number of training samples
m = X_train.shape[0]
def getGradient(theta):
    """Return the gradient of the softmax log-likelihood w.r.t. theta.

    Parameters
    ----------
    theta : ndarray of shape (k, n+1)
        Current weight matrix (one row per class, bias column included).

    Returns
    -------
    ndarray of shape (k, n+1)
        Gradient (G - H) @ X_train, where G is the one-hot label matrix
        and H the predicted class probabilities. This is an *ascent*
        direction for the log-likelihood; the caller is expected to add
        (not subtract) a multiple of it. Not normalized by m.
    """
    # One-hot indicator matrix: G[c, i] = 1 iff sample i has label c.
    # Vectorized fancy indexing replaces the original per-sample loop.
    G = np.zeros((k, m))  # k*m
    G[Y_train, np.arange(m)] = 1
    # Predicted probability matrix H, shape (k, m).
    H = hypothesis(theta, X_train)
    # Gradient of the log-likelihood.
    return np.dot((G - H), X_train)
def hypothesis(theta, X):
    """Softmax hypothesis: per-class probabilities for each sample.

    Parameters
    ----------
    theta : ndarray of shape (k, d)
        Weight matrix, one row per class.
    X : ndarray of shape (m, d)
        Design matrix (bias column already included by the caller).

    Returns
    -------
    ndarray of shape (k, m)
        Column i is the softmax probability distribution over the k
        classes for sample i; each column sums to 1.
    """
    eta = np.dot(theta, X.T)  # logits, shape (k, m)
    # Subtract the per-column max before exponentiating (log-sum-exp
    # trick): mathematically a no-op for softmax, but prevents overflow
    # in np.exp for large logits.
    eta -= np.max(eta, axis=0)
    H = np.exp(eta)  # k*m
    H /= np.sum(H, axis=0)
    return H
# Source article: "3.3 softmax回归推导及python实现" (softmax regression
# derivation and Python implementation); webpage extraction residue removed.