Training multi-label data with a PyTorch neural network

The script below builds a synthetic classification dataset with scikit-learn, gives every sample three label columns (the original labels plus two shuffled copies), one-hot encodes those columns into a 9-dimensional multi-label target, and trains a small fully connected network with nn.MultiLabelSoftMarginLoss.

```python
from sklearn.datasets import make_classification
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
import numpy as np
import torch
from torch import nn

# Build a synthetic 3-class problem, then derive two extra label columns
# by shuffling the original labels, so every sample carries three labels.
X, y1 = make_classification(n_samples=10, n_features=100, n_informative=30,
                            n_classes=3, random_state=1)
y2 = shuffle(y1, random_state=1)  # randomly permuted copy of the labels
y3 = shuffle(y1, random_state=2)  # another random permutation
Y = np.vstack((y1, y2, y3)).T     # stack the label columns, shape (10, 3)

# One-hot encode each label column; three 3-class columns become a
# 9-dimensional multi-label target.
enc = OneHotEncoder()
a = enc.fit_transform(Y).toarray()
print(a)

# Variable is deprecated since PyTorch 0.4; plain tensors carry autograd.
X_train = torch.tensor(X, dtype=torch.float32)
Y_train = torch.tensor(a, dtype=torch.float32)

class simpleNet(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(simpleNet, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1),
                                    nn.BatchNorm1d(n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2),
                                    nn.BatchNorm1d(n_hidden_2), nn.Sigmoid())
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

model = simpleNet(100, 64, 32, 9)          # 100 features in, 9 label logits out
loss_func = nn.MultiLabelSoftMarginLoss()  # multi-label loss over the 9 outputs
opt = torch.optim.Adam(model.parameters(), lr=0.1)

loss_count = []
for epoch in range(1):
    for i in range(200):
        batch_x = X_train                # shape (10, 100): full-batch training
        batch_y = Y_train                # shape (10, 9)
        out = model(batch_x)             # shape (10, 9)
        loss = loss_func(out, batch_y)
        opt.zero_grad()                  # clear gradients from the previous step
        loss.backward()                  # backpropagate the loss
        opt.step()                       # apply the parameter update
        loss_count.append(loss.item())   # .item() avoids holding the graph alive
        print('{}:\t'.format(i), loss.item())

# Save once after training, rather than on every iteration as before.
torch.save(model, r'C:\Users\Administrator\PycharmProjects\data\cnn')

model.eval()  # switch BatchNorm to running statistics for inference
with torch.no_grad():
    print(model(X_train))
```
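A note on the loss: nn.MultiLabelSoftMarginLoss treats each of the 9 outputs as an independent binary decision, applying a sigmoid and binary cross-entropy per class and averaging. With the default mean reduction it should agree with nn.BCEWithLogitsLoss up to floating-point error; a quick sketch to check this (not part of the original script):

```python
import torch
from torch import nn

logits = torch.randn(4, 9)
targets = torch.randint(0, 2, (4, 9)).float()

a = nn.MultiLabelSoftMarginLoss()(logits, targets)
b = nn.BCEWithLogitsLoss()(logits, targets)
print(torch.allclose(a, b, atol=1e-6))  # expected True: the two losses coincide
```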
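Because the 9-dimensional target is the concatenation of three one-hot groups (one per label column), the logits can be decoded back into three class predictions by taking an argmax within each group. A minimal sketch, continuing from the script above; decode_predictions is a hypothetical helper, and it assumes all three classes appear in every label column so each one-hot group is exactly 3 wide:

```python
def decode_predictions(logits, n_labels=3, n_classes=3):
    # Hypothetical helper: reshape (batch, 9) -> (batch, 3, 3),
    # one row of logits per label column, then argmax within each row.
    groups = logits.view(-1, n_labels, n_classes)
    return groups.argmax(dim=2)  # (batch, 3): predicted class id per label

model.eval()
with torch.no_grad():
    preds = decode_predictions(model(X_train))
print(preds)  # row i: predicted classes for (y1, y2, y3) of sample i
```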
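One caveat with torch.save(model, ...): it pickles the whole module, so loading later requires the simpleNet class to be importable from the same location. The commonly recommended alternative is to save only the state_dict and rebuild the model before loading; a sketch (the .pt filename is an assumption, reusing the directory from the script):

```python
# Save only parameters and buffers instead of pickling the module itself.
ckpt = r'C:\Users\Administrator\PycharmProjects\data\cnn.pt'  # assumed filename
torch.save(model.state_dict(), ckpt)

# To restore: rebuild the architecture, then load the weights.
model2 = simpleNet(100, 64, 32, 9)
model2.load_state_dict(torch.load(ckpt))
model2.eval()  # use BatchNorm running stats at inference time
```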