Training and testing a softmax classifier on your own dataset

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torchvision import datasets, transforms


# Training settings
batch_size = 20
transform = transforms.ToTensor()
torch.manual_seed(2021)  # fix the RNG so the split and shuffling are reproducible
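
# NOTE: the model below flattens every image to 14688 values, which matches a
# fixed-size RGB crop such as 3 x 36 x 136 (3 * 36 * 136 = 14688). ToTensor()
# does not resize, so all images on disk must already share one shape. If your
# crops vary in size, compose a Resize first -- a sketch (the 36 x 136 target
# is an assumption inferred from the flatten size; adjust to your data):
# transform = transforms.Compose([
#     transforms.Resize((36, 136)),  # (height, width)
#     transforms.ToTensor(),
# ])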


# ImageFolder expects one subdirectory per class; the folder names become the labels
dataset = datasets.ImageFolder('E:\\pythonwork\\LPDdetection\\Chinese-license-plate-recognition-system\\Train\\plate', transform)
n_dataset = len(dataset)
per = 0.9
n_train = int(n_dataset*per)
n_test = n_dataset - n_train
print("n_train ",n_train )
print("n_test ",n_test )
print(len(dataset))
train_dataset, test_dataset = torch.utils.data.random_split(
    dataset,
    [n_train, n_test],
)
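# random_split draws from torch's global RNG, so the torch.manual_seed(2021)
# call above already makes this split reproducible. For a split seeded
# independently of global state, a generator can be passed instead (a sketch,
# not in the original):
# train_dataset, test_dataset = torch.utils.data.random_split(
#     dataset,
#     [n_train, n_test],
#     generator=torch.Generator().manual_seed(2021),
# )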
# print("train_dataset",len(train_dataset))
# 对应文件夹的label
# print(train_dataset.class_to_idx)
# test_dataset = datasets.ImageFolder('E:\\pythonwork\\LPDdetection\\Chinese-license-plate-recognition-system\\Train\plate', transform)
# 对应文件夹的label

# print(test_dataset.class_to_idx)



# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
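
# Optional sanity check (an addition, not in the original): pull one batch and
# confirm the per-image element count matches the 14688 inputs the model expects.
images, labels = next(iter(train_loader))
print("batch shape:", images.shape)  # e.g. torch.Size([20, 3, 36, 136])
assert images[0].numel() == 14688, "image size does not match the flatten size"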


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Five fully connected layers: 14688 -> 520 -> 320 -> 240 -> 120 -> 2 classes
        self.l1 = nn.Linear(14688, 520)
        self.l2 = nn.Linear(520, 320)
        self.l3 = nn.Linear(320, 240)
        self.l4 = nn.Linear(240, 120)
        self.l5 = nn.Linear(120, 2)

    def forward(self, x):
        # Flatten each image: (n, C, H, W) --> (n, 14688)
        x = x.view(-1, 14688)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        # log_softmax output pairs with F.nll_loss during training
        return F.log_softmax(self.l5(x), dim=1)
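
# Equivalence note (an addition, not in the original): log_softmax + F.nll_loss
# computes the same quantity as raw logits + nn.CrossEntropyLoss. If forward()
# returned self.l5(x) directly, training would instead use:
# criterion = nn.CrossEntropyLoss()
# loss = criterion(output, target)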

model = Net()

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

def train(epoch):
    # one pass over the training set, one mini-batch at a time
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # negative log-likelihood loss against the log_softmax output
        loss = F.nll_loss(output, target)
        loss.backward()
        # update the weights
        optimizer.step()
        if batch_idx % 200 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
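
# To train on a GPU instead (an optional sketch, not in the original): move the
# model and each batch to the device before the forward pass, e.g.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model.to(device)
# ...and inside both loops: data, target = data.to(device), target.to(device)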

def val():
    test_loss = 0
    correct = 0
    # evaluate on the held-out test split without tracking gradients
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            # sum up the batch loss; reduction='sum' so the division below
            # yields a true per-sample average (the default 'mean' would not)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # the index of the max log-probability is the predicted class
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

# train for 5 epochs, validating after each one
for epoch in range(1, 6):
    train(epoch)
    val()
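
# Persist the trained weights so the classifier can be reused for inference
# (an addition, not in the original; the filename is arbitrary):
torch.save(model.state_dict(), "plate_softmax.pth")
# To reload later:
# model = Net()
# model.load_state_dict(torch.load("plate_softmax.pth"))
# model.eval()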



