Training MNIST with AlexNet (object-oriented)

```python
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from keras.layers import Dense, Flatten, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.models import load_model
import keras
import numpy as np
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
from keras.datasets import cifar10
from tensorflow.examples.tutorials.mnist import input_data
K.clear_session()
mnist = input_data.read_data_sets("MNIST_DATA", one_hot=True)
class AlexModel:
    # Initialize parameters
    def __init__(self, epochs, batch_size):
        """
        :param epochs: number of passes over the training set
        :param batch_size: number of samples per training batch
        """
        self.epochs = epochs
        self.batch_size = batch_size
        # Stores the accuracy and loss history recorded during training
        self.train_accuracy_and_loss = None
    # Build the model
    def build_model(self):
        """
        Build the model, based on AlexNet.
        :return: the compiled Keras model
        """

        model = Sequential()
        # First convolutional layer: 96 kernels of size 11x11 with stride 4, single-channel input, ReLU activation
        model.add(Conv2D(96, (11, 11), strides=(4, 4), input_shape=(28, 28, 1), padding='valid', activation='relu',
                         kernel_initializer='uniform'))
        # Pooling layer
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))

        # Second convolutional layer: 256 5x5 kernels with 'same' padding, ReLU activation
        model.add(Conv2D(256, (5, 5), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
        # Pooling layer with stride 2
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))

        # Third convolutional layer: 384 3x3 kernels
        model.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
        # Fourth convolutional layer, same as the third
        model.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
        # Fifth convolutional layer: 256 kernels, otherwise as above
        model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
        model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
        # Flatten the convolutional feature maps
        model.add(Flatten())
        # First fully connected hidden layer with 4096 units
        model.add(Dense(4096, activation='relu'))
        # Dropout regularization
        model.add(Dropout(0.5))
        # Second fully connected hidden layer with 4096 units
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
        # Output layer with one unit per class
        model.add(Dense(10, activation='softmax'))
        # Adam optimizer with learning rate 0.0003
        adam = Adam(lr=0.0003, decay=1e-6)
        # Compile the model
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        return model
    # Train the model and save it to disk
    def save_model_after_train(self):
        model = self.build_model()
        x_train, y_train = mnist.train.images, mnist.train.labels
        x_train = x_train.reshape(55000, 28, 28, 1)
        self.train_accuracy_and_loss = model.fit(x_train, y_train, batch_size=self.batch_size, epochs=self.epochs)
        model.save("model.h5")
    # Load the saved model
    def load_model(self):
        return load_model("model.h5")
    # Train the model
    def train(self, mnist):
        model = self.build_model()

        x_train, y_train = mnist.train.images, mnist.train.labels
        x_train = x_train.reshape(55000, 28, 28, 1)
        # fit() returns a History object whose .history dict holds {'acc': [...], 'loss': [...]}
        self.train_accuracy_and_loss = model.fit(x_train, y_train, batch_size=self.batch_size,
                                                 epochs=self.epochs,
                                                 callbacks=[TensorBoard(log_dir='mytensorboard/3')])

    # Loss recorded for each training epoch
    def get_train_loss(self):
        return self.train_accuracy_and_loss.history["loss"]
    # Accuracy recorded for each training epoch
    def get_train_accuracy(self):
        return self.train_accuracy_and_loss.history["acc"]
    # Accuracy and loss on the test set
    def test_accuracy_and_loss(self):
        """
        Evaluate the previously trained and saved model.
        :return: accuracy and loss on the test set
        """
        model = self.load_model()
        x_test, y_test= mnist.test.images, mnist.test.labels
        x_test = x_test.reshape(10000, 28, 28, 1)
        score = model.evaluate(x_test, y_test, batch_size=32)
        return score[1], score[0]

    
model = AlexModel(epochs=2, batch_size=256)
model.train(mnist)
loss = model.get_train_loss()
acc = model.get_train_accuracy()
print(acc)
```
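The script above only trains and prints the per-epoch accuracy; the class also exposes `save_model_after_train()` and `test_accuracy_and_loss()`, which can be combined to persist the weights and then evaluate on the 10,000 test images. A minimal usage sketch, assuming the `MNIST_DATA` files are available and that `model.h5` is written to the working directory:

```python
# Train, save the weights to model.h5, then reload and evaluate on the test set.
model = AlexModel(epochs=2, batch_size=256)
model.save_model_after_train()                         # fits the network and writes model.h5
test_acc, test_loss = model.test_accuracy_and_loss()   # loads model.h5 and evaluates on mnist.test
print("test accuracy: %.4f, test loss: %.4f" % (test_acc, test_loss))
```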
MNIST is a handwritten digit recognition dataset, and AlexNet is a deep learning model for image classification, so an AlexNet model can also be trained to classify MNIST. Below is a code example that trains and evaluates an AlexNet model on MNIST using the PyTorch framework:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Define data preprocessing
transform = transforms.Compose([
    transforms.Resize((227, 227)),
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Load the MNIST dataset
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Define the data loaders
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)

# Define the AlexNet model
class AlexNet(nn.Module):
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(96, 256, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(256, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

# Define the training function
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nn.CrossEntropyLoss()(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

# Define the test function
def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += nn.CrossEntropyLoss(reduction='sum')(output, target).item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

# Set up the device, model, and optimizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AlexNet().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Train and evaluate the model
for epoch in range(1, 11):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)
```