Implementing a Simple Neural Network with Keras and PyTorch
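
The same two-hidden-layer MLP (784 → 128 → 128 → 10, with ReLU activations and dropout) is trained on MNIST twice: first in Keras, then in PyTorch.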

Keras

from __future__ import print_function

import numpy as np
from keras.datasets import mnist
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.utils import np_utils

np.random.seed(1671)  # fix the seed for reproducible results

NB_EPOCH = 10
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10
OPTIMIZER = RMSprop()
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2  # fraction of the training set held out for validation
DROPOUT = 0.3

(X_train, y_train), (X_test, y_test) = mnist.load_data()
RESHAPED = 784  # flatten each 28x28 image into a 784-dimensional vector
X_train = X_train.reshape(60000, RESHAPED)
X_test = X_test.reshape(10000, RESHAPED)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

X_train /= 255  # scale pixel values to [0, 1]
X_test /= 255

print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# one-hot encode the labels for categorical_crossentropy
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# two hidden layers with N_HIDDEN units each, followed by a softmax output layer

model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])

history = model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE,
                    validation_split=VALIDATION_SPLIT)
score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("Test score:", score[0])
print("Test accuracy:", score[1])

PyTorch

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

NB_EPOCH = 5
BATCH_SIZE = 128
DROPOUT = 0.3
LEARNING_RATE = 0.01
VALIDATION_SPLIT = 0.2

train_dataset = datasets.MNIST(root='./mnist', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='./mnist', train=False, transform=transforms.ToTensor())
train_num = int(len(train_dataset) * (1 - VALIDATION_SPLIT))
val_num = len(train_dataset) - train_num  # take the remainder to avoid a rounding mismatch

train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_num, val_num])
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)  # no need to shuffle for evaluation
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
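
Note that random_split draws a fresh random permutation on every run. If a reproducible train/validation split is wanted, a seeded generator can be passed in; a small sketch (the seed value 1671 is an assumption, chosen to mirror the Keras script):

g = torch.Generator().manual_seed(1671)  # dedicated RNG so the split is stable across runs
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_num, val_num], generator=g)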


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)  # flatten each 28x28 image to a 784-vector
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=DROPOUT, training=self.training)  # only active in train mode
        x = F.relu(self.fc2(x))
        x = F.dropout(x, p=DROPOUT, training=self.training)
        return F.log_softmax(self.fc3(x), dim=1)  # log-probabilities, paired with nll_loss below


def train(epoch):
    net.train()  # enable dropout during training
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = net(data)
        # the model outputs log-probabilities, so use nll_loss;
        # F.cross_entropy would apply log_softmax a second time
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


def validate(epoch):
    net.eval()  # disable dropout for evaluation
    with torch.no_grad():  # no gradients needed here
        for batch_idx, (data, target) in enumerate(val_loader):
            output = net(data)
            loss = F.nll_loss(output, target)
            if batch_idx % 50 == 0:
                print('Validation Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(val_loader.dataset),
                           100. * batch_idx / len(val_loader), loss.item()))


def test():
    net.eval()
    test_loss = 0
    test_acc = 0
    with torch.no_grad():
        for data, target in test_loader:  # batched iteration, far faster than per-sample
            output = net(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1)  # index of the max log-probability
            test_acc += (pred == target).sum().item()
    print("Test score:", test_loss / len(test_loader.dataset))
    print("Test accuracy:", test_acc / len(test_loader.dataset))


net = Net()
optimizer = optim.RMSprop(net.parameters(), lr=LEARNING_RATE)

for epoch in range(NB_EPOCH):
    train(epoch)
    validate(epoch)

test()
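
Once training is done, the learned weights can be persisted with the usual state_dict pattern; a minimal sketch (the file name mnist_mlp.pt is an arbitrary choice):

torch.save(net.state_dict(), 'mnist_mlp.pt')  # save only the parameters, not the module object

restored = Net()  # rebuild the same architecture
restored.load_state_dict(torch.load('mnist_mlp.pt'))
restored.eval()  # switch off dropout before inference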