卷积神经网络(CNN)—— PaddlePaddle CIFAR-10 图像分类示例

import paddle
import paddle.nn.functional as F
from paddle.vision.transforms import ToTensor
import numpy as np
import matplotlib.pyplot as plt

# Convert HWC uint8 images to CHW float tensors in [0, 1].
transform = ToTensor()

# FIX: the original used curly "smart quotes" (‘train’ / ‘test’) around the
# mode strings, which is a SyntaxError in Python; replaced with ASCII quotes.
cifar10_train = paddle.vision.datasets.Cifar10(mode='train', transform=transform)
cifar10_test = paddle.vision.datasets.Cifar10(mode='test', transform=transform)

class MyNet(paddle.nn.Layer):
    """Small CNN classifier for 32x32 RGB images (used here on CIFAR-10).

    Architecture: three 3x3 conv layers (32 -> 64 -> 64 channels) with ReLU,
    2x2 max-pooling after the first two convs, then two fully-connected
    layers producing raw class logits.
    """

    # FIX: the original defined `init` and called `self.init()`, so the
    # constructor never ran and no layers were created; renamed to the real
    # Python constructor `__init__` with a proper `super().__init__()` call.
    # Also restored the class-body indentation lost in the blog paste.
    def __init__(self, num_classes=1):
        super(MyNet, self).__init__()

        self.conv1 = paddle.nn.Conv2D(in_channels=3, out_channels=32, kernel_size=(3, 3))
        self.pool1 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)

        self.conv2 = paddle.nn.Conv2D(in_channels=32, out_channels=64, kernel_size=(3, 3))
        self.pool2 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)

        self.conv3 = paddle.nn.Conv2D(in_channels=64, out_channels=64, kernel_size=(3, 3))

        self.flatten = paddle.nn.Flatten()

        # 1024 = 64 channels * 4 * 4 spatial size after the conv/pool stack
        # on a 32x32 input — TODO confirm if input resolution ever changes.
        self.linear1 = paddle.nn.Linear(in_features=1024, out_features=64)
        self.linear2 = paddle.nn.Linear(in_features=64, out_features=num_classes)

    def forward(self, x):
        """Return logits of shape (batch, num_classes) for input images x."""
        x = self.conv1(x)
        x = F.relu(x)
        x = self.pool1(x)

        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool2(x)

        x = self.conv3(x)
        x = F.relu(x)

        x = self.flatten(x)
        x = self.linear1(x)
        x = F.relu(x)
        x = self.linear2(x)
        return x

# Training hyperparameters.
epoch_num = 10
batch_size = 32
learning_rate = 0.001
# Per-epoch validation metrics, appended to by train() and plotted afterwards.
val_acc_history = []
val_loss_history = []

def train(model):
    """Train `model` on cifar10_train and validate on cifar10_test.

    Runs `epoch_num` epochs with the Adam optimizer; after each epoch,
    evaluates on the test set and appends the mean validation accuracy and
    loss to the module-level `val_acc_history` / `val_loss_history` lists.

    NOTE(review): the original blog paste had the whole function body
    dedented to column 0, which is invalid Python; indentation restored.
    """
    print('start training … ')
    # Switch layers such as dropout/batch-norm into training mode.
    model.train()

    opt = paddle.optimizer.Adam(learning_rate=learning_rate,
                                parameters=model.parameters())

    train_loader = paddle.io.DataLoader(cifar10_train,
                                        shuffle=True,
                                        batch_size=batch_size)

    valid_loader = paddle.io.DataLoader(cifar10_test, batch_size=batch_size)

    for epoch in range(epoch_num):
        for batch_id, data in enumerate(train_loader()):
            x_data = data[0]
            # Labels reshaped to (batch, 1), the form expected by
            # F.cross_entropy and paddle.metric.accuracy below.
            y_data = paddle.to_tensor(data[1])
            y_data = paddle.unsqueeze(y_data, 1)

            logits = model(x_data)
            loss = F.cross_entropy(logits, y_data)

            if batch_id % 1000 == 0:
                print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, loss.numpy()))
            loss.backward()
            opt.step()
            opt.clear_grad()

        # Evaluate the model after each epoch.
        model.eval()
        accuracies = []
        losses = []
        for batch_id, data in enumerate(valid_loader()):
            x_data = data[0]
            y_data = paddle.to_tensor(data[1])
            y_data = paddle.unsqueeze(y_data, 1)

            logits = model(x_data)
            loss = F.cross_entropy(logits, y_data)
            acc = paddle.metric.accuracy(logits, y_data)
            accuracies.append(acc.numpy())
            losses.append(loss.numpy())

        avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)
        print("[validation] accuracy/loss: {}/{}".format(avg_acc, avg_loss))
        val_acc_history.append(avg_acc)
        val_loss_history.append(avg_loss)
        # Back to training mode for the next epoch.
        model.train()


model = MyNet(num_classes=10)
train(model)

# Plot the per-epoch validation accuracy collected during training.
# FIX: the original used curly "smart quotes" around every string literal,
# which is a SyntaxError in Python; replaced with ASCII quotes.
plt.plot(val_acc_history, label='validation accuracy')

plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 0.8])
plt.legend(loc='lower right')
# Added so the figure is actually displayed when run as a plain script.
plt.show()

(图:训练过程中每个 epoch 的验证集准确率曲线)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值