Andrew Ng Deep Learning L4W2 Programming Assignment: PyTorch Implementation (ResNet-50 on the SIGNS dataset)

import torch
from torch import nn
from torch.utils.data import Dataset,DataLoader
import numpy as np
import h5py

# Run on the GPU when one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def load_dataset():
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
 
    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
 
    classes = np.array(test_dataset["list_classes"][:]) # the list of classes
    
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
 
 
 
X_train, Y_train, X_test, Y_test, classes = load_dataset()

# HWC -> CHW for PyTorch conv layers, and scale pixel values to [0, 1]
X_train = X_train.transpose((0,3,1,2))/255
X_test = X_test.transpose((0,3,1,2))/255

X_train = torch.from_numpy(X_train).to(torch.float).to(device)
Y_train = torch.squeeze(torch.from_numpy(Y_train)).to(torch.long).to(device)  # CrossEntropyLoss expects int64 class indices

X_test = torch.from_numpy(X_test).to(torch.float).to(device)
Y_test = torch.squeeze(torch.from_numpy(Y_test)).to(torch.long).to(device)
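
A quick shape check catches layout mistakes early; with the assignment's train_signs.h5/test_signs.h5 this should report 1080 training and 120 test images of 64x64 RGB:

print(X_train.shape, X_train.dtype)  # torch.Size([1080, 3, 64, 64]) torch.float32
print(Y_train.shape, Y_train.dtype)  # torch.Size([1080]) torch.int64
print(X_test.shape, Y_test.shape)    # torch.Size([120, 3, 64, 64]) torch.Size([120])
print(classes)                       # the 6 sign classes, digits 0-5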
 
class MyDataset(Dataset):
    def __init__(self, X, Y):
        self.X = X
        self.Y = Y
 
    def __len__(self):
        return len(self.Y)
 
    def __getitem__(self, idx):
        x = self.X[idx]
        y = self.Y[idx]
        return x, y
    
training_data = MyDataset(X_train,Y_train)
test_data = MyDataset(X_test,Y_test)
 
train_dataloader = DataLoader(training_data, batch_size=32, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=32, shuffle=False)  # no need to shuffle for evaluation
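
Pulling one batch is a throwaway check that the DataLoader emits what the model below expects:

xb, yb = next(iter(train_dataloader))
print(xb.shape, yb.shape)  # torch.Size([32, 3, 64, 64]) torch.Size([32])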

class Residual(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand, plus a shortcut."""
    def __init__(self, in_channels, out_channels1, out_channels2, out_channels3, stride=1):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels1, kernel_size=1, stride=stride)
        self.bn1 = nn.BatchNorm2d(out_channels1)
        self.relu = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(out_channels1, out_channels2, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels2)

        self.conv3 = nn.Conv2d(out_channels2, out_channels3, kernel_size=1, stride=1)
        self.bn3 = nn.BatchNorm2d(out_channels3)

        # Projection shortcut whenever the output shape differs from the input.
        # It needs its own BatchNorm; reusing bn3 here would share weights and
        # statistics between the main path and the shortcut.
        if in_channels != out_channels3 or stride != 1:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels3, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels3)
            )
        else:
            self.shortcut = None

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        if self.shortcut is not None:
            x = self.shortcut(x)

        out = self.relu(out + x)
        return out
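
A dummy forward pass exercises both shortcut variants (a throwaway sketch; 15x15 matches the feature-map size the network's first stage produces from 64x64 inputs):

blk = Residual(64, 64, 64, 256)                # channels change -> projection shortcut
print(blk(torch.randn(2, 64, 15, 15)).shape)   # torch.Size([2, 256, 15, 15])
blk = Residual(256, 64, 64, 256)               # shapes match -> identity shortcut
print(blk(torch.randn(2, 256, 15, 15)).shape)  # torch.Size([2, 256, 15, 15])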
class ResNet(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(ResNet, self).__init__()
        self.stage1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )

        self.stage2 = nn.Sequential(
            Residual(64,64,64,256),
            Residual(256,64,64,256),
            Residual(256,64,64,256)
        )

        self.stage3 = nn.Sequential(
            Residual(256,128,128,512,2),
            Residual(512,128,128,512),
            Residual(512,128,128,512),
            Residual(512,128,128,512)
        )

        self.stage4 = nn.Sequential(
            Residual(512,256,256,1024,2),
            Residual(1024,256,256,1024),
            Residual(1024,256,256,1024),
            Residual(1024,256,256,1024),
            Residual(1024,256,256,1024),
            Residual(1024,256,256,1024)
        )

        self.stage5 = nn.Sequential(
            Residual(1024,512,512,2048,2),
            Residual(2048,512,512,2048),  # 512-wide bottlenecks, matching ResNet-50's conv5 stage
            Residual(2048,512,512,2048)
        )

        # Global average pool to 1x1; 64x64 inputs reach this layer at 2x2,
        # so this is equivalent to the original AvgPool2d(kernel_size=2, stride=2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        out = self.stage1(x)
        out = self.stage2(out)
        out = self.stage3(out)
        out = self.stage4(out)
        out = self.stage5(out)

        out = self.avg_pool(out)
        out = out.view(out.size()[0], -1)

        out = self.fc(out)
        return out


model = ResNet(3, 6)  # 3 input channels (RGB), 6 sign classes
model.to(device)
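
A dummy forward pass is a cheap wiring check before training; this sketch assumes 64x64 RGB inputs as in the SIGNS data:

with torch.no_grad():
    logits = model(torch.randn(2, 3, 64, 64).to(device))
print(logits.shape)                                # torch.Size([2, 6])
print(sum(p.numel() for p in model.parameters()))  # roughly 23M parameters, in ResNet-50's ballpark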
def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    train_loss, correct = 0, 0
    model.train()  # BatchNorm must use per-batch statistics during training

    for X, y in dataloader:
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        train_loss += loss.item()
        correct += (pred.argmax(1) == y).type(torch.float).sum().item()

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_loss /= num_batches
    correct /= size
    print(f"Train Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {train_loss:>8f} \n")
 
 
def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    model.eval()  # switch BatchNorm to its running statistics for evaluation

    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

epochs = 10
learning_rate = 0.001
 
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()
 
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")