PyTorch in Practice (still being updated)

This article draws on several video courses, primarily Liu Er (刘二大人)'s complete《PyTorch深度学习实践》(PyTorch Deep Learning Practice) series on Bilibili. All of the code has been tested by the author.

Go to 刘二大人's video course

 

First, basic usage of torch.tensor

 

import torch
import numpy as np

# construct a tensor directly from a Python list
x = torch.tensor([[1, -1], [-1, 1]])
# convert a NumPy array with from_numpy
x = torch.from_numpy(np.array([[1, -1], [-1, 1]]))

# create an all-zeros tensor (2-D)
x = torch.zeros([2, 2])
# create an all-ones tensor (3-D)
x = torch.ones([1, 2, 5])
x = torch.tensor([[1, -1], [-1, 1]])

# insert a new dimension of size 1 at position 1: shape [2, 2] -> [2, 1, 2]
x = x.unsqueeze(1)

# swap two dimensions; for a 2-D matrix this is the transpose
x = torch.zeros([2, 3])
x = x.transpose(0, 1)  # shape [2, 3] -> [3, 2]

# concatenate tensors along dim 1; the remaining dims must match
x = torch.zeros([2, 1, 3])
y = torch.zeros([2, 3, 3])
z = torch.zeros([2, 2, 3])
w = torch.cat([x, y, z], dim=1)  # shape [2, 6, 3]

x = torch.tensor([[1., 0.], [-1., 1.]], requires_grad=True)
# loss formula (builds the computational graph): this is the forward pass
z = x.pow(2).sum()
# backward pass: compute each gradient
z.backward()
# read out the gradient of x
print(x.grad)
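
Since z is the sum of the squared entries of x, the gradient is dz/dx = 2x, so the print above outputs tensor([[ 2., 0.], [-2., 2.]]).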

Implementing gradient descent

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0

# linear model: y = w * x
def forward(x):
    return x * w

# mean squared error over the whole training set
def cost(xs, ys):
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y_pred - y) ** 2
    return cost / len(xs)

# analytic gradient of the cost with respect to w: mean of 2 * x * (x * w - y)
def gradient(xs, ys):
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)

for epoch in range(100):
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val  # gradient descent step with learning rate 0.01
    print('epoch:', epoch, 'w=', w, 'loss=', cost_val)
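
After 100 epochs w converges toward the true slope 2.0; a quick prediction check on an unseen input (this line is not in the original code):

print('predict (after training)', 4, forward(4))  # approaches 8.0 as w approaches 2.0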

With torch.tensor and autograd, backpropagation is done automatically

 

import torch
import matplotlib.pyplot as plt

x_data = [1.0,2.0,3.0]
y_data = [4.0,8.0,12.0]

# set initial weights and bias
w1 = torch.tensor([0.0])
w2 = torch.tensor([1.0])
b = torch.tensor([0.0])

w1.requires_grad = True
w2.requires_grad = True
b.requires_grad = True

epoch_list = []
l_list = []  # loss list

# define forward propagation
def forward(x):
    return w1*x*x+w2*x+b

def loss(x,y):
    y_pred=forward(x)
    return (y_pred-y)**2

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()
        # print('\tgrad:', x, y, w1.grad.item(), w2.grad.item(), b.grad.item(), l.item())
        w1.data = w1.data - 0.03 * w1.grad.data
        w2.data = w2.data - 0.03 * w2.grad.data
        b.data = b.data - 0.05 * b.grad.data
        # reset the parameters' gradients to zero before the next backward pass
        w1.grad.data.zero_()
        w2.grad.data.zero_()
        b.grad.data.zero_()

        l_list.append(l.item())
        epoch_list.append(epoch)
    print('progress', epoch, l.item())

print('predict(after training)', 4, forward(4).item())
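
The matplotlib import and the two lists above are only put to use if the loss curve is actually drawn; a minimal plotting sketch (not in the original code):

plt.plot(epoch_list, l_list)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()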

Implementing a simple linear model

import torch

x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])

class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()  # call the parent class's __init__
        # (1, 1) specifies the feature dimensions of the input x and the output y;
        # in this dataset both are 1-dimensional
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

# build the model
model = LinearModel()
# loss function
criterion = torch.nn.MSELoss(reduction='sum')
# optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0)

for epoch in range(100):
    y_pred = model(x_data)  # calling the model invokes forward()
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())
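
A quick check on an unseen input (this test is not in the original code): after training, w should be close to 2 and b close to 0, so the prediction for x = 4 approaches 8.

x_test = torch.Tensor([[4.0]])
print('y_pred =', model(x_test).item())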

Implementing logistic regression

import torch
import numpy as np
import matplotlib.pyplot as plt

x_data = torch.Tensor([[1.0],[2.0],[3.0]])
y_data = torch.Tensor([[0],[0],[1]])

class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel,self).__init__()
        # I found that the fit is only accurate enough with a hidden layer this wide
        self.linear1 = torch.nn.Linear(1, 5000)
        self.linear2 = torch.nn.Linear(5000, 1)

    def forward(self,x):
        y_pred1 = torch.sigmoid(self.linear1(x))
        y_pred2 = torch.sigmoid(self.linear2(y_pred1))
        return y_pred2

model = LogisticRegressionModel()
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(),lr=0.01)

for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred,y_data)
    print(epoch,loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
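
The numpy and matplotlib imports above suggest visualizing the learned probability curve; a minimal sketch (not in the original code) that samples the trained model over x in [0, 5]:

x = np.linspace(0, 5, 200, dtype=np.float32)
x_t = torch.from_numpy(x).view(-1, 1)  # reshape to (200, 1) to match the model's input
y_t = model(x_t)
plt.plot(x, y_t.detach().numpy())
plt.xlabel('x')
plt.ylabel('P(class = 1)')
plt.grid()
plt.show()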

Loading data with the Dataset class. The dataset 'diabetes.csv' used here has the following contents.

Dataset 'diabetes.csv':

-0.294118,0.487437,0.180328,-0.292929,0,0.00149028,-0.53117,-0.0333333,0
-0.882353,-0.145729,0.0819672,-0.414141,0,-0.207153,-0.766866,-0.666667,1
-0.0588235,0.839196,0.0491803,0,0,-0.305514,-0.492741,-0.633333,0
-0.882353,-0.105528,0.0819672,-0.535354,-0.777778,-0.162444,-0.923997,0,1
0,0.376884,-0.344262,-0.292929,-0.602837,0.28465,0.887276,-0.6,0
-0.411765,0.165829,0.213115,0,0,-0.23696,-0.894962,-0.7,1
-0.647059,-0.21608,-0.180328,-0.353535,-0.791962,-0.0760059,-0.854825,-0.833333,0
0.176471,0.155779,0,0,0,0.052161,-0.952178,-0.733333,1
-0.764706,0.979899,0.147541,-0.0909091,0.283688,-0.0909091,-0.931682,0.0666667,0
-0.0588235,0.256281,0.57377,0,0,0,-0.868488,0.1,0
-0.529412,0.105528,0.508197,0,0,0.120715,-0.903501,-0.7,1
0.176471,0.688442,0.213115,0,0,0.132638,-0.608027,-0.566667,0
0.176471,0.396985,0.311475,0,0,-0.19225,0.163962,0.2,1
-0.882353,0.899497,-0.0163934,-0.535354,1,-0.102832,-0.726729,0.266667,0
-0.176471,0.00502513,0,0,0,-0.105812,-0.653288,-0.633333,0
0,0.18593,0.377049,-0.0505051,-0.456265,0.365127,-0.596072,-0.666667,0
-0.176471,0.0753769,0.213115,0,0,-0.117735,-0.849701,-0.666667,0
-0.882353,0.0351759,-0.508197,-0.232323,-0.803783,0.290611,-0.910333,-0.6,1
-0.882353,0.155779,0.147541,-0.393939,-0.77305,0.0312965,-0.614859,-0.633333,0
-0.647059,0.266332,0.442623,-0.171717,-0.444444,0.171386,-0.465414,-0.8,1
-0.0588235,-0.00502513,0.377049,0,0,0.0551417,-0.735269,-0.0333333,1
-0.176471,0.969849,0.47541,0,0,0.186289,-0.681469,-0.333333,0
0.0588235,0.19598,0.311475,-0.292929,0,-0.135618,-0.842015,-0.733333,0
0.176471,0.256281,0.147541,-0.474747,-0.728132,-0.0730253,-0.891546,-0.333333,0
-0.176471,0.477387,0.245902,0,0,0.174367,-0.847139,-0.266667,0
-0.882353,-0.0251256,0.0819672,-0.69697,-0.669031,-0.308495,-0.650726,-0.966667,1
0.529412,0.457286,0.344262,-0.616162,-0.739953,-0.338301,-0.857387,0.2,1
-0.411765,0.175879,0.508197,0,0,0.0163934,-0.778822,-0.433333,1
-0.411765,0.0954774,0.229508,-0.474747,0,0.0730254,-0.600342,0.3,1
-0.647059,0.58794,0.245902,-0.272727,-0.420804,-0.0581222,-0.33988,-0.766667,0
-0.647059,-0.115578,-0.0491803,-0.777778,-0.87234,-0.260805,-0.838599,-0.966667,1
-0.294118,-0.0753769,0.508197,0,0,-0.406855,-0.906063,-0.766667,1
0.176471,0.226131,0.278689,-0.373737,0,-0.177347,-0.629377,-0.2,1
-0.529412,0.0351759,-0.0163934,-0.333333,-0.546099,-0.28465,-0.241674,-0.6,1
0.294118,0.386935,0.245902,0,0,-0.0104321,-0.707942,-0.533333,1
0.0588235,0.0251256,0.245902,-0.252525,0,-0.019374,-0.498719,-0.166667,0
-0.764706,-0.0954774,0.114754,-0.151515,0,0.138599,-0.637062,-0.8,0
-0.529412,0.115578,0.180328,-0.0505051,-0.510638,0.105812,0.12041,0.166667,0
-0.647059,0.809045,0.0491803,-0.494949,-0.834515,0.0134128,-0.835184,-0.833333,1
-0.176471,0.336683,0.377049,0,0,0.198212,-0.472246,-0.466667,1
-0.176471,0.0653266,0.508197,-0.636364,0,-0.323398,-0.865927,-0.1,1
0.0588235,0.718593,0.803279,-0.515152,-0.432624,0.353204,-0.450897,0.1,0
-0.176471,0.59799,0.0491803,0,0,-0.183308,-0.815542,-0.366667,1
0,0.809045,0.0819672,-0.212121,0,0.251863,0.549957,-0.866667,0
-0.882353,0.467337,-0.0819672,0,0,-0.114754,-0.58497,-0.733333,1
-0.764706,-0.286432,0.147541,-0.454545,0,-0.165425,-0.566183,-0.966667,1
-0.176471,0.0351759,0.0819672,-0.353535,0,0.165425,-0.772844,-0.666667,0
-0.176471,0.0552764,0,0,0,0,-0.806149,-0.9,1
-0.882353,0.0351759,0.311475,-0.777778,-0.806147,-0.421759,-0.64731,-0.966667,1
-0.882353,0.0150754,-0.180328,-0.69697,-0.914894,-0.278688,-0.617421,-0.833333,1
-0.411765,-0.115578,0.0819672,-0.575758,-0.945626,-0.272727,-0.774552,-0.7,1
-0.0588235,0.768844,0.47541,-0.313131,-0.29078,0.004471,-0.667805,0.233333,0
-0.176471,0.507538,0.0819672,-0.151515,-0.191489,0.0342773,-0.453459,-0.3,1
-0.882353,-0.266332,-0.180328,-0.79798,0,-0.314456,-0.854825,0,1
-0.176471,0.879397,0.114754,-0.212121,-0.281324,0.123696,-0.849701,-0.333333,0
0,0.00502513,0.442623,0.212121,-0.739953,0.394933,-0.24509,-0.666667,1
0,0.467337,0.344262,0,0,0.207154,0.454313,-0.233333,1
0,0.0552764,0.0491803,-0.171717,-0.664303,0.23696,-0.918873,-0.966667,1
-0.764706,-0.155779,0,0,0,0,-0.807003,0,1
-0.0588235,0.336683,0.180328,0,0,-0.019374,-0.836038,-0.4,0
-0.411765,-0.557789,0.0163934,0,0,-0.254843,-0.565329,-0.5,1
-0.764706,0.417085,-0.0491803,-0.313131,-0.6974,-0.242921,-0.469684,-0.9,1
-0.176471,0.145729,0.0819672,0,0,-0.0223547,-0.846285,-0.3,0
-0.411765,-0.00502513,0.213115,-0.454545,0,-0.135618,-0.893254,-0.633333,1
0,0.0954774,0.442623,-0.393939,0,-0.0312965,-0.336465,-0.433333,0
-0.764706,0.0954774,0.508197,0,0,0.272727,-0.345004,0.1,1
-0.882353,-0.0452261,0.0819672,-0.737374,-0.910165,-0.415797,-0.781383,-0.866667,1
-0.529412,0.467337,0.393443,-0.454545,-0.763593,-0.138599,-0.905209,-0.8,1
-0.764706,0.00502513,0.0819672,-0.59596,-0.787234,-0.019374,-0.326217,-0.766667,0
-0.411765,0.396985,0.0491803,-0.292929,-0.669031,-0.147541,-0.715628,-0.833333,1
0.529412,0.266332,0.47541,0,0,0.293592,-0.568745,-0.3,0
-0.529412,0.296482,0.409836,-0.59596,-0.361702,0.0461997,-0.869342,-0.933333,1
-0.882353,-0.20603,0.229508,-0.393939,0,-0.0461997,-0.728437,-0.966667,1
-0.882353,0,-0.213115,-0.59596,0,-0.263785,-0.947054,-0.966667,1
-0.176471,-0.376884,0.278689,0,0,-0.028316,-0.732707,-0.333333,1
-0.411765,-0.0452261,0.180328,-0.333333,0,0.123696,-0.75064,-0.8,1
0,0.316583,0,0,0,0.28763,-0.836038,-0.833333,0
-0.764706,0.125628,0.0819672,-0.555556,0,-0.254843,-0.804441,-0.9,1
-0.647059,0.135678,-0.278689,-0.737374,0,-0.33234,-0.947054,-0.966667,1
-0.764706,-0.256281,0,0,0,0,-0.979505,-0.966667,1
-0.176471,-0.165829,0.278689,-0.474747,-0.832151,-0.126677,-0.411614,-0.5,1
0,0.0150754,0.0655738,-0.434343,0,-0.266766,-0.864219,-0.966667,1
-0.411765,0.376884,0.770492,0,0,0.454545,-0.872758,-0.466667,0
-0.764706,0.105528,0.213115,-0.414141,-0.704492,-0.0342771,-0.470538,-0.8,1
0.529412,0.0653266,0.180328,0.0909091,0,0.0909091,-0.914603,-0.2,1
-0.764706,0.00502513,0.114754,-0.494949,-0.832151,0.147541,-0.789923,-0.833333,1
0.764706,0.366834,0.147541,-0.353535,-0.739953,0.105812,-0.935952,-0.266667,0
-0.882353,0.0753769,0.114754,-0.616162,0,-0.210134,-0.925705,-0.9,1
-0.882353,-0.19598,-0.0983607,0,0,-0.4307,-0.846285,0,1
-0.529412,0.236181,0.311475,-0.69697,-0.583924,-0.0461997,-0.688301,-0.566667,1
-0.176471,-0.18593,0.278689,-0.191919,-0.886525,0.391952,-0.843723,-0.3,1
-0.529412,0.346734,0.180328,0,0,-0.290611,-0.83006,0.3,0
-0.764706,0.427136,0.344262,-0.636364,-0.8487,-0.263785,-0.416738,0,1
-0.294118,0.447236,0.180328,-0.454545,-0.460993,0.0104323,-0.848847,-0.366667,1
-0.764706,-0.0753769,0.0163934,-0.434343,0,-0.0581222,-0.955594,-0.9,1
-0.882353,-0.286432,-0.213115,-0.636364,-0.820331,-0.391952,-0.790777,-0.966667,1
-0.294118,-0.0653266,-0.180328,-0.393939,-0.8487,-0.14456,-0.762596,-0.933333,1
-0.882353,0.226131,0.47541,0.030303,-0.479905,0.481371,-0.789069,-0.666667,0
-0.882353,0.638191,0.180328,0,0,0.162444,-0.0230572,-0.6,0
-0.882353,0.517588,-0.0163934,0,0,-0.222057,-0.913749,-0.966667,1

import numpy as np
import torch
from torch.utils.data import Dataset  # Dataset is an abstract class: it can only be subclassed, not instantiated
from torch.utils.data import DataLoader  # DataLoader can be instantiated directly

class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',',dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len

class myModel(torch.nn.Module):
    def __init__(self):
        super(myModel, self).__init__()
        self.linear1 = torch.nn.Linear(8, 60)  # 8 input features -> 60 hidden units
        self.bn1 = torch.nn.BatchNorm1d(60)
        self.linear2 = torch.nn.Linear(60, 50)
        self.bn2 = torch.nn.BatchNorm1d(50)
        self.linear3 = torch.nn.Linear(50, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.bn1(x)
        x = self.sigmoid(self.linear2(x))
        x = self.bn2(x)
        x = self.sigmoid(self.linear3(x))
        return x

dataset = DiabetesDataset('diabetes.csv')
train_loader = DataLoader(dataset=dataset,
                          batch_size=100,
                          shuffle=True,
                          num_workers=0)

model = myModel()
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.002)

for epoch in range(4001):
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        if epoch % 100 == 0:
            print('epoch=', epoch, ',batch=', i, ',loss=', loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
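
The loop above only trains; below is a minimal evaluation sketch, run on the training data itself since the original provides no test split (the 0.5 decision threshold is an assumption):

with torch.no_grad():
    model.eval()  # switch the BatchNorm layers to evaluation mode
    y_pred = model(dataset.x_data)
    predicted = (y_pred >= 0.5).float()  # threshold the probabilities at 0.5
    accuracy = (predicted == dataset.y_data).float().mean().item()
    print('accuracy on the training data:', accuracy)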

A simple CNN model

import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader

batch_size = 64
# preprocess: convert the image to a tensor and normalize with the MNIST mean and std
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
# load the training set (download=True fetches MNIST if it is not already present)
train_dataset = datasets.MNIST(root='dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)

# load the test set
test_dataset = datasets.MNIST(root='dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# define neural network structure
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # two convolution layer
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # one pooling layer
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320,10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        # take batch_size from the input so the last, smaller batch also works
        batch_size = x.size(0)
        # x.shape = torch.Size([64, 1, 28, 28])
        x = self.relu(self.pooling(self.conv1(x)))
        # x.shape = torch.Size([64, 10, 12, 12])
        x = self.relu(self.pooling(self.conv2(x)))
        # x.shape = torch.Size([64, 20, 4, 4])
        x = x.view(batch_size, -1)
        # x.shape = torch.Size([64, 320]), matching the fully connected layer
        return self.fc(x)


model = Net()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
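
CrossEntropyLoss combines LogSoftmax and NLLLoss, which is why the network's forward() returns raw logits with no final softmax.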

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # report the average loss every 300 batches
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss:%.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            # torch.max along dim 1 returns (max value, index); the index is the predicted class
            _, predicted = torch.max(outputs.data, dim=1)
            total += labels.size(0)  # accumulate the total sample count (batch size per batch)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__=='__main__':
    for epoch in range(10):
        print('epoch=',epoch)
        train(epoch)
        test()
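
A quick single-sample check after training (not in the original code; it belongs inside the __main__ guard, after the loop): take one batch from the test loader and compare the first prediction with its label.

images, labels = next(iter(test_loader))
images, labels = images.to(device), labels.to(device)
with torch.no_grad():
    outputs = model(images)
print('predicted:', outputs.argmax(dim=1)[0].item(), 'label:', labels[0].item())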

Saving a neural network model

import torch
import torchvision
from torch import nn

vgg16 = torchvision.models.vgg16(pretrained=False)

# Save method 1: the entire model (structure + parameters)
torch.save(vgg16, "vgg16_method1.pth")

# Save method 2 (officially recommended): only the parameters (state_dict)
torch.save(vgg16.state_dict(), "vgg16_method2.pth")
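
Method 1 pickles the entire model object (structure plus parameters), so the file is self-contained but tied to the class definition; method 2 stores only a dictionary of parameter tensors, which is smaller and more portable, but loading it requires building the model first.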


# define a custom model and save it
class myModule(nn.Module):
    def __init__(self):
        super(myModule, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)

    def forward(self, x):
        x = self.conv1(x)
        return x

my_module = myModule()
torch.save(my_module.state_dict(), "mymodel.pth")

Loading a neural network model

import torch
import torchvision
from torch import nn

# Load a model saved with method 1: torch.load returns the whole model object
vgg16 = torch.load("vgg16_method1.pth")
# print(vgg16)

# Load parameters saved with method 2: rebuild the model, then load the state_dict
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
# print(vgg16)

# To load a custom model, the class that defines it must be imported or defined first,
# i.e. the structure has to be available
class myModule(nn.Module):
    def __init__(self):
        super(myModule,self).__init__()
        self.conv1 = nn.Conv2d(3,64,kernel_size=3)

    def forward(self,x):
        x=self.conv1(x)
        return x

model = myModule()
model.load_state_dict(torch.load("mymodel.pth"))
print(model)
