PyTorch Basics (3)

Concise Implementation of Multilayer Perceptrons

# -*- coding: utf-8 -*-

import torch
from torch import nn
from d2l import torch as d2l

# MLP: Flatten -> Linear(784, 256) -> ReLU -> Linear(256, 10)
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256),
                    nn.ReLU(),
                    nn.Linear(256, 10))
# Initialize the weights of every nn.Linear layer from N(0, 0.01)
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)
        
net.apply(init_weights)

batch_size, lr, num_epochs = 256, 0.1, 10
loss = nn.CrossEntropyLoss()
trainer = torch.optim.SGD(net.parameters(), lr=lr)

train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)       

Out:
[training loss and accuracy curves]
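
As an optional follow-up (a sketch added here, not part of the original post), a few test images and the model's predictions can be displayed with the helper from the d2l book; this assumes d2l.predict_ch3 is available in the installed d2l version:

# Hedged sketch: visualize predictions on a handful of test images
# (assumes d2l.predict_ch3, as defined in the d2l book, is available).
d2l.predict_ch3(net, test_iter, n=6)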

Polynomial Fitting

# -*- coding: utf-8 -*-
import torch
import numpy as np
import math
from torch import nn
from d2l import torch as d2l

# Maximum degree of the polynomial
max_degree = 20
# Training and test set sizes
n_train, n_test = 100, 100
true_w = np.zeros(max_degree)
true_w[:4] = np.array([5, 1.2, -3.4, 5.6])

features = np.random.normal(size=(n_train+n_test, 1))
np.random.shuffle(features)
# poly_features has shape (200, 20): column i holds x**i
poly_features = np.power(features, np.arange(max_degree).reshape(1, -1))
# gamma(n) = (n-1)!, so dividing column i by gamma(i+1) yields x**i / i!
for i in range(max_degree):
    poly_features[:, i] /= math.gamma(i + 1)
# labels has shape (200,)
labels = np.dot(poly_features, true_w)
labels += np.random.normal(scale=0.1, size=labels.shape)

# Convert the NumPy ndarrays to PyTorch tensors
true_w, features, poly_features, labels = [
    torch.tensor(x, dtype=torch.float32)
    for x in [true_w, features, poly_features, labels]]

# Evaluate the average loss of a model on the given dataset
def evaluate_loss(net, data_iter, loss):
    metric = d2l.Accumulator(2)  # accumulates (sum of losses, number of examples)
    for X,y in data_iter:
        out = net(X)
        y = y.reshape(out.shape)
        l = loss(out, y)
        metric.add(l.sum(), l.numel())
    return metric[0]/metric[1]

# Define the training function
def train(train_features, test_features, train_labels, test_labels, num_epochs=400):
    loss = nn.MSELoss()
    input_shape = train_features.shape[-1]
    # No bias term: the constant (degree-0) term is already part of the polynomial features
    net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))
    batch_size = min(10, train_labels.shape[0])
    train_iter = d2l.load_array((train_features, train_labels.reshape(-1, 1)), batch_size)
    test_iter = d2l.load_array((test_features, test_labels.reshape(-1, 1)), batch_size, is_train=False)
    trainer = torch.optim.SGD(net.parameters(), lr=0.01)
    animator = d2l.Animator(xlabel="epoch", ylabel="loss", yscale="log", xlim=[1, num_epochs], ylim=[1e-3,1e2],
                            legend=["train","test"])
    for epoch in range(num_epochs):
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        if epoch==0 or (epoch+1) % 20 == 0:
            animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss),
                                     evaluate_loss(net, test_iter, loss)))
    print("weight:", net[0].weight.data.numpy())

# Use only the first 4 polynomial features (degrees 0 through 3), matching the true model
train(poly_features[:n_train, :4], poly_features[n_train:, :4],
      labels[:n_train], labels[n_train:])

Out:
[training and test loss curves]
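
The same training function can also be used to illustrate underfitting and overfitting (the two calls below are an added sketch, not part of the original post): too few polynomial features cannot fit the cubic ground truth, while all 20 features on only 100 noisy samples overfit.

# Underfitting: only the first 2 features (degrees 0 and 1); the quadratic and cubic terms are missing
train(poly_features[:n_train, :2], poly_features[n_train:, :2],
      labels[:n_train], labels[n_train:])

# Overfitting: all 20 features on only 100 noisy training samples
train(poly_features[:n_train, :], poly_features[n_train:, :],
      labels[:n_train], labels[n_train:])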

High-Dimensional Linear Regression

# -*- coding: utf-8 -*-

import torch
from d2l import torch as d2l
from torch import nn

# A small training set (20 examples) with many input dimensions (200), so the model overfits easily
n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = torch.ones((num_inputs,1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size)

# Initialize the model parameters
def init_params():
    w = torch.normal(0, 1, size=(num_inputs, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    return [w,b]

# Define the L2 norm penalty
def l2_penalty(w):
    return torch.sum(w.pow(2))/2
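
# Note (added): minimizing loss + lambd * l2_penalty(w) adds lambd * w to the
# gradient with respect to w, which is the same effect that the weight_decay
# argument of torch.optim.SGD produces in the concise implementation below.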

# Define the training loop with L2 penalty strength lambd
def train(lambd):
    w, b = init_params()
    net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
    num_epochs, lr = 100, 0.003
    animator = d2l.Animator(xlabel="epochs", ylabel="loss", yscale="log", xlim=[5, num_epochs],
                            legend=["train","test"])
    for epoch in range(num_epochs):
        for X,y in train_iter:
            l = loss(net(X), y) + lambd * l2_penalty(w)
            l.sum().backward()
            d2l.sgd([w,b], lr, batch_size)
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss),
                                     d2l.evaluate_loss(net, test_iter, loss)))
    print("w的L2范数是:", torch.norm(w).item())

# lambd = 0: no weight decay
train(lambd=0)

Out:
[training and test loss curves]
L2 norm of w: 14.963050842285156
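
Running the same function with weight decay enabled (an added example, not in the original post) should shrink the L2 norm of w and narrow the gap between training and test loss:

# lambd = 3: L2 regularization enabled
train(lambd=3)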

Concise Implementation of High-Dimensional Linear Regression

# -*- coding: utf-8 -*-
import torch
from torch import nn
from d2l import torch as d2l

n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = torch.ones((num_inputs,1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size)

def train_concise(wd):
    net = nn.Sequential(nn.Linear(num_inputs, 1))
    # Reinitialize the weight and bias from the standard normal distribution
    for param in net.parameters():
        param.data.normal_()
    loss = nn.MSELoss()
    num_epochs, lr = 100, 0.003
    # Apply weight decay only to the weight; the bias is left unregularized
    trainer = torch.optim.SGD(
        [{"params": net[0].weight, "weight_decay": wd},
         {"params": net[0].bias}], lr=lr)
    animator = d2l.Animator(xlabel="epochs", ylabel="loss", yscale="log",
                            xlim=[5, num_epochs], legend=["train","test"])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            trainer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            trainer.step()
        if (epoch + 1) % 5 == 0:
            animator.add(epoch+1, (d2l.evaluate_loss(net, train_iter, loss),
                                   d2l.evaluate_loss(net, test_iter, loss)))
    print("w的L2范数:",net[0].weight.norm().item())

train_concise(0)

Out:
[training and test loss curves]
L2 norm of w: 12.891228675842285
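
Analogously, the concise version can be rerun with a nonzero weight decay passed to the optimizer (an added example, not in the original post):

# wd = 3: weight decay handled directly by torch.optim.SGD
train_concise(3)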

Dropout Layer

# -*- coding: utf-8 -*-

import torch
from d2l import torch as d2l
from torch import nn

def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    # Drop every element when dropout == 1
    if dropout == 1:
        return torch.zeros_like(X)
    # Keep every element when dropout == 0
    if dropout == 0:
        return X
    # Zero out each element with probability `dropout` and rescale the rest by 1/(1-dropout)
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)
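
# Quick sanity check (added, not in the original post): with dropout=0 the input
# passes through unchanged, with dropout=1 everything is zeroed, and with
# dropout=0.5 roughly half of the entries survive, rescaled by 1/(1-0.5) = 2.
X_check = torch.arange(16, dtype=torch.float32).reshape((2, 8))
print(dropout_layer(X_check, 0.))
print(dropout_layer(X_check, 0.5))
print(dropout_layer(X_check, 1.))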

# Define model parameters
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256

# Define the model
dropout1, dropout2 = 0.2, 0.5
class Net(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training = True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2, num_outputs)
        self.relu = nn.ReLU()
    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
        # Apply dropout only while training the model
        if self.training:
            # Add a dropout layer after the first fully connected layer
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.training:
            # Add a dropout layer after the second fully connected layer
            H2 = dropout_layer(H2, dropout2)
        out = self.lin3(H2)
        return out
            
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)

# Training and testing
num_epochs, lr, batch_size = 10, 0.5, 10
loss = nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)  
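
For comparison, a concise variant can rely on the built-in nn.Dropout layer, which is automatically disabled in evaluation mode. The following is a sketch added here (not part of the original post); it reuses the hyperparameters and data iterators defined above.

net_concise = nn.Sequential(nn.Flatten(),
                            nn.Linear(num_inputs, num_hiddens1), nn.ReLU(), nn.Dropout(dropout1),
                            nn.Linear(num_hiddens1, num_hiddens2), nn.ReLU(), nn.Dropout(dropout2),
                            nn.Linear(num_hiddens2, num_outputs))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net_concise.apply(init_weights)
trainer = torch.optim.SGD(net_concise.parameters(), lr=lr)
d2l.train_ch3(net_concise, train_iter, test_iter, loss, num_epochs, trainer)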