# Dropout (丢弃法), following Dive into Deep Learning
import torch
from torch import nn
from d2l import torch as d2l
import os
# Workaround for the duplicate OpenMP runtime error that some environments raise
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def dropout_layer(x,dropout):
    """Zero out elements of x with probability `dropout` and rescale the rest."""
    assert 0 <= dropout <= 1
    # Drop everything: the output is all zeros
    if dropout == 1:
        return torch.zeros_like(x)
    # Keep everything: the input passes through unchanged
    if dropout == 0:
        return x
    # Draw a random matrix of the same shape and compare each value with
    # `dropout` to decide whether the corresponding element is kept
    mask = (torch.rand(x.shape) > dropout).float()
    # Rescale the surviving elements so the expected value stays unchanged
    return mask*x / (1.0-dropout)
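# A quick sanity check of dropout_layer on a small tensor (illustrative, not part
# of the training code below): with dropout=0 the input passes through unchanged,
# with dropout=1 everything is zeroed, and in between the surviving elements are
# scaled by 1/(1-dropout).
X = torch.arange(16,dtype=torch.float32).reshape((2,8))
print(dropout_layer(X,0.))
print(dropout_layer(X,0.5))
print(dropout_layer(X,1.))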
num_inputs,num_outputs,num_hiddens1,num_hiddens2 = 784,10,256,256
dropout1,dropout2 = 0.2,0.5
class Net(nn.Module):
    def __init__(self,num_inputs,num_outputs,num_hiddens1,num_hiddens2,is_training=True):
        super(Net,self).__init__()
        self.num_inputs = num_inputs
        # self.training is the flag nn.Module toggles via net.train()/net.eval()
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs,num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1,num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2,num_outputs)
        self.relu = nn.ReLU()

    def forward(self,x):
        h1 = self.relu(self.lin1(x.reshape((-1,self.num_inputs))))
        # Apply dropout only while training; at evaluation time the full
        # activations are used
        if self.training:
            h1 = dropout_layer(h1,dropout1)
        h2 = self.relu(self.lin2(h1))
        if self.training:
            h2 = dropout_layer(h2,dropout2)
        out = self.lin3(h2)
        return out
net = Net(num_inputs,num_outputs,num_hiddens1,num_hiddens2)
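# Illustrative shape check: a batch of two flattened 28x28 images should produce
# two rows of 10 class scores, i.e. torch.Size([2, 10])
print(net(torch.rand(2,784)).shape)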
num_epochs,lr,batch_size = 10,0.5,256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter,test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(),lr=lr)
d2l.train_ch3(net,train_iter,test_iter,loss,num_epochs,trainer)
d2l.plt.show()
This code example shows how to implement a dropout layer in PyTorch as a regularization technique for neural networks to prevent overfitting. The network has two hidden layers, each followed by a dropout layer that is only active in training mode. The model is applied to the Fashion-MNIST dataset and trained with the SGD optimizer.
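For comparison, here is a minimal sketch of a concise implementation that uses PyTorch's built-in nn.Dropout instead of the hand-written dropout_layer, assuming the same layer sizes, dropout rates, loss, and training settings as above. nn.Dropout is active only in training mode and is disabled automatically by net.eval().

net_concise = nn.Sequential(nn.Flatten(),
                            nn.Linear(784,256),
                            nn.ReLU(),
                            nn.Dropout(dropout1),
                            nn.Linear(256,256),
                            nn.ReLU(),
                            nn.Dropout(dropout2),
                            nn.Linear(256,10))
def init_weights(m):
    # Small Gaussian initialization for the linear layers
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight,std=0.01)
net_concise.apply(init_weights)
trainer = torch.optim.SGD(net_concise.parameters(),lr=lr)
d2l.train_ch3(net_concise,train_iter,test_iter,loss,num_epochs,trainer)
d2l.plt.show()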