Python nn.ReLU 方法代码示例

本文详细介绍了Python中torch.nn.ReLU方法的使用,包括多个代码示例,涵盖了nn.ReLU在图像解码器、GoogLeNet、ResNet、RNN、Deformable Convolution等场景的应用。通过这些示例,读者可以了解如何在实际项目中使用nn.ReLU进行激活操作。
摘要由CSDN通过智能技术生成

本文整理汇总了Python中torch.nn.ReLU方法的典型用法代码示例。如果您正苦于以下问题:Python nn.ReLU方法的具体用法?Python nn.ReLU怎么用?Python nn.ReLU使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块torch.nn的用法示例。

在下文中一共展示了nn.ReLU方法的18个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: __init__

​点赞 7

# 需要导入模块: from torch import nn [as 别名]

# 或者: from torch.nn import ReLU [as 别名]

def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    """Transposed-convolution decoder that upsamples a latent code to an image.

    Args:
        input_size: channel count of the latent input (presumably a 1x1
            spatial map, given the first ConvTranspose2d(4, 1, 0) -- confirm).
        n_channels: number of channels in the generated image.
        ngf: base width of the decoder feature maps.
        n_layers: number of upsampling stages.
        activation: output non-linearity, 'tanh' or 'sigmoid'.

    Raises:
        NotImplementedError: for any other ``activation`` value.
    """
    super(ImageDecoder, self).__init__()
    # Start at the widest layer; every subsequent stage halves the width.
    width = ngf * (2 ** (n_layers - 2))
    modules = [
        nn.ConvTranspose2d(input_size, width, 4, 1, 0, bias=False),
        nn.BatchNorm2d(width),
        nn.ReLU(inplace=True),
    ]
    for _ in range(1, n_layers - 1):
        modules.extend([
            nn.ConvTranspose2d(width, width // 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(width // 2),
            nn.ReLU(inplace=True),
        ])
        width //= 2
    # Final projection to image channels: no batch norm before the output
    # activation, so the chosen non-linearity sets the value range directly.
    modules.append(nn.ConvTranspose2d(width, n_channels, 4, 2, 1, bias=False))
    if activation == 'tanh':
        modules.append(nn.Tanh())
    elif activation == 'sigmoid':
        modules.append(nn.Sigmoid())
    else:
        raise NotImplementedError
    self.main = nn.Sequential(*modules)

开发者ID:jthsieh,项目名称:DDPAE-video-prediction,代码行数:25,

示例2: __init__

​点赞 7

# 需要导入模块: from torch import nn [as 别名]

# 或者: from torch.nn import ReLU [as 别名]

def __init__(self):
    """Assemble a GoogLeNet (Inception v1) variant with a 10-way classifier.

    The Inception stages are declared via a spec table; ``setattr`` goes
    through ``nn.Module.__setattr__``, so submodule registration order is
    identical to writing each assignment out by hand.
    """
    super(GoogLeNet, self).__init__()
    # Stem: a single 3x3 conv block (small-image variant of the 7x7 stem).
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 192, kernel_size=3, padding=1),
        nn.BatchNorm2d(192),
        nn.ReLU(inplace=True),
    )
    # Each tuple: (in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes)
    # -- presumed Inception signature; confirm against the Inception class.
    for name, spec in [
        ('a3', (192, 64, 96, 128, 16, 32, 32)),
        ('b3', (256, 128, 128, 192, 32, 96, 64)),
    ]:
        setattr(self, name, Inception(*spec))
    self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
    for name, spec in [
        ('a4', (480, 192, 96, 208, 16, 48, 64)),
        ('b4', (512, 160, 112, 224, 24, 64, 64)),
        ('c4', (512, 128, 128, 256, 24, 64, 64)),
        ('d4', (512, 112, 144, 288, 32, 64, 64)),
        ('e4', (528, 256, 160, 320, 32, 128, 128)),
        ('a5', (832, 256, 160, 320, 32, 128, 128)),
        ('b5', (832, 384, 192, 384, 48, 128, 128)),
    ]:
        setattr(self, name, Inception(*spec))
    # Global pooling of the final 8x8 map, then the 10-class head.
    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.linear = nn.Linear(1024, 10)

开发者ID:StephanZheng,项目名称:neural-fingerprinting,代码行数:26,

示例3: __init__

​点赞 6

# 需要导入模块: from torch import nn [as 别名]

# 或者: from torch.nn import ReLU [as 别名]

def __init__(self, block, layers, num_classes=1000):
    """Dilated ResNet backbone: later stages trade stride for dilation.

    Args:
        block: residual block class (must expose an ``expansion`` attribute).
        layers: per-stage block counts, e.g. ``[3, 4, 6, 3]`` for ResNet-50.
        num_classes: width of the classifier head (kept for checkpoint
            compatibility; reportedly unused downstream).
    """
    self.inplanes = 64
    super(MyResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    # Stages 3 and 4 keep spatial resolution (stride 1) and instead grow the
    # receptive field through increasing dilation (1 -> 2 -> 4).
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
    # Classifier head retained only so pretrained weights load cleanly.
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    # He-style (fan-out) normal init for every conv layer.
    for module in self.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        fan_out = (module.kernel_size[0] * module.kernel_size[1]
                   * module.out_channels)
        module.weight.data.normal_(0, math.sqrt(2. / fan_out))

基于300条数据用CNN多分类预测时,训练精度特别差,代码如下class Model(Module): def __init__(self): super(Model, self).__init__() self.conv1_1 = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=(3,3),padding=1) self.bn1_1 = nn.BatchNorm2d(64) self.relu1_1 = nn.ReLU() self.pool1 = nn.MaxPool2d(kernel_size=4, stride=4) self.conv2_1 = nn.Conv2d(in_channels=64,out_channels=128,kernel_size=(3,3),padding=1) self.bn2_1 = nn.BatchNorm2d(128) self.relu2_1 = nn.ReLU() self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(in_channels=128,out_channels=256,kernel_size=(3,3),padding=1) self.bn3_1 = nn.BatchNorm2d(256) self.relu3_1 = nn.ReLU() self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv4_1 = nn.Conv2d(in_channels=256,out_channels=512,kernel_size=(3,3)) self.bn4_1 = nn.BatchNorm2d(512) self.relu4_1 = nn.ReLU() self.conv4_2 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=(3,3)) self.bn4_2 = nn.BatchNorm2d(512) self.relu4_2 = nn.ReLU() self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=(3,3)) self.bn5_1 = nn.BatchNorm2d(512) self.relu5_1 = nn.ReLU() self.conv5_2 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=(3,3)) self.bn5_2 = nn.BatchNorm2d(512) self.relu5_2 = nn.ReLU() self.pool5 = nn.AdaptiveAvgPool2d(5) self.dropout1 = nn.Dropout(p=0.3) self.fc1=nn.Linear(512*5*5,512) self.relu6=nn.ReLU() self.dropout2 = nn.Dropout(p=0.2) self.fc2=nn.Linear(512,141) ,具体如何修改代码
05-29
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值