Python init.constant method code examples

This article collects typical usage examples of the Python method torch.nn.init.constant, which fills a tensor in place with a given constant value. If you are wondering what init.constant does or how to call it, the curated examples below may help; you can also explore the other initializers in the torch.nn.init module. Note that PyTorch 0.4 renamed the function to init.constant_ (keeping init.constant around for a while as a deprecated alias), so the snippets below use the older spelling.

The following 25 code examples of init.constant are sorted by popularity by default. You can upvote the ones you like or find useful; your ratings help the system recommend better Python code examples.
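
As a quick orientation before the examples, here is a minimal sketch (my addition, not taken from any of the projects below) of what a constant fill does, written with the modern init.constant_ name:

import torch
from torch import nn
from torch.nn import init

layer = nn.Linear(4, 2)
init.constant_(layer.bias, 0)      # bias becomes [0., 0.]
init.constant_(layer.weight, 0.5)  # every weight entry becomes 0.5
# Pre-0.4 spelling, as used throughout the examples below:
# init.constant(layer.bias, 0)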

Example 1: conv

Upvotes: 7

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False, transposed=False):
    if transposed:
        layer = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=1, output_padding=1, dilation=dilation, bias=bias)
        # Bilinear interpolation init
        w = torch.Tensor(kernel_size, kernel_size)
        centre = kernel_size % 2 == 1 and stride - 1 or stride - 0.5
        for y in range(kernel_size):
            for x in range(kernel_size):
                w[y, x] = (1 - abs((x - centre) / stride)) * (1 - abs((y - centre) / stride))
        layer.weight.data.copy_(w.div(in_planes).repeat(in_planes, out_planes, 1, 1))
    else:
        padding = (kernel_size + 2 * (dilation - 1)) // 2
        layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
    if bias:
        init.constant(layer.bias, 0)
    return layer

# Returns 2D batch normalisation layer

Author: Kaixhin, Project: FCN-semantic-segmentation, Lines of code: 21
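
A usage sketch for the helper above (my addition, not part of the source project); it assumes torch, nn and the conv function are already in scope:

x = torch.randn(1, 64, 32, 32)
down = conv(64, 128, stride=2)                 # strided 3x3 conv, halves the resolution
up = conv(128, 64, stride=2, transposed=True)  # bilinear-initialised upsampling
assert down(x).shape == (1, 128, 16, 16)
assert up(down(x)).shape == (1, 64, 32, 32)    # back to the input resolution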

Example 2: __init__

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def __init__(self, num_classes, pretrained_net):
    super().__init__()
    self.pretrained_net = pretrained_net
    self.relu = nn.ReLU(inplace=True)
    self.conv5 = conv(512, 256, stride=2, transposed=True)
    self.bn5 = bn(256)
    self.conv6 = conv(256, 128, stride=2, transposed=True)
    self.bn6 = bn(128)
    self.conv7 = conv(128, 64, stride=2, transposed=True)
    self.bn7 = bn(64)
    self.conv8 = conv(64, 64, stride=2, transposed=True)
    self.bn8 = bn(64)
    self.conv9 = conv(64, 32, stride=2, transposed=True)
    self.bn9 = bn(32)
    self.conv10 = conv(32, num_classes, kernel_size=7)
    init.constant(self.conv10.weight, 0)  # Zero init

Author: Kaixhin, Project: FCN-semantic-segmentation, Lines of code: 18

Example 3: init_params

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # original checked `if m.bias:`, which is ambiguous for tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)

#_, term_width = os.popen('stty size', 'r').read().split()
# term_width = int(term_width)

Author: leehomyc, Project: mixup_pytorch, Lines of code: 20

Example 4: reset_parameters

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def reset_parameters(self):
    """
    Initialize parameters following the way proposed in the paper.
    """
    # The input-to-hidden weight matrix is initialized orthogonally.
    init.orthogonal(self.weight_ih.data)
    # The hidden-to-hidden weight matrix is initialized as an identity
    # matrix.
    weight_hh_data = torch.eye(self.hidden_size)
    weight_hh_data = weight_hh_data.repeat(1, 4)
    self.weight_hh.data.set_(weight_hh_data)
    # The bias is just set to zero vectors.
    init.constant(self.bias.data, val=0)
    # Initialization of BN parameters.
    self.bn_ih.reset_parameters()
    self.bn_hh.reset_parameters()
    self.bn_c.reset_parameters()
    self.bn_ih.bias.data.fill_(0)
    self.bn_hh.bias.data.fill_(0)
    self.bn_ih.weight.data.fill_(0.1)
    self.bn_hh.weight.data.fill_(0.1)
    self.bn_c.weight.data.fill_(0.1)

Author: LiyuanLucasLiu, Project: RAdam, Lines of code: 25

Example 5: __init__

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def __init__(self, dim, dropout=0.2, slope=0.0):
    super(SDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    self.dropmodule1 = nn.Dropout(p=dropout)
    self.dropmodule2 = nn.Dropout(p=dropout)
    self.loss = nn.MSELoss(size_average=True)
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias is not None:  # original checked `m.bias.data is not None`
                init.constant(m.bias, 0)

Author: shahsohil, Project: DCC, Lines of code: 26

Example 6: __init__

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def __init__(self, dim, slope=0.0):
    super(extractSDAE, self).__init__()
    self.in_dim = dim[0]
    self.nlayers = len(dim) - 1
    self.reluslope = slope
    self.enc, self.dec = [], []
    for i in range(self.nlayers):
        self.enc.append(nn.Linear(dim[i], dim[i+1]))
        setattr(self, 'enc_{}'.format(i), self.enc[-1])
        self.dec.append(nn.Linear(dim[i+1], dim[i]))
        setattr(self, 'dec_{}'.format(i), self.dec[-1])
    self.base = []
    for i in range(self.nlayers):
        self.base.append(nn.Sequential(*self.enc[:i]))
    # initialization
    for m in self.modules():
        if isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-2)
            if m.bias is not None:  # original checked `m.bias.data is not None`
                init.constant(m.bias, 0)

Author: shahsohil, Project: DCC, Lines of code: 23

Example 7: reset_params

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def reset_params(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_in')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm1d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.kaiming_normal(m.weight, mode='fan_in')
            if m.bias is not None:
                init.constant(m.bias, 0)

Author: aliyun, Project: alibabacloud-quantization-networks, Lines of code: 18

Example 8: weights_init

Upvotes: 6

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def weights_init(init_type='xavier'):
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'normal':
                init.normal(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
        elif classname.find('Norm') == 0:
            if hasattr(m, 'weight') and m.weight is not None:
                init.constant(m.weight.data, 1.0)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
    return init_fun

Author: Xiaoming-Yu, Project: DMIT, Lines of code: 26
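
Since this is a factory that returns an initializer function, a usage sketch may help (my addition, not from the DMIT project; it assumes math, nn and init are imported as in the snippet above):

net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 3, 3))
net.apply(weights_init('kaiming'))     # Kaiming-normal weights, zero biases
net.apply(weights_init('orthogonal'))  # or re-initialise with another scheme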

Example 9: init_params

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:  # original checked `if m.bias:`, which is ambiguous for tensors
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)

Author: zhunzhong07, Project: Random-Erasing, Lines of code: 16

Example 10: conv

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False,
         transposed=False):
    """
    Returns 2D convolutional layer with space-preserving padding
    """
    if transposed:
        layer = nn.ConvTranspose2d(
            in_planes, out_planes, kernel_size=kernel_size, stride=stride,
            padding=1, output_padding=1, dilation=dilation, bias=bias)
        # Bilinear interpolation init
        w = torch.Tensor(kernel_size, kernel_size)
        centre = kernel_size % 2 == 1 and stride - 1 or stride - 0.5
        for y in range(kernel_size):
            for x in range(kernel_size):
                w[y, x] = (1 - abs((x - centre) / stride)) * (1 - abs(
                    (y - centre) / stride))
        layer.weight.data.copy_(
            w.div(in_planes).repeat(in_planes, out_planes, 1, 1))
    else:
        padding = (kernel_size + 2 * (dilation - 1)) // 2
        layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                          stride=stride, padding=padding, dilation=dilation,
                          bias=bias)
    if bias:
        init.constant(layer.bias, 0)
    return layer

Author: ehsanik, Project: dogTorch, Lines of code: 28

Example 11: conv_init

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        init.constant(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant(m.weight, 1)
        init.constant(m.bias, 0)

Author: wgrathwohl, Project: JEM, Lines of code: 10

Example 12: init_weights_xavier

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def init_weights_xavier(model):
    if isinstance(model, nn.Conv2d):
        init.xavier_normal(model.weight)
        init.constant(model.bias, 0)

Author: minerva-ml, Project: steppy-toolkit, Lines of code: 6

Example 13: init_weights_he

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def init_weights_he(model):
    if isinstance(model, nn.Conv2d):
        init.kaiming_normal(model.weight)
        init.constant(model.bias, 0)

Author: minerva-ml, Project: steppy-toolkit, Lines of code: 6

Example 14: reset_parameters

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def reset_parameters(self):
    init.constant(self.weight, self.gamma)

Author: soo89, Project: CSD-SSD, Lines of code: 4
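
This one-liner is the entire initialisation of an SSD-style L2Norm layer: self.weight is a learnable per-channel scale and self.gamma is its constant starting value. For context, here is a minimal reconstruction of such a module (my sketch, not the CSD-SSD code, written with the modern constant_ name):

import torch
from torch import nn
from torch.nn import init

class L2Norm(nn.Module):
    """Channel-wise L2 normalisation with a learnable scale, as used in SSD."""
    def __init__(self, n_channels, gamma=20.0):
        super(L2Norm, self).__init__()
        self.gamma = gamma
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        # x: (N, C, H, W); normalise each spatial position across channels
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        return self.weight.view(1, -1, 1, 1) * (x / norm)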

Example 15: init_weights

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            init.normal(m.weight.data, 0.0, gain)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal(m.weight.data, 1.0, gain)
            init.constant(m.bias.data, 0.0)
    print('Network initialized with weights sampled from N(0,0.02).')
    net.apply(init_func)

Author: arnab39, Project: cycleGAN-PyTorch, Lines of code: 15

Example 16: reset_params

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def reset_params(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=0.001)
            if m.bias is not None:
                init.constant(m.bias, 0)

Author: gddingcs, Project: Dispersion-based-Clustering, Lines of code: 15

Example 17: weights_init_normal

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def weights_init_normal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)

Author: ozan-oktay, Project: Attention-Gated-Networks, Lines of code: 12

Example 18: weights_init_xavier

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def weights_init_xavier(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal(m.weight.data, gain=1)
    elif classname.find('BatchNorm') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)

Author: ozan-oktay, Project: Attention-Gated-Networks, Lines of code: 12

Example 19: weights_init_kaiming

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)

Author: ozan-oktay, Project: Attention-Gated-Networks, Lines of code: 12

Example 20: weights_init_orthogonal

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)

Author: ozan-oktay, Project: Attention-Gated-Networks, Lines of code: 12

Example 21: reset_parameters

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

# Uses numpy as np and the old-style initializers imported as free functions:
# from torch.nn.init import xavier_uniform, normal, constant
def reset_parameters(self,
                     init_fc_w=xavier_uniform,
                     init_fc_b=lambda x: constant(x, 0),
                     init_embed=lambda x: normal(x, std=0.05),
                     **kwargs):
    """Resets the trainable weights."""
    def set_constant_row(parameters, iRow=0, value=0):
        """Return `parameters` with row `iRow` as a constant `value`."""
        data = parameters.data
        data[iRow, :] = value
        return torch.nn.Parameter(data, requires_grad=parameters.requires_grad)
    np.random.seed(self.seed)
    if self.seed is not None:
        torch.manual_seed(self.seed)
    if not self.isHash:
        self.embedding.weight = init_embed(self.embedding.weight)
        if self.paddingIdx is not None:
            # Unfortunately has to set weight to 0 even when paddingIdx = 0
            self.embedding.weight = set_constant_row(self.embedding.weight)
    else:
        self.embedding.reset_parameters(**kwargs)
    self.fc1.weight = init_fc_w(self.fc1.weight)
    self.fc1.bias = init_fc_b(self.fc1.bias)  # original had the typo `self.fc1.biais = init_fc_b(self.fc1.weight)`

Author: YannDubs, Project: Hash-Embeddings, Lines of code: 29

Example 22: bn

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def bn(planes):
    layer = nn.BatchNorm2d(planes)
    # Use mean 0, standard deviation 1 init
    init.constant(layer.weight, 1)
    init.constant(layer.bias, 0)
    return layer

Author: Kaixhin, Project: FCN-semantic-segmentation, Lines of code: 8
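
Combined with the conv helper from Examples 1 and 10, this gives a typical decoder block (a sketch of my own, assuming both helpers and nn are in scope):

decoder_block = nn.Sequential(
    conv(128, 64, stride=2, transposed=True),  # bilinear-initialised upsampling
    bn(64),                                    # BN starts as identity: weight=1, bias=0
    nn.ReLU(inplace=True),
)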

Example 23: __init__

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def __init__(self, num_cls=19, pretrained=True, weights_init=None,
             output_last_ft=False):
    super().__init__()
    self.output_last_ft = output_last_ft
    self.vgg = make_layers(vgg.cfg['D'])
    self.vgg_head = nn.Sequential(
        nn.Conv2d(512, 4096, 7),
        nn.ReLU(inplace=True),
        nn.Dropout2d(p=0.5),
        nn.Conv2d(4096, 4096, 1),
        nn.ReLU(inplace=True),
        nn.Dropout2d(p=0.5),
        nn.Conv2d(4096, num_cls, 1)
    )
    self.upscore2 = self.upscore_pool4 = Bilinear(2, num_cls)
    self.upscore8 = Bilinear(8, num_cls)
    self.score_pool4 = nn.Conv2d(512, num_cls, 1)
    for param in self.score_pool4.parameters():
        init.constant(param, 0)
    self.score_pool3 = nn.Conv2d(256, num_cls, 1)
    for param in self.score_pool3.parameters():
        init.constant(param, 0)
    if pretrained:
        if weights_init is not None:
            self.load_weights(torch.load(weights_init))
        else:
            self.load_base_weights()

Author: jhoffman, Project: cycada_release, Lines of code: 30

Example 24: forward

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def forward(self, x):
    input = x
    x = F.pad(x, (99, 99, 99, 99), mode='constant', value=0)
    intermediates = {}
    fts_to_save = {16: 'pool3', 23: 'pool4'}
    for i, module in enumerate(self.vgg):
        x = module(x)
        if i in fts_to_save:
            intermediates[fts_to_save[i]] = x

    ft_to_save = 5  # Dropout before classifier
    last_ft = {}
    for i, module in enumerate(self.vgg_head):
        x = module(x)
        if i == ft_to_save:
            last_ft = x

    _, _, h, w = x.size()
    upscore2 = self.upscore2(x)
    pool4 = intermediates['pool4']
    score_pool4 = self.score_pool4(0.01 * pool4)
    score_pool4c = _crop(score_pool4, upscore2, offset=5)
    fuse_pool4 = upscore2 + score_pool4c
    upscore_pool4 = self.upscore_pool4(fuse_pool4)
    pool3 = intermediates['pool3']
    score_pool3 = self.score_pool3(0.0001 * pool3)
    score_pool3c = _crop(score_pool3, upscore_pool4, offset=9)
    fuse_pool3 = upscore_pool4 + score_pool3c
    upscore8 = self.upscore8(fuse_pool3)
    score = _crop(upscore8, input, offset=31)
    if self.output_last_ft:
        return score, last_ft
    else:
        return score

Author: jhoffman, Project: cycada_release, Lines of code: 36

Example 25: weights_init_normal

Upvotes: 5

# Required import: from torch.nn import init [as alias]
# Or: from torch.nn.init import constant [as alias]

def weights_init_normal(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)

Author: joelmoniz, Project: DepthNets, Lines of code: 12

Note: the torch.nn.init.constant examples in this article were collected from GitHub, MSDocs and other source-code hosting and documentation platforms. The snippets are selected from open-source projects; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not repost without permission.
