本文整理汇总了Python中torchvision.models.vgg16方法的典型用法代码示例。如果您正苦于以下问题:Python models.vgg16方法的具体用法?Python models.vgg16怎么用?Python models.vgg16使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块torchvision.models的用法示例。
在下文中一共展示了models.vgg16方法的18个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: __init__
点赞 7
# 需要导入模块: from torchvision import models [as 别名]
# 或者: from torchvision.models import vgg16 [as 别名]
def __init__(self, alignsize = 8, reddim = 32, loadweight = True, model = None, downsample = 4):
    """Assemble the multi-scale shared cropping model.

    A backbone selected by ``model`` extracts features, a 1x1 conv reduces
    them to ``reddim`` channels, and RoI/RoD-aligned crops at ``alignsize``
    feed the final fully-connected scorer.

    Args:
        alignsize: spatial size of the RoIAlign / RoDAlign output grid.
        reddim: channel count after dimensionality reduction.
        loadweight: forwarded to the backbone factory (pretrained weights).
        model: backbone name - 'shufflenetv2', 'mobilenetv2', 'vgg16' or
            'resnet50'. Any other value leaves Feat_ext/DimRed unset, exactly
            as the original if/elif chain did.
        downsample: log2 of the total feature-map stride.
    """
    super(crop_model_multi_scale_shared, self).__init__()
    # Backbone name -> (feature-extractor factory, channels fed into DimRed).
    backbones = {
        'shufflenetv2': (shufflenetv2_base, 812),
        'mobilenetv2': (mobilenetv2_base, 448),
        'vgg16': (vgg_base, 1536),
        'resnet50': (resnet50_base, 3584),
    }
    if model in backbones:
        base_factory, feat_channels = backbones[model]
        self.Feat_ext = base_factory(loadweight, downsample)
        self.DimRed = nn.Conv2d(feat_channels, reddim, kernel_size=1, padding=0)
    self.downsample2 = nn.UpsamplingBilinear2d(scale_factor=1.0 / 2.0)
    self.upsample2 = nn.UpsamplingBilinear2d(scale_factor=2.0)
    # Both alignment ops map RoIs from image space to feature space
    # with stride 2**downsample.
    feat_scale = 1.0 / 2 ** downsample
    self.RoIAlign = RoIAlignAvg(alignsize, alignsize, feat_scale)
    self.RoDAlign = RoDAlignAvg(alignsize, alignsize, feat_scale)
    self.FC_layers = fc_layers(reddim * 2, alignsize)
开发者ID:HuiZeng,项目名称:Grid-Anchor-based-Image-Cropping-Pytorch,代码行数:23
示例2: get_image_format
点赞 7
# 需要导入模块: from torchvision import models [as 别名]
# 或者: from torchvision.models import vgg16 [as 别名]
def get_image_format(framework_name, model_name):
    """Return the correct input range and shape for target framework and model.

    Args:
        framework_name: e.g. 'pytorch', 'keras', 'cloud'.
        model_name: model identifier within that framework, e.g. 'vgg16'.

    Returns:
        dict with keys 'shape' (H, W tuple) and 'bounds' (min, max pixel
        value). Unknown framework/model pairs fall back to the defaults
        (224, 224) and (0, 1).
    """
    # Models whose expected input resolution differs from the 224x224 default.
    special_shape = {'pytorch': {'inception_v3': (299, 299)},
                     'keras': {'xception': (299, 299),
                               'inception_v3': (299, 299),
                               'yolo_v3': (416, 416),
                               'ssd300': (300, 300)}}
    # Models that consume raw 0-255 pixels instead of normalized 0-1 input.
    special_bound = {'keras': {'vgg16': (0, 255),
                               'vgg19': (0, 255),
                               'resnet50': (0, 255),
                               'ssd300': (0, 255)},
                     'cloud': {'aip_antiporn': (0, 255),
                               'google_safesearch': (0, 255),
                               'google_objectdetection': (0, 255)}}
    # Chained .get with an empty-dict default replaces the original
    # nested "if framework in table / if model in row" lookups.
    shape = special_shape.get(framework_name, {}).get(model_name, (224, 224))
    bounds = special_bound.get(framework_name, {}).get(model_name, (0, 1))
    return {'shape': shape, 'bounds': bounds}
开发者ID:advboxes,项目名称:perceptron-benchmark,代码行数:25
示例3: __init__
点赞 6
# 需要导入模块: from torchvision import models [as 别名]
# 或者: from torchvision.models import vgg16 [as 别名]
def __init__(self):
    """Build DANNet: a pretrained VGG16 feature extractor followed by a
    two-layer MLP head and a 31-way classification layer.
    """
    super(DANNet, self).__init__()
    backbone = models.vgg16(pretrained=True)  # False
    self.features = backbone.features
    # NOTE: prune:True // finetune:False
    for p in self.features.parameters():
        p.requires_grad = True
    # Classifier head: 25088 (= 512*7*7 flattened VGG features) -> 4096 -> 4096.
    head = [
        nn.Dropout(),
        nn.Linear(25088, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
    ]
    self.classifier = nn.Sequential(*head)
    # Final projection to the 31 target classes (Office-31-sized output).
    self.cls_fc = nn.Linear(4096, 31)
开发者ID:jindongwang,项目名称:transferlearning,代码行数:19
示例4: __init__
点赞 6
# 需要导入模块: