Model packaging
import torch

def load_model(model, model_path):
    # Map all storages to CPU so a GPU-trained checkpoint loads anywhere
    package = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(package['state_dict'])
    return model
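For reference, a minimal sketch of the save side this loader assumes (a checkpoint dict with a 'state_dict' key; the Linear model is a hypothetical stand-in):

import torch

model = torch.nn.Linear(4, 2)  # hypothetical model
torch.save({'state_dict': model.state_dict()}, 'model.pth')
restored = load_model(torch.nn.Linear(4, 2), 'model.pth')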
Model annotation
class Model(torch.nn.Module):
    # torch.jit.script compiles model.forward by default, but here the entry
    # point is inference, so mark forward as unused and export inference.
    @torch.jit.unused
    def forward(self, inputs):
        ...

    @torch.jit.export
    def inference(self, inputs):
        # type: (Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tensor
        ...
- model.inference is what gets called, but torch.jit.script compiles forward by default
- add the @torch.jit.unused decorator before the forward function
- add the @torch.jit.export decorator before the inference function
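A minimal end-to-end sketch of this pattern (the toy inference body is an assumption):

import torch
from torch import Tensor

class Model(torch.nn.Module):
    @torch.jit.unused
    def forward(self, inputs):
        # Python-only path; torch.jit.script replaces it with a raising stub
        raise NotImplementedError

    @torch.jit.export
    def inference(self, x: Tensor) -> Tensor:
        return torch.relu(x)  # hypothetical inference body

scripted = torch.jit.script(Model())
out = scripted.inference(torch.randn(3))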
cudnn
Caffe2: Cannot find cuDNN library. Turning the option off
Make sure cuDNN is installed correctly; install it with: dpkg -i dir/libcudnn7-dev_7.6.0.64-1+cuda10.0_amd64.deb
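A quick sanity check from Python that PyTorch actually sees cuDNN:

import torch

print(torch.backends.cudnn.is_available())  # True if the cuDNN library was found
print(torch.backends.cudnn.version())       # e.g. 7600 for cuDNN 7.6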
Variables
As in C++, declare every variable you use up front in __init__ so TorchScript can resolve its type; otherwise you run into structure-incompatibility errors.
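A minimal sketch of up-front declaration (MyBlock and its attributes are hypothetical); note that empty containers need an explicit type annotation:

import torch
from typing import List

class MyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.scale: float = 1.0       # declared before use in forward
        self.history: List[int] = []  # empty list needs an annotation

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.scale

scripted = torch.jit.script(MyBlock())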
Decorators
If forward calls other methods of the same class, and calls them more than once, scripting may fail with:
NotSupportedError(r, "Only a single generator is currently supported")
torch.jit.frontend.NotSupportedError: Only a single generator is currently supported:
One workaround is to move the called function out of the class, as in the sketch below.
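A minimal sketch of the workaround, with a hypothetical module-level helper:

import torch

def _helper(x: torch.Tensor) -> torch.Tensor:
    # Free function at module level; TorchScript compiles it independently
    return torch.relu(x) + 1.0

class Net(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = _helper(x)
        x = _helper(x)  # repeated calls are fine here
        return x

scripted = torch.jit.script(Net())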
- @torch.jit.ignore for functions that are not needed
- @torch.jit.export for functions that are called
- In torch code, do not mix in numpy computations (see the sketch below)
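A minimal sketch of keeping the computation in torch ops end to end (the normalization itself is a made-up example):

import torch

class Norm(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Wrong: np.sqrt(float(x.sum())) would not compile under torch.jit.script
        # Right: the torch equivalent keeps everything scriptable
        return x / torch.sqrt(x.pow(2).sum() + 1e-8)

scripted = torch.jit.script(Norm())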
nn.Parameter
- nn.Parameter(torch.randn(10, 10)) errors out under torch.jit.script
When using nn.Parameter objects, wrap them in a torch.nn.ParameterList or torch.nn.ParameterDict. torch.jit.script then converts them to the corresponding list or dict type, so they work normally in TorchScript.
import torch

class MyModel(torch.nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        # Wrapping the Parameter in a ParameterList keeps torch.jit.script happy
        self.params = torch.nn.ParameterList([torch.nn.Parameter(torch.randn(10, 10))])

    def forward(self, x):
        return x

model = MyModel()
scripted_model = torch.jit.script(model)
weight_norm
- Calling torch.nn.utils.remove_weight_norm directly still leaves scripting complaining that weight_norm is present, and deleting weight_g/weight_v from the model by hand then breaks the lookup of the new weight. Instead, add a __prepare_scriptable__ method to the model definition and remove weight_norm there:
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0):
    # argument list reconstructed from the variables used below
    super().__init__()
    self.in_layers = torch.nn.ModuleList()
    self.res_skip_layers = torch.nn.ModuleList()
    if gin_channels != 0:
        cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
        self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
    for i in range(n_layers):
        dilation = dilation_rate ** i
        padding = int((kernel_size * dilation - dilation) / 2)
        in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
                                   dilation=dilation, padding=padding)
        in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
        self.in_layers.append(in_layer)
        # last one is not necessary
        if i < n_layers - 1:
            res_skip_channels = 2 * hidden_channels
        else:
            res_skip_channels = hidden_channels
        res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
        res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
        self.res_skip_layers.append(res_skip_layer)

def __prepare_scriptable__(self):
    # Strip the weight_norm forward pre-hooks before torch.jit.script compiles the module
    for hook in self.cond_layer._forward_pre_hooks.values():
        if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm":
            torch.nn.utils.remove_weight_norm(self.cond_layer)
    for in_layer in self.in_layers:
        for hook in in_layer._forward_pre_hooks.values():
            if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm":
                torch.nn.utils.remove_weight_norm(in_layer)
    for res_skip_layer in self.res_skip_layers:
        for hook in res_skip_layer._forward_pre_hooks.values():
            if hook.__module__ == "torch.nn.utils.weight_norm" and hook.__class__.__name__ == "WeightNorm":
                torch.nn.utils.remove_weight_norm(res_skip_layer)
    return self
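torch.jit.script calls __prepare_scriptable__ automatically before compiling, so scripting works without touching the training-time weights. A minimal usage sketch (the class name WN and the sizes are hypothetical):

wn = WN(hidden_channels=192, kernel_size=5, dilation_rate=1, n_layers=4, gin_channels=256)
scripted = torch.jit.script(wn)  # runs __prepare_scriptable__ first, stripping weight_norm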
drop_out
Delete dropout when running inference (see the sketch below).
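A minimal sketch (the Net module is hypothetical): model.eval() turns nn.Dropout into a no-op, and replacing it with nn.Identity removes it from the exported graph entirely:

import torch

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(10, 10)
        self.dropout = torch.nn.Dropout(0.5)

    def forward(self, x):
        return self.dropout(self.linear(x))

model = Net()
model.eval()                          # dropout becomes a no-op
model.dropout = torch.nn.Identity()   # or drop it from the module altogether
scripted = torch.jit.script(model)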
modulelist
Calling the layers of a ModuleList by index raises an error under torch.jit.script:

self.in_layers = torch.nn.ModuleList()
for i in range(3):
    x = self.in_layers[i](x)

Change it to iterate over the list directly:

self.in_layers = torch.nn.ModuleList()
for layer in self.in_layers:
    x = layer(x)

or, when the index is also needed, use enumerate:

self.in_layers = torch.nn.ModuleList()
for i, layer in enumerate(self.in_layers):
    x = layer(x)