将 Caffe 的 model/prototxt 转换为 PyTorch model(适用于 Caffe 不支持 relu6 的情况)

这段代码展示了如何将 Caffe 模型的权重转换到 PyTorch 模型中,包括卷积层和全连接层的转换。定义了一个 yiNet 类,其结构与 Caffe 模型相对应,并通过 net_from_caffe 函数加载并赋值权重。最后,保存转换后的 PyTorch 模型。

import torch
import numpy
import torch.nn as nn
import torch.nn.functional as F


def net_from_caffe(n, re):
    """Copy Caffe-exported weights into the PyTorch module `n` in place.

    `re` is a sequence of per-layer records (dicts) with a 'weights' key
    holding ``[weight_array, bias_array]`` as numpy arrays; records for
    parameter-free Caffe layers have an empty 'weights' list.

    Children of `n` without a ``weight`` attribute (ReLU/Dropout/Pool) are
    skipped, so the k-th parameterized child of `n` is matched with the
    k-th parameterized record of `re` — the two must be in the same order.
    """
    i = -1
    for name, _child in n.named_children():
        try:
            l2 = getattr(n, name)
            l2.weight  # raises for parameter-free layers -> skip them
        except Exception:
            continue
        i += 1
        # Advance past Caffe records that carry no parameters.
        # NOTE(review): the original loop condition was truncated in the
        # scraped source; `i < len(re) - 1` keeps the index in range —
        # confirm against the original file.
        while len(re[i]['weights']) == 0 and i < len(re) - 1:
            i += 1
        w = torch.from_numpy(re[i]['weights'][0])
        b = torch.from_numpy(re[i]['weights'][1])
        # Shapes must line up exactly; a mismatch means the layer order
        # of `n` and `re` has diverged.
        assert w.size() == l2.weight.size()
        assert b.size() == l2.bias.size()
        l2.weight.data.copy_(w)
        l2.bias.data.copy_(b)


class yiNet(nn.Module):
    """VGG16-style backbone with six parallel classifier heads.

    Attribute names mirror the Caffe layer order so `net_from_caffe`
    can match children positionally; renaming them would change the
    saved `state_dict` keys.
    """

    def __init__(self):
        super(yiNet, self).__init__()
        # conv1
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/2

        # conv2
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/4

        # conv3
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/8

        # conv4
        self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/16

        # conv5
        self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)  # 1/32

        # fc6: 25088 = 512 * 7 * 7, i.e. a 224x224 input after five
        # /2 poolings. Note PyTorch Linear is (in_features, out_features),
        # the opposite argument order of Caffe's InnerProduct.
        self.fc6 = nn.Linear(25088, 4096)
        self.relu6 = nn.ReLU(inplace=True)
        # NOTE(review): Dropout2d on a flattened (N, features) tensor is
        # unusual — kept to match the original module types/state_dict;
        # nn.Dropout would be the conventional choice here.
        self.drop6 = nn.Dropout2d()

        # fc7
        self.fc7 = nn.Linear(4096, 4096)
        self.relu7 = nn.ReLU(inplace=True)
        self.drop7 = nn.Dropout2d()

        # Six independent classification heads over the shared fc7 features.
        self.classifier_color = nn.Linear(4096, 32)
        self.classifier_elements = nn.Linear(4096, 195)
        self.classifier_style = nn.Linear(4096, 24)
        self.classifier_types = nn.Linear(4096, 224)
        self.classifier_material = nn.Linear(4096, 82)
        self.classifier_attributes = nn.Linear(4096, 100)

    def forward(self, x):
        """Run the backbone and return the six head outputs concatenated
        along dim 1: (N, 32+195+24+224+82+100) = (N, 657)."""
        h = x
        h = self.relu1_1(self.conv1_1(h))
        h = self.relu1_2(self.conv1_2(h))
        h = self.pool1(h)

        h = self.relu2_1(self.conv2_1(h))
        h = self.relu2_2(self.conv2_2(h))
        h = self.pool2(h)

        h = self.relu3_1(self.conv3_1(h))
        h = self.relu3_2(self.conv3_2(h))
        h = self.relu3_3(self.conv3_3(h))
        h = self.pool3(h)

        h = self.relu4_1(self.conv4_1(h))
        h = self.relu4_2(self.conv4_2(h))
        h = self.relu4_3(self.conv4_3(h))
        h = self.pool4(h)

        h = self.relu5_1(self.conv5_1(h))
        h = self.relu5_2(self.conv5_2(h))
        h = self.relu5_3(self.conv5_3(h))
        h = self.pool5(h)

        h = h.view(h.size(0), -1)  # flatten to (N, 25088)
        h = self.relu6(self.fc6(h))
        h = self.drop6(h)
        h = self.relu7(self.fc7(h))
        h = self.drop7(h)

        color = self.classifier_color(h)
        elements = self.classifier_elements(h)
        style = self.classifier_style(h)
        types = self.classifier_types(h)
        materials = self.classifier_material(h)
        attributes = self.classifier_attributes(h)
        h = torch.cat((color, elements, style, types, materials, attributes), 1)
        return h


def main():
    """Load the Caffe-exported numpy weights, copy them into yiNet, and
    save both the state_dict and the full pickled module."""
    numpy_model_pth = './np.npy'
    n = yiNet()
    # allow_pickle=True is required on modern numpy because the file
    # stores a pickled list of layer dicts, not a plain array.
    numpy_model = numpy.load(numpy_model_pth, encoding='latin1',
                             allow_pickle=True)
    net_from_caffe(n, numpy_model)
    torch.save(n.state_dict(), './th.state')
    torch.save(n, './yinet.pth')
    # To reload later:
    #   the_model = yiNet()
    #   the_model.load_state_dict(torch.load('./th.state'))
    # This keeps parameters separate from the nn.Module class definition.


if __name__ == '__main__':
    main()

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值