Hello, I have a question: how do I convert a MobileNet-SSD .caffemodel to a .wk file using RuyiStudio? My model does not contain Batch Normalization layers, so what changes do I need to make to the original .prototxt file?
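For context, as far as I know a standard MobileNet-SSD training prototxt pairs each convolution with BatchNorm and Scale layers, something like the sketch below (layer names are illustrative only); my model has none of these, since they were already merged into the convolution weights, which is why I am unsure whether any BN-merge step in the conversion still applies:

layer {
  name: "conv0/bn"      # hypothetical name, for illustration only
  type: "BatchNorm"
  bottom: "conv0"
  top: "conv0"
}
layer {
  name: "conv0/scale"   # the Scale layer that usually follows BatchNorm in Caffe
  type: "Scale"
  bottom: "conv0"
  top: "conv0"
  scale_param {
    bias_term: true     # learned per-channel scale and bias after normalization
  }
}

My current .prototxt file is pasted below in full.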
name: "MobileNet-SSD"
input: "data"
input_shape {
dim: 1
dim: 3
dim: 300
dim: 300
}
layer {
name: "conv0"
type: "Convolution"
bottom: "data"
top: "conv0"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 32
bias_term: true
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv0/relu"
type: "ReLU"
bottom: "conv0"
top: "conv0"
}
layer {
name: "conv1/dw"
type: "Convolution"
bottom: "conv0"
top: "conv1/dw"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 32
bias_term: true
pad: 1
kernel_size: 3
group: 32
weight_filler {
type: "msra"
}
engine: CAFFE
}
}
layer {
name: "conv1/dw/relu"
type: "ReLU"
bottom: "conv1/dw"
top: "conv1/dw"
}
layer {
name: "conv1"
type: "Convolution"
bottom: "conv1/dw"
top: "conv1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 64
bias_term: true
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv1/relu"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "conv2/dw"
type: "Convolution"
bottom: "conv1"
top: "conv2/dw"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 64
bias_term: true
pad: 1
kernel_size: 3
group: 64
stride: 2
weight_filler {
type: "msra"
}
engine: CAFFE
}
}
layer {
name: "conv2/dw/relu"
type: "ReLU"
bottom: "conv2/dw"
top: "conv2/dw"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "conv2/dw"
top: "conv2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: true
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv2/relu"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "conv3/dw"
type: "Convolution"
bottom: "conv2"
top: "conv3/dw"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: true
pad: 1
kernel_size: 3
group: 128
weight_filler {
type: "msra"
}
engine: CAFFE
}
}
layer {
name: "conv3/dw/relu"
type: "ReLU"
bottom: "conv3/dw"
top: "conv3/dw"
}
layer {
name: "conv3"
type: "Convolution"
bottom: "conv3/dw"
top: "conv3"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: true
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv3/relu"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4/dw"
type: "Convolution"
bottom: "conv3"
top: "conv4/dw"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: true
pad: 1
kernel_size: 3
group: 128
stride: 2
weight_filler {
type: "msra"
}
engine: CAFFE
}
}
layer {
name: "conv4/dw/relu"
type: "ReLU"
bottom: "conv4/dw"
top: "conv4/dw"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv4/dw"
top: "conv4"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: true
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv4/relu"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5/dw"
type: "Convolution"
bottom: "conv4"
top: "conv5/dw"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: true
pad: 1
kernel_size: 3
group: 256
weight_filler {
type: "msra"
}
engine: CAFFE
}
}
layer {
name: "conv5/dw/relu"
type: "ReLU"
bottom: "conv5/dw"
top: "conv5/dw"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv5/dw"
top: "conv5"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: true
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv5/relu"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "conv6/dw"
type: "Convolution"
bottom: "conv5"
top: "conv6/dw"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: true
pad: 1
kernel_size: 3
group: 256
stride: 2
weight_filler {
type: "msra"
}
engine: CAFFE
}
}
layer {
name: "conv6/dw/relu"
type: "ReLU"
bottom: "conv6/dw"
top: "conv6/dw"
}
layer {
name: "conv6"
type: "Convolution"
bottom: "conv6/dw"
top: "conv6"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 512
bias_term: true
kernel_size: 1
weight_filler {
type: "msra"
}
}