DeepID Generation I


Original Source: http://blog.csdn.net/a_1937/article/details/50334919

This is a modified version of the original blog post; I was not able to fully reproduce the original author's results.


GitHub Source: https://github.com/HolmesShuan/DeepID-I-Reimplement


LFW Accuracy: 91.9%

Val DataSet: LFW 128x128 Gray

Train DataSet: CASIA WebFace Washed 144x144 Gray (Crop to 128x128)

Train Details: 60 epochs

Train Accuracy: ~80%

Test Accuracy: ~74%


Both the training and validation datasets were preprocessed with face cropping.
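For reference, a minimal preprocessing sketch that produces a 144x144 gray face crop in the format described above, assuming opencv-python's bundled Haar cascade for detection. The actual washed CASIA-WebFace crops may have been produced with a different detector and margins.

# Hypothetical preprocessing sketch: detect, crop, grayscale, and resize a face
# to 144x144. The detector and crop margins here are assumptions, not the exact
# pipeline used for the washed dataset.
import cv2

def preprocess_face(image_path, out_size=144):
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        return None  # no face found; caller decides how to handle it
    x, y, w, h = max(faces, key=lambda f: f[2] * f[3])  # keep the largest detection
    crop = gray[y:y + h, x:x + w]
    return cv2.resize(crop, (out_size, out_size))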


Network Structure:

name: "DEEPFACE"  
layer {
  name: "input_data"
  top: "data"
  top: "label"
  type: "Data"
  data_param {
    source: "/dataset/WebFace_train_lmdb"  
    backend: LMDB
    batch_size: 400
  }
  transform_param {
    mirror: true
    crop_size: 128
    mean_file: "/dataset/mean.binaryproto"  
  }
  include {
    phase: TRAIN
  }
}
layer {
  name: "input_data"
  top: "data"
  top: "label"
  type: "Data"
  data_param {
    source: "/dataset/WebFace_val_lmdb"  
    backend: LMDB
    batch_size: 200
  }
  transform_param {
    mirror: false
    crop_size: 128
    mean_file: "/dataset/mean.binaryproto"  
  }
  include {
    phase: TEST
  }
}

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    name: "conv1_w"
    lr_mult: 1
    decay_mult: 0
  }
  param {
    name: "conv1_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 20
    kernel_size: 4
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}

layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    name: "conv2_w"
    lr_mult: 1
    decay_mult: 0
  }
  param {
    name: "conv2_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 40
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}

layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 1
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    name: "conv3_w"
    lr_mult: 1
    decay_mult: 0
  }
  param {
    name: "conv3_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 60
    kernel_size: 3
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}

layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "pool3"
  type: "Pooling"
  bottom: "conv3"
  top: "pool3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "pool3"
  top: "conv4"
  param {
    name: "conv4_w"
    lr_mult: 1
    decay_mult: 0
  }
  param {
    name: "conv4_b"
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 80
    kernel_size: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "fc160_1"
  type: "InnerProduct"
  bottom: "pool3"
  top: "fc160_1"
  param {
    name: "fc160_1_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "fc160_1_b"
    lr_mult: 2
    decay_mult: 1
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}

layer {
  name: "fc160_2"
  type: "InnerProduct"
  bottom: "conv4"
  top: "fc160_2"
  param {
    name: "fc160_2_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "fc160_2_b"
    lr_mult: 2
    decay_mult: 1
  }
  inner_product_param {
    num_output: 256
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}

layer {
  name: "fc160"
  type: "Eltwise"
  bottom: "fc160_1"
  bottom: "fc160_2"
  top: "fc160"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "dropout"
  type: "Dropout"
  bottom: "fc160"
  top: "fc160"
  dropout_param {
    dropout_ratio: 0.4
  }
}

layer {
  name: "fc_class"
  type: "InnerProduct"
  bottom: "fc160"
  top: "fc_class"
  param {
    name: "fc_class_w"
    lr_mult: 1
    decay_mult: 1
  }
  param {
    name: "fc_class_b"
    lr_mult: 2
    decay_mult: 1
  }
  inner_product_param {
    num_output: 10574
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc_class"
  bottom: "label"
  top: "loss"
}

layer {
  name: "accuracy_TEST"
  type: "Accuracy"
  bottom: "fc_class"
  bottom: "label"
  top: "accuracy_TEST"
  include: { phase: TEST }
}

layer {
  name: "accuracy_TRAIN"
  type: "Accuracy"
  bottom: "fc_class"
  bottom: "label"
  top: "accuracy_TRAIN"
  include: { phase: TRAIN }
}
Features are extracted from the "fc160" layer (its dimension was changed from the original 160 to 256).
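A minimal pycaffe sketch of extracting the 256-d "fc160" feature for one image. The deploy prototxt name and caffemodel path are placeholders, and the input is expected to be a 128x128 gray crop as described above.

# Sketch: extract the 256-d "fc160" feature with pycaffe.
# "deploy.prototxt" and the .caffemodel path are placeholders.
import numpy as np
import caffe

caffe.set_mode_gpu()
net = caffe.Net('models/deepface/deploy.prototxt',
                'models/deepface/WebFace_iter_138992.caffemodel',
                caffe.TEST)

def extract_feature(gray_128):
    # gray_128: 128x128 uint8 grayscale crop; reshape to (1, 1, 128, 128).
    # NOTE: mean subtraction with mean.binaryproto is omitted here for brevity;
    # in practice it should match the training transform_param.
    blob = gray_128.astype(np.float32)[np.newaxis, np.newaxis, :, :]
    net.blobs['data'].reshape(*blob.shape)
    net.blobs['data'].data[...] = blob
    net.forward()
    return net.blobs['fc160'].data[0].copy()  # 256-d DeepID feature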


Solver.prototxt:

net: "models/deepface/train_val.prototxt"  

test_iter: 227
test_interval: 2044    
  
base_lr: 0.01  
lr_policy: "multistep"  
gamma: 0.5 
stepvalue: 12264  
stepvalue: 20440 
stepvalue: 28616
stepvalue: 36792
stepvalue: 51100
stepvalue: 75628
stepvalue: 89936
stepvalue: 102200
stepvalue: 114464
stepvalue: 126728
stepvalue: 138992
momentum: 0.9  
weight_decay: 0.005  
display: 100
max_iter: 138992
snapshot: 8176
snapshot_prefix: "models/deepface/WebFace"  
solver_mode: GPU  
device_id: 0  
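With lr_policy "multistep" and gamma 0.5, the learning rate starts at 0.01 and is halved at each listed stepvalue. A minimal sketch of launching training from pycaffe, assuming the solver above is saved as models/deepface/solver.prototxt:

# Sketch: run the solver above from pycaffe instead of the caffe CLI.
# The solver file path is a placeholder for wherever solver.prototxt is saved.
import caffe

caffe.set_device(0)
caffe.set_mode_gpu()
solver = caffe.get_solver('models/deepface/solver.prototxt')
solver.solve()  # roughly equivalent to: caffe train --solver=... --gpu=0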

Related Files:

If you are not familiar with how accuracy is measured on the LFW dataset, see:

http://blog.csdn.net/baidu_24281959/article/details/53218825

The provided files include:

the val.txt and train.txt used to build the training dataset

a .txt file of positive/negative sample pairs for measuring accuracy on the LFW dataset (a verification sketch is given below)

a .py script for testing

a .sh script for generating the LMDB files

Here.
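As a rough sketch of how LFW verification accuracy can be computed from the extracted features: score each pair by cosine similarity and pick the threshold that maximizes accuracy. The pair-file format here is an assumption and the single-threshold search is a simplification of the 10-fold LFW protocol; the repository's .py test script is authoritative. extract_feature() refers to the fc160 extractor sketched earlier.

# Sketch: LFW pair verification via cosine similarity and a single best threshold.
# Assumes pairs are given as (name_a, name_b, label) with label 1 for same identity.
import numpy as np

def cosine(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))

def lfw_accuracy(pairs, features):
    # pairs: list of (name_a, name_b, label); features: dict name -> 256-d vector
    scores = np.array([cosine(features[a], features[b]) for a, b, _ in pairs])
    labels = np.array([l for _, _, l in pairs])
    best = 0.0
    for t in np.unique(scores):
        acc = np.mean((scores >= t) == (labels == 1))
        best = max(best, acc)
    return best  # the full LFW protocol selects the threshold by 10-fold cross-validation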



