Code and notes for creating your own NN with Caffe2

"""
Created on Sun Apr 28 10:00:14 2019

@author: wujing
"""
from caffe2.python import workspace, model_helper
import numpy as np

# =============================================================================
# x = np.random.rand(4,3,2)
# print(x)
# workspace.FeedBlob("my_x",x)
#
# x2 = workspace.FetchBlob("my_x")
# print(x2)
# =============================================================================

#create input data
data = np.random.rand(16,100).astype(np.float32)

#create labels for the data as integers in [0, 9]
label = (np.random.rand(16)*10).astype(np.int32)
workspace.FeedBlob("data",data)
workspace.FeedBlob("label",label)

'''=========define the structure and the inputs of the NN============='''
#create a model using ModelHelper
m = model_helper.ModelHelper(name="my first net")
#generate weights and bias for FC, and store them as blobs.
weights = m.param_init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = m.param_init_net.ConstantFill([], 'fc_b', shape=[10, ])
fc_1 = m.net.FC(["data","fc_w","fc_b"],"fc_1")

#note: the first argument is not the string "fc_1" but the
#BlobReference object fc_1; the same applies to SoftmaxWithLoss below
pred = m.net.Sigmoid(fc_1,"pred")
softmax,loss = m.net.SoftmaxWithLoss([pred, "label"], ["softmax", "loss"])

'''=========execute the model=================='''
#initialize the model by running the param_init_net once.
#To make the net actually learn, add a backward pass with
#  m.AddGradientOperators([loss])
#before creating the net; RunNet would then also update the parameters
#(see the sketch at the end of the script).
workspace.RunNetOnce(m.param_init_net)

#create the net in the workspace (forward pass only at this point)
workspace.CreateNet(m.net)

#run it multiple times.
#Note that this net only contains the forward pass,
#so it is not learning anything yet.
for _ in range(100):
    #each call runs the net for 10 iterations; it would actually train
    #the model once the backward operators are added
    workspace.RunNet(m.name, 10)

print(workspace.FetchBlob("softmax").shape)
print(workspace.FetchBlob("loss"))
#check the structure of the model
print(m.net.Proto())
#check the parameter-initialization net of the model
print(m.param_init_net.Proto())
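
The comments above mention that adding m.AddGradientOperators([loss]) turns this into a trainable net. Below is a minimal sketch of that extension (not part of the original script); it follows the manual-SGD style of the official Caffe2 "Toy Regression" tutorial, and the blob names ONE and LR as well as the learning rate 0.1 are illustrative choices.

'''=========optional extension: add the backward pass (sketch)========='''
#assumption: this continues the script above and reuses m, weights, bias, loss
gradient_map = m.AddGradientOperators([loss])  #adds gradient ops, returns a {blob: grad blob} map

#constant blobs for the update rule; LR is negative so that
#WeightedSum computes  param <- param - 0.1 * grad
ONE = m.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
LR = m.param_init_net.ConstantFill([], "LR", shape=[1], value=-0.1)

#in-place SGD update for both FC parameters
m.net.WeightedSum([weights, ONE, gradient_map[weights], LR], weights)
m.net.WeightedSum([bias, ONE, gradient_map[bias], LR], bias)

#re-run initialization and re-create the net so the new operators take effect
workspace.RunNetOnce(m.param_init_net)
workspace.CreateNet(m.net, overwrite=True)
for _ in range(100):
    workspace.RunNet(m.name, 10)
#the loss should now decrease across iterations
print(workspace.FetchBlob("loss"))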
