[torch] Getting the Output of a Layer

https://groups.google.com/forum/#!topic/torch7/R9DAdx95aqc

Introduction
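
The question in the linked thread is how to read an intermediate layer's output out of an nn.Sequencer network built with Torch7's rnn package. The code below sets up a small MaskZero'd FastLSTM sequencer, runs a forward pass, and then pulls per-step layer outputs out of the module table.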

require 'cutorch'
require 'cunn'
require 'rnn'
require 'os'

-- Build a 6-step input sequence of 5x10 batches; step 1 is all zeros
-- except row 3, so MaskZero will zero the outputs for the zero rows.
tensor1 = torch.zeros(5,10)
tensor1[3] = torch.rand(1,10)
print(tensor1)
input = {tensor1, torch.rand(5,10), torch.rand(5,10), torch.rand(5,10), torch.rand(5,10), torch.rand(5,10)}
net = nn.Sequencer(
   nn.Sequential()
      :add(nn.MaskZero(nn.FastLSTM(10,3),1))
      :add(nn.MaskZero(nn.Linear(3,4),1))
      :add(nn.MaskZero(nn.LogSoftMax(),1))
)

output = net:forward(input)
local m = net.modules
--[[
print("net")
print(net)
print("m")
print(m)
--]]
-- For each time step, print the final network output alongside the
-- first layer's (the FastLSTM's) cached output for that step.
for i = 1, #input do
   print(output[i])
   print(m[1].sharedClones[i].modules[1].output)
end

print("net")
print(net)
print("m")
print(m)
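
The line m[1].sharedClones[i].modules[1].output does the real work: net.modules[1] is the nn.Recursor that nn.Sequencer wraps around the nn.Sequential, sharedClones[i] is the clone of that Sequential used at time step i, and modules[1] is its first layer (the MaskZero-wrapped FastLSTM), whose output field still holds the 5x3 activation from the forward pass. The same idea as a reusable helper (layerOutput is a hypothetical name, not part of the rnn package):

-- Hypothetical helper: output of layer k at time step t of a
-- Sequencer-wrapped Sequential; only meaningful after net:forward(input).
local function layerOutput(seqNet, t, k)
   return seqNet.modules[1].sharedClones[t].modules[k].output
end

print(layerOutput(net, 2, 1))  -- FastLSTM output at step 2, size 5x3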

Output

net 
nn.Sequencer @ nn.Recursor @ nn.Sequential {
  [input -> (1) -> (2) -> (3) -> output]
  (1): nn.MaskZero @ nn.FastLSTM(10 -> 3)
  (2): nn.MaskZero @ nn.Linear(3 -> 4)
  (3): nn.MaskZero @ nn.LogSoftMax
}
m   
{
  1 : 
    {
      sharedClones : 
        {
          1 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
          2 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
          3 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
          4 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
          5 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
          6 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
        }
      step : 7
      outputs : 
        {
          1 : DoubleTensor - size: 5x4
          2 : DoubleTensor - size: 5x4
          3 : DoubleTensor - size: 5x4
          4 : DoubleTensor - size: 5x4
          5 : DoubleTensor - size: 5x4
          6 : DoubleTensor - size: 5x4
        }
      output : DoubleTensor - size: 5x4
      gradInput : DoubleTensor - empty
      modules : 
        {
          1 : 
            {
              gradInput : DoubleTensor - empty
              modules : 
                {
                  1 : {...}
                  2 : {...}
                  3 : {...}
                }
              _type : "torch.DoubleTensor"
              output : DoubleTensor - size: 5x4
            }
        }
      _gradOutputs : {...}
      rho : 6
      recurrentModule : 
        {
          gradInput : DoubleTensor - empty
          modules : 
            {
              1 : 
                {
                  output : DoubleTensor - size: 5x3
                  gradInput : DoubleTensor - empty
                  nInputDim : 1
                  batchmode : true
                  zeroMask : ByteTensor - size: 5x1
                  _type : "torch.DoubleTensor"
                  _zeroMask : DoubleTensor - size: 5x1
                  module : {...}
                  modules : {...}
                }
              2 : 
                {
                  output : DoubleTensor - size: 5x4
                  gradInput : DoubleTensor - empty
                  nInputDim : 1
                  batchmode : true
                  zeroMask : ByteTensor - size: 5x1
                  _type : "torch.DoubleTensor"
                  _zeroMask : DoubleTensor - size: 5x1
                  module : {...}
                  modules : {...}
                }
              3 : 
                {
                  output : DoubleTensor - size: 5x4
                  gradInput : DoubleTensor - empty
                  nInputDim : 1
                  batchmode : true
                  zeroMask : ByteTensor - size: 5x1
                  _type : "torch.DoubleTensor"
                  _zeroMask : DoubleTensor - size: 5x1
                  module : {...}
                  modules : {...}
                }
            }
          _type : "torch.DoubleTensor"
          output : DoubleTensor - size: 5x4
        }
      nSharedClone : 6
      _type : "torch.DoubleTensor"
      gradInputs : {...}
      module : 
        {
          gradInput : DoubleTensor - empty
          modules : 
            {
              1 : 
                {
                  output : DoubleTensor - size: 5x3
                  gradInput : DoubleTensor - empty
                  nInputDim : 1
                  batchmode : true
                  zeroMask : ByteTensor - size: 5x1
                  _type : "torch.DoubleTensor"
                  _zeroMask : DoubleTensor - size: 5x1
                  module : {...}
                  modules : {...}
                }
              2 : 
                {
                  output : DoubleTensor - size: 5x4
                  gradInput : DoubleTensor - empty
                  nInputDim : 1
                  batchmode : true
                  zeroMask : ByteTensor - size: 5x1
                  _type : "torch.DoubleTensor"
                  _zeroMask : DoubleTensor - size: 5x1
                  module : {...}
                  modules : {...}
                }
              3 : 
                {
                  output : DoubleTensor - size: 5x4
                  gradInput : DoubleTensor - empty
                  nInputDim : 1
                  batchmode : true
                  zeroMask : ByteTensor - size: 5x1
                  _type : "torch.DoubleTensor"
                  _zeroMask : DoubleTensor - size: 5x1
                  module : {...}
                  modules : {...}
                }
            }
          _type : "torch.DoubleTensor"
          output : DoubleTensor - size: 5x4
        }
      rmInSharedClones : true
    }
}

As the printout shows, m is an ordinary Lua table, so whatever intermediate value you want, you can simply index into it and print it.
For example:

tensor1 = torch.zeros(5,10)
tensor1[3]=torch.rand(1,10)
print(tensor1)
input = {tensor1,torch.rand(5,10),torch.rand(5,10),torch.rand(5,10),torch.rand(5,10),torch.rand(5,10)}
net = nn.Sequencer(
   nn.Sequential()
      :add(nn.MaskZero(nn.FastLSTM(10,3),1))
--      :add(nn.MaskZero(nn.Linear(3,4),1))
--      :add(nn.MaskZero(nn.LogSoftMax(),1))
)

output = net:forward(input)
local m = net.modules
--[[
print("net")
print(net)
print("m")
print(m)
--]]
for i = 1, #input do
   print(output[i])
   print(m[1].sharedClones[i].modules[1].output)
end
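
Because m is an ordinary Lua table, you can also print a single field instead of dumping the whole structure. A couple of examples (the step index 3 here is arbitrary):

print(m[1].sharedClones[3].modules[1].output)  -- LSTM output at step 3
print(m[1].outputs[3])                         -- step-3 output cached by the Recursor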

Test

require 'cutorch'
require 'cunn'
require 'rnn'
require 'os'
--[[
net = nn.Sequencer(
   nn.Sequential()
      :add(nn.MaskZero(nn.FastLSTM(10,6),1))
      :add(nn.MaskZero(nn.Linear(6,4),1))
      :add(nn.MaskZero(nn.LogSoftMax(),1))
)
parameters, gradParameters = net:getParameters()
lightModel = net:clone('weight','bias','running_mean','running_std')
torch.save('model.t7',lightModel)
--]]

net=torch.load("model.t7")

--[[
tensor1 = torch.zeros(5,10)
tensor1[3]=torch.Tensor{3,4,5,6,7,8,23,2,12,90}
tensor2 = torch.ones(5,10)
tensor2[{{1,2},{}}]=torch.Tensor{ {1,3,4,5,6,0,3,2,56,2}, {5,3,2,5,7,3,45,78,235,10}}
tensor2[4]=torch.ones(1,10):fill(3.2)
tensor2[5]=torch.zeros(1,10)
input = {tensor1,tensor2}
--]]
--net=torch.load("/work1/t2g-shinoda2011/15M54105/trecvid/torch-lstm3/batch5_epoch5_hiddensize256_cw1/model_100ex_batch5_unit256_epoch70")
--[[
array = {}
tensor1  = torch.zeros(5,10)
tensor1[3]=torch.rand(1,10)
tensor2 = torch.rand(5,10)
tensor3 = torch.rand(5,10)
tensor4 = torch.rand(5,10)
tensor1=tensor1:cuda()
tensor2=tensor2:cuda()
tensor3=tensor3:cuda()
tensor4=tensor4:cuda()
table.insert(array, tensor1)
table.insert(array, tensor2)
table.insert(array, tensor3)
table.insert(array, tensor4)
file = torch.DiskFile('input.asc', 'w')
file:writeObject(array)
file:close()
os.exit()
--]]
net:cuda()
file = torch.DiskFile('input.asc', 'r')
input = file:readObject()
print(input)
local m = net.modules
output = net:forward(input)
--[[
print("net")
print(net)
print("m")
print(m)
--]]
-- Re-apply the final LogSoftMax to the cached output of layer 2 (the
-- Linear); if the result matches the network output, the cached value
-- really is the second layer's activation.
model = (nn.MaskZero(nn.LogSoftMax(),1)):cuda()
for seqj = 1, #input do
    print(seqj)
    res = m[1].sharedClones[seqj].modules[2].output
    out1 = output[seqj]
    out2 = model:forward(res)
    print(out1 - out2)
end

In test.lua, res = m[1].sharedClones[seqj].modules[2].output takes the output of the net's second layer (the Linear). out1 and out2 come out equal, which confirms that res really is the second layer's output.
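
Rather than eyeballing print(out1-out2), the comparison can be made explicit with a tolerance check; a minimal sketch using the out1 and out2 tensors from the last iteration of the loop above:

local maxDiff = (out1 - out2):abs():max()  -- largest absolute difference
assert(maxDiff < 1e-6, 'second-layer output does not match')
print(string.format('max |out1-out2| = %g', maxDiff))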
