Basic TensorFlow operations: fully connected layer, convolution layer, pooling layer, unidirectional LSTM, and bidirectional LSTM

First, import the packages (this is TensorFlow 1.x code; tf.contrib was removed in TensorFlow 2.x):

import tensorflow as tf
import tensorflow.contrib as contrib
from tensorflow import layers as layer
from tensorflow import nn

Fully connected layer

batch = 8
data_dim = 10
out_dim = 15
data = tf.Variable(tf.random_uniform([batch, data_dim]))
fc_out = contrib.layers.fully_connected(data, out_dim)  # ReLU activation by default
dense_out = layer.dense(data, out_dim)                  # no activation (linear) by default
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out = sess.run(dense_out)
# print(out)
print(out.shape)
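Note that fc_out and dense_out are two independent layers with their own weights. Conceptually both compute a matrix multiply plus a bias; a minimal hand-rolled sketch of the same computation (explicit ReLU, variable names W and b chosen here only for illustration):

W = tf.Variable(tf.random_uniform([data_dim, out_dim]))
b = tf.Variable(tf.zeros([out_dim]))
manual_out = tf.nn.relu(tf.matmul(data, W) + b)  # [batch, out_dim]
tf.global_variables_initializer().run()          # initialize the newly created W and b
print(sess.run(manual_out).shape)                # (8, 15)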

Convolution layer and pooling layer

batch = 8
image_h = 10
image_w = 10
image_channel = 1
out_channel = 5
cnn_kernel_size_h = 3
cnn_kernel_size_w = 3
cnn_stride = 1
pool_size_h = 2
pool_size_w = 2
pool_stride = 1
images = tf.Variable(tf.random_uniform([batch, image_h, image_w, image_channel]))  # NHWC layout
cnn_out = contrib.layers.conv2d(images, out_channel, [cnn_kernel_size_h, cnn_kernel_size_w], cnn_stride, padding='SAME')  # [batch, 10, 10, out_channel]
# cnn_out = layer.conv2d(images, out_channel, [cnn_kernel_size_h, cnn_kernel_size_w], cnn_stride, padding='SAME')
pool_out = contrib.layers.max_pool2d(cnn_out, [pool_size_h, pool_size_w], stride=pool_stride, padding='SAME')  # [batch, 10, 10, out_channel] since pool_stride = 1
# pool_out = contrib.layers.avg_pool2d(cnn_out, [pool_size_h, pool_size_w], stride=pool_stride, padding='SAME')
# pool_out = tf.nn.max_pool(cnn_out, [1, pool_size_h, pool_size_w, 1], strides=[1, pool_stride, pool_stride, 1],
#                           padding='SAME')
# pool_out = tf.nn.avg_pool(cnn_out, [1, pool_size_h, pool_size_w, 1], strides=[1, pool_stride, pool_stride, 1],
#                           padding='SAME')
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out = sess.run(pool_out)
# print(out)
print(out.shape)
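The contrib.layers wrappers create the kernel and bias variables internally and call tf.nn.conv2d under the hood. With 'SAME' padding the output spatial size is ceil(input_size / stride), so with stride 1 both the convolution and the pooling keep the 10x10 spatial size. A minimal low-level sketch (explicit kernel variable, no bias or activation; names chosen here only for illustration):

kernel = tf.Variable(tf.random_uniform([cnn_kernel_size_h, cnn_kernel_size_w, image_channel, out_channel]))
raw_conv = tf.nn.conv2d(images, kernel, strides=[1, cnn_stride, cnn_stride, 1], padding='SAME')
tf.global_variables_initializer().run()  # initialize the newly created kernel
print(sess.run(raw_conv).shape)          # (8, 10, 10, 5)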

Unidirectional LSTM (batch is the first dimension of the data)

batch = 8
step = 10
vector = 50
hidden = 7
data = tf.Variable(tf.random_uniform([batch, step, vector]))
lstm_cell = contrib.rnn.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
# lstm_cell = nn.rnn_cell.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
init_state = lstm_cell.zero_state(batch, dtype=tf.float32)  # the state's leading dimension must be the batch size
rnn_out, final_state = nn.dynamic_rnn(lstm_cell, data, initial_state=init_state, time_major=False)  # batch-first data
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out, state = sess.run([rnn_out, final_state])
# print(out, state)
print(out.shape, state.shape)
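With batch-first data and time_major=False, rnn_out has shape [batch, step, hidden] and final_state is [batch, 2 * hidden], because state_is_tuple=False concatenates the cell state c and the hidden state h. For sequence classification one often keeps only the last time step; a small sketch:

last_out = rnn_out[:, -1, :]                          # [batch, hidden]
c_state, h_state = tf.split(final_state, 2, axis=1)   # state_is_tuple=False packs [c, h] together
print(sess.run(last_out).shape, sess.run(h_state).shape)  # (8, 7) (8, 7)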

Bidirectional LSTM (batch is the first dimension of the data)

batch = 8
step = 10
vector = 50
hidden = 7
data = tf.Variable(tf.random_uniform([batch, step, vector]))
cell_fw = contrib.rnn.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
cell_bw = contrib.rnn.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)  # separate cells for the two directions
# cell_fw = nn.rnn_cell.BasicLSTMCell(hidden, forget_bias=1.0, state_is_tuple=False)
init_state_fw = cell_fw.zero_state(batch, dtype=tf.float32)
init_state_bw = cell_bw.zero_state(batch, dtype=tf.float32)
rnn_out, final_state = nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, data,
                                                    initial_state_fw=init_state_fw,
                                                    initial_state_bw=init_state_bw, time_major=False)
rnn_out = tf.concat(rnn_out, 2)          # concat forward/backward outputs on the feature axis: [batch, step, 2 * hidden]
final_state = tf.concat(final_state, 1)  # [batch, 4 * hidden] because state_is_tuple=False packs c and h together
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
out, state = sess.run([rnn_out, final_state])
# print(out, state)
print(out.shape, state.shape)
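A common next step is to project the concatenated bi-LSTM features with a fully connected layer, e.g. for per-time-step tagging. A sketch, where num_classes is a hypothetical label count chosen only for illustration:

num_classes = 4                             # hypothetical number of labels
logits = layer.dense(rnn_out, num_classes)  # dense applies to the last axis: [batch, step, num_classes]
tf.global_variables_initializer().run()     # initialize the dense layer's variables
print(sess.run(logits).shape)               # (8, 10, 4)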
