在Tensorflow中,神经网络的权重是一个variable,所以要打印出来只需要按下面的代码即可。最重要的就是sess.run()
# Minimal demo: in TensorFlow 1.x a network weight is a tf.Variable, so
# printing it only requires initializing all variables and fetching the
# tensor's value through Session.run().
import tensorflow as tf

weight = tf.Variable(tf.random_normal([1, 2]))

session = tf.Session()
session.run(tf.global_variables_initializer())
print(session.run(weight))
但是如果使用的一个函数来添加隐藏层和输出层,权重的变量初始化在函数里面,就需要将权重作为结果返回。举个?:
下面建立一个神经网络,用add_layer()函数来添加隐藏层和输出层。我们可以看到最后会返回一个weights的list。
import numpy as np
import tensorflow as tf
# Activation function applied to every hidden layer.
ACTIVATION = tf.nn.relu
# Number of hidden layers built by built_net().
N_LAYERS = 3
# Number of units in each hidden layer.
N_HIDDEN_UNITS = 15
下面这个是一个种子函数,让每一次的运行结果都保持一致。
def fix_seed(seed=1):
    """Seed both TensorFlow's and NumPy's RNGs so every run is reproducible."""
    tf.set_random_seed(seed)
    np.random.seed(seed)
神经网络的结构函数,里面包含了添加隐藏层的函数add_layer():
def built_net(xs, ys):
    """Build a fully connected regression net with N_LAYERS hidden layers.

    Args:
        xs: placeholder for the inputs, shape [None, 1].
        ys: placeholder for the targets, shape [None, 1].

    Returns:
        [train_op, cost, layers_inputs, w_outs] where layers_inputs holds the
        input tensor of every layer and w_outs the weight Variable of every
        hidden layer (returned so callers can sess.run() and print them).
    """
    def add_layer(inputs, in_size, out_size, activation_function=None):
        """Add one fully connected layer; return (outputs, Weights)."""
        # Weights and biases for this layer.
        Weights = tf.Variable(
            tf.random_normal([in_size, out_size], mean=0., stddev=1.))
        biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
        # Fully connected product.
        Wx_plus_b = tf.matmul(inputs, Weights) + biases
        # Optional non-linearity; None means a linear (identity) output.
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs, Weights

    fix_seed(1)
    # Record the input tensor of every layer; the first "input" is xs itself.
    layers_inputs = [xs]
    # Weight Variables of the hidden layers, collected for later inspection.
    w_outs = []
    # Build the hidden layers.
    for l_n in range(N_LAYERS):
        layer_input = layers_inputs[l_n]
        in_size = layers_inputs[l_n].get_shape()[1].value
        # BUG FIX: the original passed a stray `norm` argument here, which is
        # undefined in this file and exceeds add_layer()'s parameter list
        # (a leftover from the batch-normalization version of this tutorial).
        output, weights = add_layer(
            layer_input,
            in_size,
            N_HIDDEN_UNITS,
            ACTIVATION,
        )
        layers_inputs.append(output)
        w_outs.append(weights)
    # Output layer (linear). Use N_HIDDEN_UNITS instead of a hard-coded 15 so
    # the in_size stays consistent with the hidden-layer width above.
    prediction, weights = add_layer(layers_inputs[-1], N_HIDDEN_UNITS, 1,
                                    activation_function=None)
    # Mean squared error over the batch.
    cost = tf.reduce_mean(
        tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
    return [train_op, cost, layers_inputs, w_outs]
建立要用的数据集
# Build a toy regression dataset: y = x^2 - 5 plus Gaussian noise.
fix_seed(1)
# 2500 evenly spaced samples, reshaped to a column vector (2500, 1).
x_data = np.linspace(-7, 10, 2500)[:, np.newaxis]
np.random.shuffle(x_data)
noise = np.random.normal(0, 8, x_data.shape)
y_data = x_data ** 2 - 5 + noise
运行网络,并打印出weights变量
# Placeholders: each row is one sample with one feature. [num_samples, num_features]
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
train_op, cost, layers_inputs, weights = built_net(xs, ys)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# 2500 samples / batch size 10 = 250 mini-batches.
# BUG FIX: the original looped over range(251); at i == 250 the slice
# x_data[2500:2510] is empty, and training on an empty batch makes
# reduce_mean return NaN, corrupting the weights on the final step.
for i in range(250):
    if i % 50 == 0:
        print("------------")
        print(sess.run(weights))
    sess.run(train_op, feed_dict={xs: x_data[i*10:i*10+10],
                                  ys: y_data[i*10:i*10+10]})

# Weights after the final training step.
print("------------")
print(sess.run(weights))
参考链接:https://morvanzhou.github.io/tutorials/machine-learning/tensorflow/5-13-BN/