1. Processing structure
input: includes x-input and y-input
gradient descent: updates the parameters, re-assigns them to the hidden layer, then performs the next update step
One-dimensional function regression
import tensorflow as tf
import numpy as np

# create training data for the target line y = 0.1x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# learnable parameters, weights initialized uniformly in [-1, 1]
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))

y = Weights * x_data + biases
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(Weights), sess.run(biases))
2. Session
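A minimal sketch of how a Session executes a graph (TF 1.x style; the matrices here are my own example):

import tensorflow as tf

matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)  # nothing is computed until run inside a Session

# method 1: open and close the Session manually
sess = tf.Session()
print(sess.run(product))  # [[12]]
sess.close()

# method 2: the context manager closes the Session automatically
with tf.Session() as sess:
    print(sess.run(product))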
3. Variable
Variables must be initialized before use: tf.global_variables_initializer() (tf.compat.v1.global_variables_initializer() in TF 2.x).
tf.assign() assigns a new value to a variable.
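A minimal counter sketch showing initialization and tf.assign (assuming TF 1.x):

import tensorflow as tf

state = tf.Variable(0, name='counter')
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)  # state <- state + 1

init = tf.global_variables_initializer()  # required before reading any Variable
with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))  # prints 1, 2, 3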
4. placeholder
tf.placeholder(tf.float32): specify the dtype, usually float32.
Values are supplied at run time as a dictionary via feed_dict.
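A minimal sketch of feeding placeholders through feed_dict (the values are my own example):

import tensorflow as tf

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    # the dictionary binds concrete values to the placeholders
    print(sess.run(output, feed_dict={input1: [7.0], input2: [2.0]}))  # [14.]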
5. Activation functions
y = AF(Wx): the activation function AF introduces non-linearity.
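A short sketch applying a built-in activation (tf.nn.relu is my choice here; tf.nn.sigmoid, tf.nn.tanh, etc. work the same way):

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 2.0])
y = tf.nn.relu(x)  # element-wise max(0, x)

with tf.Session() as sess:
    print(sess.run(y))  # [0. 0. 2.]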
6. Adding a layer
def add_layer(inputs, in_size, out_size, activation_function=None): ...  # full implementation in section 10
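A sketch of stacking layers with this helper to build a one-hidden-layer regression net (the sizes are my own example; add_layer is defined in full in section 10):

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)    # hidden layer: 1 -> 10
prediction = add_layer(l1, 10, 1, activation_function=None)  # output layer: 10 -> 1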
7. plot
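A minimal matplotlib sketch for visualizing the regression fit during training (it assumes the xs/prediction network from section 6 and the x_data/y_data arrays; all names are carried over from earlier sketches):

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)  # raw training data
plt.ion()                   # interactive mode so plt.show() does not block
plt.show()

# inside the training loop, redraw the current prediction curve:
prediction_value = sess.run(prediction, feed_dict={xs: x_data})
ax.plot(x_data, prediction_value, 'r-', lw=5)
plt.pause(0.1)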
8. optimizer
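tf.train provides several optimizers besides plain gradient descent; a sketch of swapping one into the earlier training step (the learning rates here are my own examples):

# train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)
# other choices: tf.train.MomentumOptimizer(0.01, 0.9),
# tf.train.RMSPropOptimizer(0.001), tf.train.AdagradOptimizer(0.01)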
9. tensorboard
with tf.name_scope('inputs'):
    x_s = tf.placeholder(tf.float32, [None, 1], name='x_input')
    y_s = tf.placeholder(tf.float32, [None, 1], name='y_input')
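To actually view the graph, write it out with a FileWriter and launch tensorboard (a sketch; the logs/ path is my own choice):

sess = tf.Session()
writer = tf.summary.FileWriter('logs/', sess.graph)
# then on the command line:  tensorboard --logdir logs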
10. Classification
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    # tf.cast converts the dtype; reduce_mean takes the mean over all elements
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result
# input placeholders: 28*28 = 784 pixels per image, 10 output classes
xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])

prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)
# the error between prediction and real data
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1]))  # loss
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.Session()
# important step
# tf.initialize_all_variables() no long valid from
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
11. Overfitting
Method 1: increase the amount of training data.
Method 2: regularization (L1, L2, ...; L3 penalizes the cube of the weights, L4 the fourth power).
Method 3: dropout regularization, which randomly ignores neurons during training; keep_prob controls what fraction of the neurons is kept (see the sketch below).
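A minimal dropout sketch (assuming TF 1.x; feeding keep_prob as a placeholder lets you drop neurons only during training):

keep_prob = tf.placeholder(tf.float32)  # fraction of activations to keep
# hidden_layer_output is a hypothetical tensor standing in for any layer's output
h_drop = tf.nn.dropout(hidden_layer_output, keep_prob)

# training: randomly drop 50% of the activations
# sess.run(train_step, feed_dict={..., keep_prob: 0.5})
# evaluation: keep everything
# sess.run(accuracy, feed_dict={..., keep_prob: 1.0})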
12. Convolutional neural networks
Code: see the sketch below.
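A minimal sketch of the usual convolution and pooling helpers (TF 1.x; the 5x5 patch size, SAME padding, and 2x2 pooling are conventional choices, not from the original notes):

def conv2d(x, W):
    # strides are [batch, height, width, channel]; SAME padding keeps the spatial size
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 pooling halves the height and width
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# example: first conv layer on a 28x28 grayscale image
x_image = tf.reshape(xs, [-1, 28, 28, 1])
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))  # 5x5 patch, 1 in, 32 out
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                           # output 14x14x32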
13. Saving and restoring
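A minimal tf.train.Saver sketch for saving and restoring variables (the my_net/ path is my own example):

import tensorflow as tf

W = tf.Variable([[1.0, 2.0, 3.0]], dtype=tf.float32, name='weights')
saver = tf.train.Saver()

# save
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    save_path = saver.save(sess, 'my_net/save_net.ckpt')

# restore: rebuild variables with the same name and shape, then restore instead of initializing
with tf.Session() as sess:
    saver.restore(sess, 'my_net/save_net.ckpt')
    print(sess.run(W))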
14. Autoencoders
Unsupervised learning: the network compresses the input and reconstructs it, so no labels are needed.
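A minimal one-layer autoencoder sketch (sizes and learning rate are my own example):

n_input, n_hidden = 784, 128
x = tf.placeholder(tf.float32, [None, n_input])

W_enc = tf.Variable(tf.random_normal([n_input, n_hidden]))
b_enc = tf.Variable(tf.zeros([n_hidden]))
W_dec = tf.Variable(tf.random_normal([n_hidden, n_input]))
b_dec = tf.Variable(tf.zeros([n_input]))

encoded = tf.nn.sigmoid(tf.matmul(x, W_enc) + b_enc)        # compress
decoded = tf.nn.sigmoid(tf.matmul(encoded, W_dec) + b_dec)  # reconstruct

# the target is the input itself, so no labels are required
loss = tf.reduce_mean(tf.square(decoded - x))
train_op = tf.train.AdamOptimizer(0.01).minimize(loss)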
15. name_scope / variable_scope
name_scope has no effect on tf.get_variable (the scope prefix is not added to its name).
For tf.Variable, TensorFlow checks whether the name already exists; if it does, _1 is appended.
If reusing var4 is not possible, a new variable var4_1 is created instead.
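A short sketch contrasting the two scopes (TF 1.x; the names in the comments show the effect):

import tensorflow as tf

with tf.name_scope('a_name_scope'):
    var1 = tf.get_variable(name='var1', shape=[1], dtype=tf.float32)
    var2 = tf.Variable(initial_value=[2.0], name='var2', dtype=tf.float32)
print(var1.name)  # 'var1:0'  (name_scope is ignored by tf.get_variable)
print(var2.name)  # 'a_name_scope/var2:0'  (tf.Variable gets the prefix)

with tf.variable_scope('a_variable_scope'):
    var3 = tf.get_variable(name='var3', shape=[1], dtype=tf.float32)
print(var3.name)  # 'a_variable_scope/var3:0'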
16. Batch normalization
Batch-normalize the input xs.
Stack several identical layers (compare the results with and without BN).
Improvements from TF 1.x to TF 2.x: tf.losses, etc.
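A minimal sketch using tf.layers.batch_normalization (a TF 1.x API; the layer sizes are my own example, and the update-ops dependency is required so the moving statistics are updated):

xs = tf.placeholder(tf.float32, [None, 784])
ys = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)

h = tf.layers.dense(xs, 64)
h = tf.layers.batch_normalization(h, training=is_training)  # normalize activations per batch
h = tf.nn.relu(h)
logits = tf.layers.dense(h, 10)
loss = tf.losses.softmax_cross_entropy(onehot_labels=ys, logits=logits)

# BN keeps moving averages as extra update ops; run them alongside the train step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)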
17. Transfer learning
VGG16.npy holds the pretrained parameters.
You can take a trained model, change the last few layers to suit the new task, and then train only those.
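A sketch of loading the VGG16.npy parameter file and freezing the pretrained weights (the dictionary layout follows the common vgg16.npy convention and is an assumption here):

import numpy as np
import tensorflow as tf

# assumed layout: each entry maps a layer name to [weights, biases]
params = np.load('VGG16.npy', encoding='latin1', allow_pickle=True).item()

# pretrained layers become constants, so they stay frozen during training
conv1_1_W = tf.constant(params['conv1_1'][0])
conv1_1_b = tf.constant(params['conv1_1'][1])

# ... build the remaining VGG layers the same way, then add a new trainable head:
# new_logits = tf.layers.dense(flattened_features, n_new_classes)  # only this part trains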