# TensorFlow 优化实践

GitChat 作者：沧夜

【不要错过文末彩蛋】

1. 前馈神经网络
2. 循环神经网络
3. 神经网络参数

### 前馈神经网络

#### 全连接网络与卷积网络

$$A=\begin{bmatrix}1&1&1&1&1&1\\1&1&1&1&1&1\\1&1&1&1&1&1\\1&1&1&1&1&1\\1&1&1&1&1&1\\1&1&1&1&1&1\end{bmatrix}_{6\times 6}$$

$$y=\mathrm{ActiveFunction}(xA)$$

$$A=\begin{bmatrix}1&1&&&&\\1&1&1&&&\\&1&1&1&&\\&&1&1&1&\\&&&1&1&1\\&&&&1&1\end{bmatrix}_{6\times 6}\quad\text{（空白处为 0）}$$

$$A=\begin{bmatrix}\tfrac{1}{3}&\tfrac{1}{3}&\tfrac{1}{3}\end{bmatrix}_{1\times 3}$$

import tensorflow as tf
import numpy as np

# Length-3 convolution kernel, 1 input channel, 2 output channels:
# channel 0 averages its window (each tap = 1/3), channel 1 sums it (taps = 1).
kn = np.ones([3, 1, 2], dtype=np.float32)
kn[:, :, 0] = 1 / 3
kernel = tf.Variable(kn)
# Input signal: batch of 1, length 1000, 1 channel, all ones.
x = tf.Variable(np.ones([1, 1000, 1], dtype=np.float32))
# 1-D convolution.  'SAME' padding keeps the output length at 1000 — that is
# why the printed shape is (1, 1000, 2) and the first/last rows are smaller
# (0.6667 and 2.0): the kernel overlaps the zero padding at the edges.
# (This line was missing from the original snippet; y was printed but never
# defined.)
y = tf.nn.conv1d(x, kernel, stride=1, padding='SAME')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("Out:", sess.run(y))
print("Shape:", np.shape(sess.run(y)))

Out: [[[ 0.66666669  2.        ]
[ 1.          3.        ]
[ 1.          3.        ]
...,
[ 1.          3.        ]
[ 1.          3.        ]
[ 0.66666669  2.        ]]]
Shape: (1, 1000, 2)

#### tensorflow构建前馈神经网络

import tensorflow as tf


# Convolution layer
def conv1d_layer(input_tensor, kernel_size, feature=2, active_function="relu", name='conv1d'):
    """1-D convolution layer: activation(conv1d(input, kernel) + b).

    Args:
        input_tensor: rank-3 tensor — assumed (batch, length, channels); TODO confirm.
        kernel_size: length of the 1-D convolution window.
        feature: number of output channels.
        active_function: one of "relu", "sigmoid", "tanh".
        name: variable scope holding the layer's parameters.

    Returns:
        The activated convolution output.
    """
    activ = {"relu": tf.nn.relu, "sigmoid": tf.nn.sigmoid, "tanh": tf.nn.tanh}
    with tf.variable_scope(name):
        shape = input_tensor.get_shape().as_list()
        kernel = tf.get_variable('kernel',
                                 (kernel_size, shape[-1], feature),
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0))
        b = tf.get_variable('b',
                            [feature],
                            dtype=tf.float32,
                            initializer=tf.constant_initializer(0))
        # The original snippet's conv1d call was truncated (no padding
        # argument) and the bias b was declared but never used; 'SAME'
        # padding keeps the sequence length unchanged.
        out = tf.nn.conv1d(input_tensor,
                           kernel,
                           stride=1,
                           padding='SAME') + b
        return activ[active_function](out)
# Fully-connected layer
def full_layer(input_tensor, out_dim=2, active_function="relu", name='full'):
    """Dense layer: activation(input_tensor @ W + b).

    Args:
        input_tensor: rank-2 tensor — assumed (batch, features); TODO confirm.
        out_dim: output dimension.
        active_function: one of "relu", "sigmoid", "tanh".
        name: variable scope holding W and b.

    Returns:
        The activated dense-layer output, shape (batch, out_dim).
    """
    activ = {"relu": tf.nn.relu, "sigmoid": tf.nn.sigmoid, "tanh": tf.nn.tanh}
    with tf.variable_scope(name):
        shape = input_tensor.get_shape().as_list()
        W = tf.get_variable('W',
                            (shape[1], out_dim),
                            dtype=tf.float32,
                            initializer=tf.constant_initializer(0))
        b = tf.get_variable('b',
                            [out_dim],
                            dtype=tf.float32,
                            initializer=tf.constant_initializer(0))
        out = tf.matmul(input_tensor, W) + b
        return activ[active_function](out)

# Apply the layers defined above.  NOTE: the original snippet passed
# `featrue=2` to full_layer, which has no such parameter — its keyword
# is `out_dim`.
net = full_layer(xx, out_dim=2, active_function="relu", name="full_connect_1")
net = conv1d_layer(net, 3, feature=2, active_function="relu", name="conv1d_1")

# Stack three kernel-size-3 conv1d layers; each gets a distinct `name`
# so their variables live in separate tf.variable_scope namespaces.
net = conv1d_layer(net, 3, feature=2, active_function="relu", name="conv1d_1")
net = conv1d_layer(net, 3, feature=2, active_function="relu", name="conv1d_2")
net=conv1d_layer(net, 3, feature=2, active_function="relu", name="conv1d_3")

import tensorflow.contrib.slim as slim
# Equivalent network written with TF-Slim: conv -> flatten -> dense.
net = slim.conv2d(net, 3, 1, scope='conv1d_1')
# Flatten once to rank 2 before the dense layer.  (The original snippet
# repeated this line; the second call was a no-op and is removed.)
net = slim.flatten(net)
net = slim.fully_connected(net, 2,
                           activation_fn=tf.nn.relu,
                           scope='outes',
                           reuse=False)

Keras 其实是一个构建神经网络比较方便的函数库，其底层计算可以放到 TensorFlow 之中：

# The original snippet used Activation without importing it — add it here.
from keras.layers import Input, Dense, Conv1D, Activation

net = Input(shape=(1000, 1))
net = Activation("relu")(net)

### 循环神经网络

$$y=\mathrm{ActiveFunction}(xA)$$

$$h_t=\mathrm{ActiveFunction}(h_{t-1}A_1+x_tA_2),\qquad y_t=h_tA_3$$

$$y_t=\mathrm{func}(x_t),\qquad z_t=\mathrm{func}(y_t)$$

$$y_t,\;c_t=\mathrm{LSTM}(x_t,\;c_{t-1})$$

#### tensorflow构建循环神经网络

# One LSTM cell; reuse variables when not training so the eval graph
# shares the training weights.
cell = tf.contrib.rnn.BasicLSTMCell(
hidden_size, forget_bias=0.0, state_is_tuple=True,
reuse=not is_training)

# Stack num_layers layers into a multi-layer RNN.
# NOTE(review): the list repeats the SAME cell object for every layer;
# newer TF versions require a fresh cell per layer — confirm the TF
# version this targets.
cell = tf.contrib.rnn.MultiRNNCell(
[cell for _ in range(num_layers)], state_is_tuple=True)

# Unroll the RNN over time; after the first step, reuse the same weights.
# (Loop-body indentation was lost in the original text — restored here.)
for time_step in range(num_steps):
    if time_step > 0:
        tf.get_variable_scope().reuse_variables()
    (cell_output, state) = cell(inputs[:, time_step, :], state)

### 神经网络参数选取

#### 自由参数数量与过拟合问题

import tensorflow as tf
import numpy as np

# Polynomial expansion order: terms x^0 .. x^(N-1)
N = 6
x = tf.placeholder(dtype=tf.float32, shape=[1, None])
y = tf.placeholder(dtype=tf.float32, shape=[1, None])
comp = []
# Expansion terms
for itr in range(N):
    comp.append(tf.pow(x, itr))
x_v = tf.concat(comp, axis=0)
# Expansion coefficients
A = tf.Variable(tf.zeros([1, N]))
y_new = tf.matmul(A, x_v)
# Squared-error loss
loss = tf.reduce_sum(tf.square(y - y_new))
# Solve by gradient descent.  (This definition was missing from the
# original snippet although the comment announced it; the learning rate
# here is an assumption — tune as needed.)
train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
# 90000 iterations (the original comment said 9000, but the loop runs 90000)
for itr in range(90000):
    sess.run(train_step,
             feed_dict={x: np.array([[-1, 0, 0.5773502691896258, 1, 1.5, 2]]),
                        y: np.array([[0, 0, -0.3849, 0, 1.875, 4]])})
print(sess.run(A.value()))
# Plot the fitted curve against the sample points
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('seaborn-darkgrid')
lin = np.array([np.linspace(-1, 2, 100)])
ly = sess.run(y_new, feed_dict={x: lin})
plt.plot(lin[0], ly[0])
plt.scatter([-1, 0, 0.5773502691896258, 1, 1.5, 2],
            [0, 0, -0.3849, 0, 1.875, 4])
plt.show()

# Same plot over a wider range (-5, 5) to show how the fitted polynomial
# behaves outside the sample interval (the extrapolation diverges).
# Relies on sess, y_new, x and np from the fitting script above.
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.style.use('seaborn-darkgrid')
lin = np.array([np.linspace(-5, 5, 100)])
ly =sess.run(y_new, feed_dict={x: lin})
plt.plot(lin[0], ly[0])
plt.scatter([-1, 0, 0.5773502691896258, 1, 1.5, 2],
[0, 0, -0.3849, 0, 1.875, 4])
plt.show()

# Reduce the expansion order from 6 to 3: fewer free parameters, less overfitting
N=3

#### BATCHSIZE

| BATCHSIZE | 最大内存 | 单次迭代时间 |
| --- | --- | --- |
| 50 | 1534 | 8.7s |
| 100 | 2096 | 19.2s |
| 200 | 3392 | 37.3s |

#### 梯度参数

# A learning rate of 500 is far too large — the iteration will diverge
train_step = tf.train.GradientDescentOptimizer(500).minimize(loss)

# A learning rate of 5e-15 is far too small — each step makes negligible progress
tf.train.GradientDescentOptimizer(5e-15)

#### BATCHNORM层

# Batch-normalization layer provided by tf.contrib.layers
tf.contrib.layers.batch_norm

### 形象化

$$y_t=f(x_t,x_{t-1})$$

$$y=f(x,t)$$

$$\frac{dx}{dt}=40.0\,(y-x)+0.16\,xz,\qquad \frac{dy}{dt}=55.0\,x-xz+20.0\,y,\qquad \frac{dz}{dt}=-1.833\,z+xy+0.65\,x^2$$

### 【GitChat达人课】

1. 前端恶棍 · 大漠穷秋 ：《Angular 初学者快速上手教程
2. Python 中文社区联合创始人 · Zoom.Quiet ：《GitQ: GitHub 入味儿
3. 前端颜值担当 · 余博伦：《如何从零学习 React 技术栈
4. GA 最早期使用者 · GordonChoi：《GA 电商数据分析实践课
6. 混元霹雳手 · 江湖前端：《Vue 组件通信全揭秘
7. 知名互联网公司安卓工程师 · 张拭心：《安卓工程师跳槽面试全指南

#### TensorFlow (一) 从入门到实践

2017-07-27 21:43:18

#### TensorFlow最佳实践样例程序

2017-04-16 23:23:03

#### TensorFlow 研究实践二

2016-05-05 11:15:41

#### TensorFlow学习（四）：优化器Optimizer

2016-11-27 19:05:04

#### TensorFlow神经网络优化策略

2017-05-24 17:21:07

#### 深度学习模型的优化算法及tensorflow实现

2018-01-30 21:53:33

#### tensorflow 优化器optimizer

2017-05-04 12:18:28

#### TensorFlow学习（3）：交叉熵、dropout、优化器

2018-03-19 10:50:06

#### Tensorflow框架基础之优化(一)

2017-06-01 12:53:43

#### 常见优化算法 (tensorflow对应参数)

2017-12-13 10:42:54