TensorFlow Study Notes

Preface

Simple notes from a TensorFlow beginner... continuously updated...
Based on the lecture series "Deep Learning Framework TensorFlow: Learning and Applications" by the Bilibili uploader 白夜_叉烧包: https://www.bilibili.com/video/av20542427
and the official documentation on W3Cschool: https://www.w3cschool.cn/tensorflow_python/tensorflow_python-fibz28ss.html

Basic Usage of TensorFlow

Computation Graphs

with tf.Session() as sess:
  result = sess.run(xxx)
  print(result)

# Fetch: evaluate multiple outputs in one run call
with tf.Session() as sess:
  result = sess.run([a, b])
  print(result)
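
For reference, a minimal end-to-end sketch (the names a, b, c are chosen for illustration): ops are first added to the default graph, and nothing is computed until the session runs them.

import tensorflow as tf

a = tf.constant(2.0)
b = tf.constant(3.0)
c = a + b  # adds an op to the graph; no computation happens yet

with tf.Session() as sess:
    # fetch the sum and one operand in a single run call
    total, a_val = sess.run([c, a])
    print(total, a_val)  # 5.0 2.0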

Data Types

Constants: tf.constant

a=tf.constant([3,3])
#a is a 1-D constant tensor with two elements (shape [2])

Variables: tf.Variable

a=tf.Variable([1,1])
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
#Variables must always be initialized before use

Placeholders: tf.placeholder

a=tf.placeholder(tf.float32)
b=tf.placeholder(tf.float32)
output=tf.multiply(a,b)
with tf.Session() as sess:
    print(sess.run(output,feed_dict={a:[3],b:[2]}))

Basic Operations

Absolute value: tf.abs

a=tf.Variable(tf.random_normal([10,1]))
b=tf.abs(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Addition: tf.add

a=tf.Variable(tf.random_normal([10,1]))
b=tf.abs(a)
c=tf.add(a,b)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(c))

Matrix multiplication: tf.matmul

a=tf.Variable(tf.random_normal([10,1]))
b=tf.Variable(tf.random_normal([1,10]))
c=tf.matmul(a,b)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))

Index of the maximum (minimum) value: tf.argmax (tf.argmin)

a=tf.Variable(tf.random_normal([10,1]))
b=tf.argmax(a)
c=tf.argmin(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))

Element-wise equality (returns a tensor of the same shape: True where the two elements match, False otherwise): tf.equal (tf.not_equal)

a=tf.Variable(tf.random_normal([10,1]))
b=tf.Variable(tf.random_normal([10,1]))
c=tf.equal(a,b)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))

Gather the elements at the indices given by one tensor from another tensor, forming a new tensor: tf.gather

temp = tf.range(0,10)*10 + tf.constant(1,shape=[10])
temp2 = tf.gather(temp,[1,5,9])
with tf.Session() as sess:
    print(sess.run(temp))
    print(sess.run(temp2))

Element-wise maximum (minimum): tf.maximum (tf.minimum)

a=tf.Variable(tf.random_normal([10,1]))
c=tf.Variable(tf.random_normal([10,1]))
b=tf.maximum(a,c)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(c))
    print(sess.run(b))

Negation: tf.negative

a=tf.Variable(tf.random_normal([10,1]))
b=tf.negative(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Norm (the square root of the sum of squared elements, i.e. the Euclidean/Frobenius norm by default): tf.norm

a=tf.Variable(tf.random_normal([10,1]))
b=tf.norm(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Maximum (minimum) of all elements: tf.reduce_max (tf.reduce_min)

a=tf.Variable(tf.random_normal([10,2]))
b=tf.reduce_max(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Mean of all elements: tf.reduce_mean

a=tf.Variable(tf.random_normal([10,1]))
b=tf.reduce_mean(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Product of all elements: tf.reduce_prod

a=tf.Variable(tf.random_normal([2,1]))
b=tf.reduce_prod(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Sum of all elements: tf.reduce_sum

a=tf.Variable(tf.random_normal([2,1]))
b=tf.reduce_sum(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Reshape: tf.reshape

a=tf.Variable(tf.random_normal([10,1]))
b=tf.reshape(a,[2,5])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Round to the nearest integer: tf.round (note: ties round to the nearest even integer)

a=tf.Variable(tf.random_normal([5,1]))
b=tf.round(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Shape: tf.shape

a=tf.Variable(tf.random_normal([5,1]))
b=tf.shape(a)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Square (square root): tf.square (tf.sqrt)

a=tf.Variable(tf.random_normal([5,1]))
b=tf.sqrt(a)  #note: sqrt of negative entries yields NaN
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))

Reading Files

Read a file: tf.read_file()

tar_image=tf.read_file('xx.jpg')
tar_mp4=tf.read_file('xx.mp4')

Saving and Loading Models

tf.train.Saver() — the names below are conventions from the tutorial's training script, not arguments of Saver itself:

isTrain: distinguishes the training phase from the test phase; True means training, False means testing
train_steps: the number of training steps; the example uses 100
checkpoint_steps: save a checkpoint every this many steps; the example uses 50
checkpoint_dir: the directory where checkpoint files are saved; the example uses the current directory
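
A minimal sketch of that pattern (isTrain, train_steps, checkpoint_steps, and checkpoint_dir follow the tutorial's naming; the model and its train_step op are assumed to be defined elsewhere, and checkpoint_dir is assumed to end with '/'):

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    if isTrain:
        for step in range(train_steps):
            sess.run(train_step)  # assumed training op
            if (step + 1) % checkpoint_steps == 0:
                # writes model.ckpt-<step> files under checkpoint_dir
                saver.save(sess, checkpoint_dir + 'model.ckpt', global_step=step + 1)
    else:
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)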

saver = tf.train.Saver()
with tf.Session() as sess:
    save_path = saver.save(sess, 'model.ckpt')

Restore: saver.restore(sess, save_path)

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'net/my_net.ckpt')

Constructing Matrices

Identity matrix: tf.eye

a=tf.Variable(tf.eye(10))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))

Zero matrix: tf.zeros

c=tf.zeros([10,1])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(c))

Random tensors: tf.random_*

tf.random_normal (normal distribution)
tf.truncated_normal
tf.random_uniform
tf.random_shuffle
tf.random_crop
tf.multinomial
tf.random_gamma
tf.set_random_seed
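
A quick sketch exercising a few of these ops (shapes and ranges chosen arbitrarily):

tf.set_random_seed(1234)  # makes graph-level randomness reproducible

normal = tf.random_normal([2, 3], mean=0.0, stddev=1.0)
uniform = tf.random_uniform([2, 3], minval=0, maxval=10)
shuffled = tf.random_shuffle(tf.constant([[1, 2], [3, 4], [5, 6]]))  # shuffles along the first dimension
with tf.Session() as sess:
    print(sess.run(normal))
    print(sess.run(uniform))
    print(sess.run(shuffled))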

Visualizing the Graph with TensorBoard

Reset the default graph: tf.reset_default_graph()

tf.reset_default_graph()

Name scopes (group ops into named units in the graph): tf.name_scope(name, default_name=None, values=None)

with tf.name_scope('input'):
    x = tf.placeholder(tf.float32,[None,784],name='x-input')
    y = tf.placeholder(tf.float32,[None,10],name='y-input')
with tf.name_scope('layer'):
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784,10]),name='w')
    with tf.name_scope('bias'):
        b = tf.Variable(tf.zeros([10]),name='b')
    with tf.name_scope('wx_plus_b'):
        wx_plus_b=tf.matmul(x,W)+b
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b)

Plot a scalar over time (line chart): tf.summary.scalar(tags, values, collections=None, name=None)

tf.summary.scalar('loss',loss)

Plot histograms: tf.summary.histogram(tags, values, collections=None, name=None)

tf.summary.histogram('histogram',var)

Merge all summaries into a single op: tf.summary.merge_all (the merged summary is evaluated with sess.run and written to disk by a FileWriter)

merged=tf.summary.merge_all()
...........
	summary,_=sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys})

Write the graph and summaries to a log directory: tf.summary.FileWriter(path, sess.graph)

writer=tf.summary.FileWriter('logs/',sess.graph)
...........
writer.add_summary(summary,epoch)

Log images: tf.summary.image(tag, tensor, max_images=3, collections=None, name=None)

tf.summary.image('input',image,10)

Log audio: tf.summary.audio(name, tensor, sample_rate, max_outputs=3, collections=None, family=None)

# tensor must be a float32 audio tensor of shape [batch, frames, channels] (or [batch, frames]),
# not a file name; audio_tensor below is an assumed tensor of that shape
tf.summary.audio('input', audio_tensor, sample_rate=22050)

A simple TensorBoard example

# coding: utf-8

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()

#Load the dataset
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

#Size of each batch
batch_size = 100
#Number of batches in total
n_batch = mnist.train.num_examples // batch_size

def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean=tf.reduce_mean(var)
        tf.summary.scalar('mean',mean)
        with tf.name_scope('stddev'):
            stddev=tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
        tf.summary.scalar('stddev',stddev)
        tf.summary.scalar('max',tf.reduce_max(var))
        tf.summary.histogram('histogram',var)
with tf.name_scope('input'):
    #Define two placeholders
    x = tf.placeholder(tf.float32,[None,784],name='x-input')
    y = tf.placeholder(tf.float32,[None,10],name='y-input')

#Build a simple neural network
with tf.name_scope('layer'):
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784,10]),name='w')
        variable_summaries(W)
    with tf.name_scope('bias'):
        b = tf.Variable(tf.zeros([10]),name='b')
        variable_summaries(b)
    with tf.name_scope('wx_plus_b'):
        wx_plus_b=tf.matmul(x,W)+b
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b)

#Quadratic (MSE) cost function
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.square(y-prediction))
    tf.summary.scalar('loss',loss)
#Use gradient descent
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

#Initialize the variables
init = tf.global_variables_initializer()

#Store the comparison results in a list of booleans
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))#argmax returns the index of the largest value along the given axis
    #Compute the accuracy
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
        tf.summary.scalar('accuracy',accuracy)
merged=tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(init)
    writer=tf.summary.FileWriter('logs/',sess.graph)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs,batch_ys =  mnist.train.next_batch(batch_size)
            summary,_=sess.run([merged,train_step],feed_dict={x:batch_xs,y:batch_ys})
        writer.add_summary(summary,epoch)
        acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))

Afterwards, run tensorboard --logdir=logs from the command line and open the URL it prints in a browser (e.g. Firefox).

The image Module

Load an image: imageio.imread(image)
The returned array has dtype uint8.

import imageio
 
image_value = imageio.imread('SYE[WLMPD[@UPKRO}NX[58C.jpg')

Decoding images (used together with tf.read_file):
tf.image.decode_png(image, name=None) (also tf.image.decode_gif(image, name=None), tf.image.decode_jpeg(image, name=None))
The decoded tensor has dtype uint8.

import matplotlib.pyplot as plt

image_value = tf.read_file('SYE[WLMPD[@UPKRO}NX[58C.jpg')
img = tf.image.decode_png(image_value)
with tf.Session() as sess:
    img_ = img.eval()
plt.imshow(img_)
plt.show()

Adjust brightness or contrast: tf.image.adjust_brightness(image, delta),
tf.image.adjust_contrast, etc.

img=tf.image.adjust_brightness(img,0.5)

Resize an image: cv2.resize(image, (x, y))

import cv2
image =cv2.resize(image_value, (x, y))

Convert the image dtype

image_value = tf.image.convert_image_dtype(image_value, dtype=tf.float32)

A simple example

import imageio
import cv2
import numpy as np

image_value = imageio.imread('SYE[WLMPD[@UPKRO}NX[58C.jpg')
image_value = cv2.resize(image_value, (100, 100))
flag = np.zeros([1, 100, 100, 3])
flag[0, :, :, :] = image_value
image_value = tf.image.convert_image_dtype(flag, dtype=tf.float32)
#This yields an image tensor that can be fed into a CNN's conv2d

Neural Networks (NN)

Activation Functions

The activation ops provide different types of nonlinearities for neural networks. These include smooth nonlinearities (sigmoid, tanh, elu, softplus, and softsign), continuous but not everywhere differentiable functions (relu, relu6, crelu, and relu_x), and random regularization (dropout).

All activation ops apply element-wise and produce a tensor of the same shape as their input.

tf.nn.relu
tf.nn.relu6
tf.nn.crelu
tf.nn.elu
tf.nn.softplus
tf.nn.softsign
tf.nn.dropout
#    L_drop = tf.nn.dropout(L,keep_prob) 
tf.nn.bias_add
tf.sigmoid
tf.tanh
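
A small sketch applying a few of these element-wise ops (the input values are chosen arbitrarily):

z = tf.constant([[-1.0, 0.0, 2.0]])
relu_out = tf.nn.relu(z)   # negative entries clipped to 0
sig_out = tf.sigmoid(z)    # element-wise logistic function
drop_out = tf.nn.dropout(z, keep_prob=0.5)  # randomly zeroes entries, scales survivors by 1/keep_prob

with tf.Session() as sess:
    print(sess.run([relu_out, sig_out, drop_out]))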

Optimizers

The base Optimizer class provides methods to compute gradients and apply them to variables. A collection of subclasses implements classic optimization algorithms such as GradientDescent and Adagrad.

You never instantiate the Optimizer class itself; instead, instantiate one of its subclasses.

tf.train.Optimizer
tf.train.GradientDescentOptimizer
tf.train.AdadeltaOptimizer
tf.train.AdagradOptimizer
tf.train.AdagradDAOptimizer
tf.train.MomentumOptimizer
tf.train.AdamOptimizer (with this one, the learning rate must be set much smaller)
tf.train.FtrlOptimizer
tf.train.ProximalGradientDescentOptimizer
tf.train.ProximalAdagradOptimizer
tf.train.RMSPropOptimizer

#############################################
train_step=tf.train.GradientDescentOptimizer(0.5).minimize(loss)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

Cross-Entropy Cost Functions

tf.nn.softmax_cross_entropy_with_logits(_sentinel=None,labels=None,logits=None,dim=-1,name=None)
tf.nn.sigmoid_cross_entropy_with_logits(_sentinel=None,labels=None,logits=None,name=None)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y,logits=prediction))
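
A minimal runnable sketch with dummy one-hot labels and raw scores. Note that these ops expect unnormalized logits; passing softmax outputs, as the usage lines above do with prediction, applies the softmax twice.

labels = tf.constant([[1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]])
logits = tf.constant([[2.0, 1.0, 0.1],
                      [0.5, 2.5, 0.3]])
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
with tf.Session() as sess:
    print(sess.run(loss))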

Basic CNN Functions

Convolution: tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)

#input[batch, in_height, in_width, in_channels]
#filter[filter_height, filter_width, in_channels, out_channels]
#strides=[1, s, s, 1]
#padding='VALID' or 'SAME' (must be uppercase)

input = tf.Variable(tf.random_normal([1,3,3,5]))
filter = tf.Variable(tf.random_normal([1,1,5,1]))
op = tf.nn.conv2d(input, filter, strides=[1, 2, 2, 1], padding='VALID')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(input).shape)
    print(sess.run(filter).shape)
    print(sess.run(op).shape)

Pooling layer: tf.nn.max_pool(value, ksize, strides, padding, name=None)

#value[batch, in_height, in_width, in_channels]
#ksize[1,k,k,1]
#strides[1,s,s,1]

input = tf.Variable(tf.random_normal([1,4,4,5]))
op = tf.nn.max_pool(input, ksize=[1,2,2,1], strides=[1, 2, 2, 1], padding='VALID')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(input).shape)
    print(sess.run(op).shape)

Flatten a tensor into per-example vectors: tf.contrib.layers.flatten()

input = tf.Variable(tf.random_normal([1,4,4,5]))
op = tf.contrib.layers.flatten(input)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(input).shape)
    print(sess.run(op).shape)

Fully connected layer: tf.contrib.layers.fully_connected(F, num_outputs, activation_fn)

input = tf.Variable(tf.random_normal([1,4,4,5]))
op = tf.contrib.layers.flatten(input)
b=tf.contrib.layers.fully_connected(op,6,activation_fn=None)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(input).shape)
    print(sess.run(op).shape)
    print(sess.run(b).shape)

One-hot tensors: tf.one_hot(labels, C, axis=0)
One-hot encoding is typically used when labeling datasets for supervised learning. In a classification problem, the position corresponding to an example's class is marked with one value (usually 1) and every other position with another (usually 0).
For example:
In a 5-class problem with data (Xi, Yi), where Yi takes five possible values, an example of the first class has the one-hot encoding [1,0,0,0,0], and one of the second class [0,1,0,0,0].

a = tf.one_hot(indices=[0, 1, 2], depth=3, axis=0)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))

Basic RNN Functions

RNN cells

tf.contrib.rnn.BasicLSTMCell(num_units,forget_bias,state_is_tuple,activation,reuse,name)

num_units: the number of units in the cell
forget_bias: bias added to the forget gate, between 0 and 1; 1 means remember everything, 0 means forget everything
state_is_tuple: best left True
activation: the activation function
reuse: whether to reuse variables in an existing scope

lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
#tf.contrib.rnn.GRUCell
#tf.contrib.rnn.LSTMCell

Initialize the cell state
lstm_cell.zero_state(batch_size, dtype)

lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
initial_state=lstm_cell.zero_state(batch_size,dtype=tf.float32)

Running the RNN
tf.nn.dynamic_rnn(cell,inputs,sequence_length=None,initial_state=None,dtype=None,parallel_iterations=None,swap_memory=False,time_major=False,scope=None)

cell: an RNNCell instance
inputs: a tensor, whose shape must be [batch_size, max_time, embed_size]
outputs: the output of every time step, a tensor of shape
[batch_size, max_time, cell.output_size]
# e.g. with max_time=28, outputs[:,27,:] equals final_state[1]
state: the RNN state at the final time step; for an LSTM it is a tuple (c, h)
# final_state[0] is the cell state c, which carries information from the past
# final_state[1] is the hidden state h, the cell's current output, which is passed on to the next step

outputs,final_state = tf.nn.dynamic_rnn(lstm_cell,inputs,dtype=tf.float32)
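
A runnable sketch illustrating the shapes described above (batch/time/feature sizes chosen arbitrarily):

batch_size, max_time, embed_size, lstm_size = 4, 28, 10, 100
inputs = tf.random_normal([batch_size, max_time, embed_size])
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out, (c, h) = sess.run([outputs, final_state])
    print(out.shape)          # (4, 28, 100)
    print(c.shape, h.shape)   # (4, 100) (4, 100)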

Dropout for RNN cells: DropoutWrapper(cell, input_keep_prob=1.0, output_keep_prob=1.0, state_keep_prob=1.0, variational_recurrent=False, input_size=None, dtype=None, seed=None, dropout_state_filter_visitor=None)

The second argument applies dropout to the cell's inputs with the given keep probability; if it is 1, no dropout is performed. The third argument works the same way for the outputs. Dropout is generally applied to the inputs, the outputs, or between stacked recurrent layers (or in fully connected layers), never inside the recurrent loop of a single layer.

cell_dr = tf.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=1.0, output_keep_prob=1.0)

A simple example

#Very important: always reset the default graph before rebuilding it
tf.reset_default_graph()

# Input images are 28*28
n_inputs = 28 #28 pixels are fed in per time step (one image row)
max_time = 28 #28 rows in total
lstm_size = 100 #number of hidden units
n_classes = 10 #10 classes
batch_size = 50 #50 samples per batch
n_batch = mnist.train.num_examples // batch_size #number of batches in total

#None means the first dimension can be of any length
x = tf.placeholder(tf.float32,[None,784])
#Ground-truth labels
y = tf.placeholder(tf.float32,[None,10])

#Initialize the weights
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
#Initialize the biases
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))


#Define the RNN network
def RNN(X,weights,biases):
    # inputs=[batch_size, max_time, n_inputs]
    inputs = tf.reshape(X,[-1,max_time,n_inputs])
    #Define the basic LSTM cell
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
    outputs,final_state = tf.nn.dynamic_rnn(lstm_cell,inputs,dtype=tf.float32)
    results = tf.nn.softmax(tf.matmul(final_state[1],weights) + biases)
    return results
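
The notes break off here. For completeness, a hedged sketch of how this MNIST example is typically finished (loss, optimizer, accuracy, and the training loop). It assumes mnist was loaded with input_data.read_data_sets as in the TensorBoard example above, and keeps the tutorial's softmax-then-cross-entropy wiring even though the op strictly expects raw logits.

prediction = RNN(x, weights, biases)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(6):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))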