The building block of a neural network: the neuron (perceptron)
The perceptron is the earliest mathematical model of an artificial neuron, and it can do more than simple Boolean operations: it can fit any linear function, so any linearly separable classification problem or linear regression problem can be solved with a perceptron. It can even handle simple visual classification tasks, such as distinguishing triangles, circles, and rectangles.
The structure of the perceptron model is shown in the figure below.
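To make the Boolean-operation claim concrete, here is a minimal sketch of a single perceptron computing logical AND. The weights and bias are picked by hand rather than learned, and the step function is the classic perceptron activation:

import numpy as np

def perceptron(x, w, b):
    # weighted sum of the inputs followed by a step activation
    return 1 if np.dot(w, x) + b > 0 else 0

# hand-picked parameters that realize logical AND (an illustrative choice)
w = np.array([0.5, 0.5])
b = -0.8
for x in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(x, '->', perceptron(np.array(x), w, b))
# prints 0, 0, 0, 1: exactly the AND truth table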
The MNIST dataset
For a beginner, the MNIST dataset is the ideal starting point: it is small, simply structured, and homogeneous. It consists of 60,000 training samples and 10,000 test samples; each sample is a 28x28 single-channel image whose pixel values range from 0 to 255. In a neural network, a batch of such images is usually represented by a tensor of the following shape:
[b, h, w, c] = [4, 28, 28, 1]
b is the batch size, i.e. how many images take part in one training step,
h and w are the height and width of the image,
c is the number of channels (MNIST images are grayscale, so c = 1).
So [4, 28, 28, 1] means each training step uses 4 images, each 28x28 pixels, with a single channel. (TensorFlow's convention orders the axes as batch, height, width, channels.)
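As a quick sanity check, you can load the dataset and print the raw array shapes. Note that Keras returns the images without an explicit channel axis; the c dimension is added only when a network requires it. A minimal sketch:

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # (10000, 28, 28) (10000,)
print(x_train.min(), x_train.max())  # 0 255 -- raw uint8 pixel values

With the data format clear, the first full example trains a multi-layer network on MNIST from scratch, creating the weights and writing the gradient-descent loop by hand: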
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
# Set the logging level so TensorFlow does not flood the console with info messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
print(tf.__version__)
# Data preprocessing: scale pixels to [0, 1] and cast labels to int32
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y
# Build Dataset objects, which make shuffling, batching, and other transformations easy
batch_size = 128
(x_train,y_train),(x_test,y_test) = datasets.mnist.load_data()
db = tf.data.Dataset.from_tensor_slices((x_train,y_train)).shuffle(1000).batch(batch_size)
db = db.map(preprocess)
db_test = tf.data.Dataset.from_tensor_slices((x_test,y_test)).shuffle(1000).batch(batch_size)
db_test = db_test.map(preprocess)
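To verify what the pipeline yields, you can temporarily pull a single batch and inspect it; after preprocess the pixels are float32 values in [0, 1]. A quick sketch:

x, y = next(iter(db))
print(x.shape, y.shape)         # (128, 28, 28) (128,)
print(x.dtype, y.dtype)         # float32 int32
print(float(tf.reduce_max(x)))  # at most 1.0 after the /255. scaling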
# Create the weights for a four-layer fully connected network: 784 -> 512 -> 256 -> 128 -> 10
# Truncated-normal initialization with a small stddev keeps the initial activations well scaled
w0 = tf.Variable(tf.random.truncated_normal([784, 512], stddev=0.1))
b0 = tf.Variable(tf.zeros([512]))
w1 = tf.Variable(tf.random.truncated_normal([512, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
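For reference, these shapes imply the following parameter counts: 784·512 + 512 = 401,920 for the first layer, 512·256 + 256 = 131,328 for the second, 256·128 + 128 = 32,896 for the third, and 128·10 + 10 = 1,290 for the output layer, 567,434 trainable parameters in total.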
# Learning rate
lr = 0.004
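Every update in the loop below is plain gradient descent: each parameter θ moves a small step against its loss gradient, θ ← θ − lr · ∂loss/∂θ, which is exactly what assign_sub computes in place.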
# epoch counts complete passes over the training set; train for 200 epochs
for epoch in range(200):
    # step indexes the current batch within the epoch
    for step, (x, y) in enumerate(db):
        # flatten each 28x28 image into a 784-dimensional vector
        x = tf.reshape(x, [-1, 28*28])
        with tf.GradientTape() as tape:
            # forward pass: 784 -> 512 -> 256 -> 128 -> 10
            h0 = x @ w0 + tf.broadcast_to(b0, [x.shape[0], 512])
            h0 = tf.nn.relu(h0)
            h1 = h0 @ w1 + b1  # '+' broadcasts the bias automatically
            h1 = tf.nn.relu(h1)
            h2 = h1 @ w2 + b2
            h2 = tf.nn.relu(h2)
            # the output layer produces raw scores; no activation here,
            # so training matches the test-time forward pass below
            h3 = h2 @ w3 + b3
            # convert the integer labels to one-hot encoding
            y_onehot = tf.one_hot(y, depth=10)
            # compute the MSE loss
            loss = tf.square(y_onehot - h3)
            loss = tf.reduce_mean(loss)
        # compute the gradients of the loss w.r.t. every parameter
        grads = tape.gradient(loss, [w0, b0, w1, b1, w2, b2, w3, b3])
        # update the parameters in place; a plain assignment such as
        # w0 = w0 - lr * grads[0] would replace the tf.Variable with a
        # Tensor, so assign_sub is used instead
        w0.assign_sub(lr * grads[0])
        b0.assign_sub(lr * grads[1])
        w1.assign_sub(lr * grads[2])
        b1.assign_sub(lr * grads[3])
        w2.assign_sub(lr * grads[4])
        b2.assign_sub(lr * grads[5])
        w3.assign_sub(lr * grads[6])
        b3.assign_sub(lr * grads[7])
        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
    # after each epoch, measure the accuracy on the test set
    total_correct, total_num = 0, 0
    for step, (x, y) in enumerate(db_test):
        x = tf.reshape(x, [-1, 28*28])
        h0 = tf.nn.relu(x @ w0 + b0)
        h1 = tf.nn.relu(h0 @ w1 + b1)
        h2 = tf.nn.relu(h1 @ w2 + b2)
        h3 = h2 @ w3 + b3
        # softmax turns the scores into probabilities; argmax picks the most likely class
        prob = tf.nn.softmax(h3, axis=1)
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)
        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)
        total_correct += int(correct)
        total_num += x.shape[0]
    acc = total_correct / total_num
    print('test acc:', acc)
The same network can be written far more compactly with the high-level Keras API. The following script builds an equivalent model with Sequential and trains it with an SGD optimizer:

import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets,Sequential,layers,optimizers,metrics
# Set the logging level so TensorFlow does not flood the console with info messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
print(tf.__version__)
# Data preprocessing: scale pixels to [0, 1] and cast labels to int32
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y
# Build Dataset objects, which make shuffling, batching, and other transformations easy
batch_size = 32
(x_train,y_train),(x_test,y_test) = datasets.mnist.load_data()
db = tf.data.Dataset.from_tensor_slices((x_train,y_train)).shuffle(1000).batch(batch_size)
db = db.map(preprocess).repeat(1000)  # repeat so the single step loop below spans many epochs
db_test = tf.data.Dataset.from_tensor_slices((x_test,y_test)).shuffle(1000).batch(batch_size)
db_test = db_test.map(preprocess).repeat(20)
# Build the network with Keras layers: 784 -> 256 -> 128 -> 10
model = Sequential([layers.Dense(256, activation='relu'),
                    layers.Dense(128, activation='relu'),
                    layers.Dense(10)])
model.build(input_shape=(None, 28*28))  # None leaves the batch size flexible
model.summary()
optimizer = optimizers.SGD(learning_rate=0.003)
acc_meter = metrics.Accuracy()
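model.summary() prints the parameter count of each Dense layer, which you can verify by hand: 784·256 + 256 = 200,960 for the first layer, 256·128 + 128 = 32,896 for the second, and 128·10 + 10 = 1,290 for the output, 235,146 trainable parameters in total.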
for step, (x, y) in enumerate(db):
    with tf.GradientTape() as tape:
        # flatten each image and run the forward pass
        x = tf.reshape(x, [-1, 28*28])
        out = model(x)
        # one-hot targets for the MSE loss
        y_one_hot = tf.one_hot(y, depth=10)
        loss = tf.square(out - y_one_hot)
        loss = tf.reduce_sum(loss) / x.shape[0]
    # Accuracy.update_state expects (y_true, y_pred)
    acc_meter.update_state(y, tf.argmax(out, axis=1))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    if step % 200 == 0:
        print(step, 'loss:', float(loss), 'acc:', acc_meter.result().numpy())
        acc_meter.reset_states()
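As an aside, the hand-written loop above can also be replaced by Keras's built-in compile/fit machinery. The sketch below assumes the db pipeline defined earlier; it flattens the images inside the pipeline (the Dense layers expect 784-dimensional vectors) and swaps the tutorial's MSE for cross-entropy, the more common choice for classification:

db_flat = db.map(lambda x, y: (tf.reshape(x, [-1, 28*28]), y))
model.compile(optimizer=optimizers.SGD(learning_rate=0.003),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# db repeats 1000 times, so cap each epoch at one pass over the 60k training images
model.fit(db_flat, epochs=5, steps_per_epoch=60000 // batch_size)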