# Building a fully connected neural network with TensorFlow to classify the two-moons dataset


import numpy as np
import tensorflow as  tf
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
# Generate the two-moons dataset: 500 points, light noise, fixed seed.
x, y = make_moons(n_samples=500, noise=0.09, random_state=0)
# Rescale features into roughly (0, 1); raw moons span about [-1.2, 2.2].
x = (x + 1.2) / 3.4 + 0.01
# Hold out a test split (default 25%).
x_train, x_test, y_train, y_test = train_test_split(x, y)
# Convert the training arrays (ndarray) to tensors for TensorFlow ops.
x_train = tf.convert_to_tensor(x_train, dtype=tf.float32)
y_train = tf.convert_to_tensor(y_train, dtype=tf.int32)
# Parameters of a 2-8-2 fully connected network: small random weights,
# zero biases.
w1 = tf.Variable(tf.random.truncated_normal([2, 8], stddev=0.1))
w2 = tf.Variable(tf.random.truncated_normal([8, 2], stddev=0.1))
b1 = tf.Variable(tf.zeros([8]))
b2 = tf.Variable(tf.zeros([2]))
# One-hot encode the integer labels for the 2-class softmax cross-entropy.
y_train = tf.one_hot(y_train, depth=2)
# Learning rate for the plain gradient-descent updates in the training loop.
lr = 0.1
# NOTE(review): the original code also ran one throwaway forward pass and
# one backward pass here (net1/out1/net2/out2, a loss, and a tape.gradient
# call); every one of those results was overwritten inside the training
# loop below and the gradients were never applied, so the dead code was
# removed.
#更新参数
All_loss = []  # per-step training loss, recorded for the learning curve
for step in range(30000):  # iterate 30000 full-batch gradient steps
    with tf.GradientTape() as tape:
        # tf.Variable objects are watched automatically; no tape.watch needed.
        # Forward pass: ReLU hidden layer, then RAW logits from the output layer.
        hidden = tf.nn.relu(x_train @ w1 + b1)
        logits = hidden @ w2 + b2
        # BUG FIX: softmax_cross_entropy_with_logits expects raw logits.
        # The original fed it softmax(relu(sigmoid(net2))), i.e. probabilities
        # passed through extra activations, which double-applies softmax and
        # distorts the gradients.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_train,
                                                    logits=logits))
    All_loss.append(float(loss))  # store a Python float, not a live tensor
    grads = tape.gradient(loss, [w1, b1, w2, b2])
    # Vanilla gradient descent: param <- param - lr * grad.
    w1.assign_sub(lr * grads[0])
    b1.assign_sub(lr * grads[1])
    w2.assign_sub(lr * grads[2])
    b2.assign_sub(lr * grads[3])
    if step % 1000 == 0:  # print the loss every 1000 iterations
        print(step, 'loss', float(loss))
plt.plot(All_loss)  # learning curve over the 30000 training steps
# Evaluate on the held-out test split.
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
# Class prediction only needs the argmax of the raw logits: the original's
# sigmoid/relu/softmax wrappers are monotonic and never change the argmax,
# so they were dropped.
logits = tf.nn.relu(x_test @ w1 + b1) @ w2 + b2
y_predict = tf.math.argmax(logits, axis=-1)
y_test = tf.cast(y_test, tf.int64)  # argmax returns int64; match dtypes
correct = tf.cast(tf.math.equal(y_predict, y_test), tf.int64)
# reduce_sum/len yields a FRACTION in [0, 1]; the original printed that raw
# fraction with a "%" suffix, understating the accuracy 100-fold.
accuracy = tf.math.reduce_sum(correct) / len(y_test)
print('准确率为:', float(accuracy) * 100, "%")
plt.show()  # without this the loss-curve figure is never displayed
