# 1.最简单的结构:卷积+激活+池化+全连接+分类输出
# 2.“CNN计算过程具体推导”+“更复杂的网络结构”有时间或者有必要再更!!!
# 3.数据链接参见最后
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 22:32:02 2020
@author:飞鱼路上
"""
import numpy as np
from sklearn.model_selection import train_test_split #数据集的分割函数
from sklearn.preprocessing import StandardScaler #数据预处理
import pandas as pd
import warnings
import tensorflow as tf
warnings.filterwarnings('ignore')  # silence sklearn/TF deprecation warnings
# Handle to TF's default graph (this does NOT allocate memory, contrary to the
# old comment). NOTE(review): `graph` is never used later in this file.
graph=tf.get_default_graph()
# In[]
#
def weight_variables(shape):
    """Create a weight variable initialized from N(0, 1) with a fixed seed.

    Args:
        shape: list of ints giving the variable's shape.

    Returns:
        A tf.Variable holding the random-normal initial weights.
    """
    # NOTE(review): stddev=1.0 is large for conv/FC weights; a smaller stddev
    # (or truncated_normal) may train more stably -- left unchanged here.
    w = tf.Variable(tf.random_normal(shape=shape, mean=0.0, stddev=1.0, seed=1))
    return w
def bias_variables(shape):
    """Return a bias variable of the given shape, initialized to all zeros."""
    return tf.Variable(tf.constant(0.0, shape=shape))
def CNN_model(n_future, n_class, n_hidden, n_fc1, future_out):
    """Build a minimal 1-D CNN graph: conv -> ReLU -> max-pool -> FC -> dropout -> FC.

    Args:
        n_future: number of input features per sample.
        n_class: number of output classes (one-hot labels).
        n_hidden: number of convolution filters.
        n_fc1: width of the first fully-connected layer.
        future_out: feature length after the stride-2 SAME max-pool,
            i.e. ceil(n_future / 2).

    Returns:
        Tuple (x, y_true, keep_prob, train_op, accuracy) of graph handles.
    """
    with tf.variable_scope("data"):
        # Placeholders: flat feature vector, one-hot labels, dropout keep-prob.
        x = tf.placeholder(tf.float32, [None, n_future])
        y_true = tf.placeholder(tf.int32, [None, n_class])
        keep_prob = tf.placeholder(tf.float32)
    with tf.variable_scope("cov1"):
        # 1x3 kernels sliding over the feature axis; input reshaped to NHWC
        # with height 1 so a 2-D conv acts as a 1-D conv.
        w_conv1 = weight_variables([1, 3, 1, n_hidden])
        b_conv1 = bias_variables([n_hidden])
        x_reshape = tf.reshape(x, [-1, 1, n_future, 1])
        # Convolution + ReLU activation.
        x_relu1 = tf.nn.relu(
            tf.nn.conv2d(x_reshape, w_conv1, strides=[1, 1, 1, 1],
                         padding="SAME") + b_conv1)
        # Stride-2 max pooling halves the feature axis (SAME => ceil division,
        # which is where `future_out` comes from).
        x_pool1 = tf.nn.max_pool(x_relu1, ksize=[1, 1, 2, 1],
                                 strides=[1, 1, 2, 1], padding="SAME")
    with tf.variable_scope("conv_fc"):
        # First fully-connected layer on the flattened pooled features.
        w_fc1 = weight_variables([1 * future_out * n_hidden, n_fc1])
        b_fc1 = bias_variables([n_fc1])
        x_fc_reshape = tf.reshape(x_pool1, [-1, 1 * future_out * n_hidden])
        y_fc1 = tf.matmul(x_fc_reshape, w_fc1) + b_fc1
        # NOTE(review): there is no non-linearity between the two FC layers,
        # so together they collapse to one linear map; consider a ReLU here.
        # Left unchanged to preserve the original behaviour.
        h_fc1_drop = tf.nn.dropout(y_fc1, keep_prob)
        # Output projection to class logits.
        w_fc2 = weight_variables([n_fc1, n_class])
        b_fc2 = bias_variables([n_class])
        y_predict = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
    with tf.variable_scope("soft_cross"):
        # _v2 replaces the deprecated softmax_cross_entropy_with_logits.
        # Labels are fed via a placeholder, so no gradient ever flows into
        # them and training behaviour is identical.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true,
                                                       logits=y_predict))
    with tf.variable_scope("opmizer"):  # (sic) scope name kept as-is
        train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(loss)
    with tf.variable_scope("acc"):
        # Mean fraction of samples whose argmax prediction matches the label.
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
    return x, y_true, keep_prob, train_op, accuracy
# In[]
# Data loading and splitting: read the headerless CSV, take the first 23
# columns as features and column 23 as the label, one-hot encode the labels,
# then carve out train (70%) and split the remaining 30% into test/validation.
url = 'C:/Users/weixifei/Desktop/TensorFlow程序/data.csv'
data = np.array(pd.read_csv(url, sep=',', header=None))
X_data, Y = data[:, :23], data[:, 23]
labels = np.asarray(pd.get_dummies(Y), dtype=np.int8)
X_train, X_, Y_train, Y_ = train_test_split(X_data, labels,
                                            test_size=0.3, random_state=20)
X_test, X_vld, Y_test, Y_vld = train_test_split(X_, Y_,
                                                test_size=0.1, random_state=20)
# In[]
# Feature standardization. BUG FIX: the scaler must be fitted on the training
# set only; the original called fit_transform on the test and validation sets
# too, refitting the scaler with each split's own mean/std. That scales the
# evaluation data differently from the data the model was trained on.
stdsc = StandardScaler()
X_train = stdsc.fit_transform(X_train)
X_test = stdsc.transform(X_test)   # was fit_transform
X_vld = stdsc.transform(X_vld)     # was fit_transform
# In[]
# Hyper-parameters.
n_future = 23   # input features per sample
n_class = 12    # number of classes
n_hidden = 16   # convolution filters
n_fc1 = 256     # first fully-connected width
# Feature length after the stride-2 SAME max-pool: ceil(n_future / 2).
# (Equivalent to the even/odd branch: n//2 when even, n//2 + 1 when odd.)
future_out = (n_future + 1) // 2
# Build the graph and keep handles to the ops the training loop needs.
x, y_true, keep_prob, train_op, accuracy = CNN_model(
    n_future, n_class, n_hidden, n_fc1, future_out)
init_op = tf.global_variables_initializer()
training_epochs = 5
batch_size = 64
total_batches = X_train.shape[0] // batch_size
# In[]
with tf.Session() as sess:
    # Run the initializer built above instead of constructing a second,
    # redundant tf.global_variables_initializer() op.
    sess.run(init_op)
    for epoch in range(training_epochs):
        for i in range(total_batches):
            # Sequential mini-batches. Since i < total_batches =
            # len(X_train) // batch_size, i*batch_size never reaches
            # len(X_train); the original `% X_train.shape[0]` was a no-op.
            start = i * batch_size
            end = start + batch_size
            sess.run(train_op,
                     feed_dict={x: X_train[start:end],
                                y_true: Y_train[start:end],
                                keep_prob: 0.5})
            if i % 50 == 0:
                # Periodic validation accuracy (dropout disabled: keep_prob=1).
                print("Epoch %d,Steps %d,validation accuracy:%f"
                      % (epoch + 1, i,
                         sess.run(accuracy,
                                  feed_dict={x: X_vld, y_true: Y_vld,
                                             keep_prob: 1})))
    # Final held-out test accuracy after all epochs.
    print("test accuracy:%f"
          % (sess.run(accuracy,
                      feed_dict={x: X_test, y_true: Y_test, keep_prob: 1})))
# 数据链接
# 百度网盘:https://pan.baidu.com/s/1eyDFV6YVaOHwr9QrRcKXFg
# 提取码:rlne
# 额外数据集:fashion-mnist,office_caltech_10