# ===== MNIST dataset (TF1 RNN classifier) =====
from tensorflow.contrib.layers import fully_connected
import tensorflow as tf
n_steps = 28       # time steps: one 28-pixel image row per step
n_inputs = 28      # features per step: 28 pixels in a row
n_neurons = 150    # hidden units in the RNN cell
n_outputs = 10     # digit classes 0-9
learning_rate = 0.001
# Placeholders: X is a batch of 28x28 images fed as 28-step sequences,
# y holds the integer class labels.
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
# RNN cell
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons) # basic RNN cell; num_units is the hidden size
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
# Fully connected layer on the final state -> one logit per class
logits = fully_connected(states, n_outputs, activation_fn=None)
# Loss and optimizer
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
# Accuracy: fraction of batches where the true label is the top-1 logit
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# Variable initializer (run inside the session before training)
init = tf.global_variables_initializer()
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/")
# Reshape flat 784-pixel test images into (batch, 28 steps, 28 inputs)
# sequences matching the placeholder X.
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels

# Training loop (same pattern as the earlier DNN example).
# The original paste had lost all indentation inside the `with` block,
# which made this section a syntax error; structure restored here.
n_epochs = 100
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            X_batch = X_batch.reshape((-1, n_steps, n_inputs))
            sess.run(train_op, feed_dict={X: X_batch, y: y_batch})
        # Evaluate on the last training batch and on the full test set.
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
# ===== Cell dataset (Keras SimpleRNN classifier) =====
import os
import time
import cv2 as cv
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import (Dense, Dropout, SimpleRNN, Embedding)
# Hyperparameters copied over from the MNIST section above. NOTE(review):
# none of these are actually referenced by the cell-dataset code below —
# the Keras model uses its own literal sizes and Adam's default learning
# rate — so they are dead definitions in this section.
n_steps = 28
n_inputs = 28
n_neurons = 150
n_outputs = 10
learning_rate = 0.001
# ——————————————————————————————————————————————function————————————————————————————————————————————————————
def set_gpus(gpu_index):
    """Restrict which CUDA devices are visible to this process.

    gpu_index: an int GPU id, a list/tuple of ids, or a pre-formatted
        string such as "0,1"; written to CUDA_VISIBLE_DEVICES.
    """
    # isinstance() is the idiomatic, subclass-safe type check
    # (the original compared `type(x) == list` / `== int`).
    if isinstance(gpu_index, (list, tuple)):
        gpu_index = ','.join(str(i) for i in gpu_index)
    elif isinstance(gpu_index, int):
        gpu_index = str(gpu_index)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_index
def get_path(filepath):
    """Recursively collect file names and full paths under *filepath*.

    Returns (names, paths): names[i] is the bare file name corresponding
    to the full path paths[i], gathered bottom-up via os.walk.
    """
    names, paths = [], []
    for root, _, files in os.walk(filepath, topdown=False):
        names.extend(files)
        paths.extend(os.path.join(root, fname) for fname in files)
    return names, paths
def read_file(path, filelist, filedirs, Use_smote=False):
    """Load grayscale cell images and map their name prefixes to class ids.

    path: unused (the original shadowed it with the loop variable; kept
        only so existing callers keep working).
    filelist: bare file names, aligned index-for-index with filedirs.
    filedirs: full paths of the image files to read.
    Use_smote: unused placeholder — no oversampling is implemented.

    Returns (datalist, claslist): datalist is a float array of flattened
    images scaled to [0, 1]; claslist is the matching int label array.
    """
    # Carcinoma subtypes all collapse to class 0; normal tissue is 1; TIL is 2.
    dic = {'DCIS': 0, 'IDC': 0, 'Muc': 0, 'ILC': 0, 'MC': 0,
           'normal': 1,
           'TIL': 2}
    datalist = []
    claslist = []
    # zip pairs each name with its path directly, replacing the original's
    # O(n^2) `filelist[filedirs.index(path)]` lookup per file.
    for name, img_path in zip(filelist, filedirs):
        img = cv.imread(img_path, 0)  # read as single-channel grayscale
        img = img.flatten() / 255     # flatten and scale pixels to [0, 1]
        clas = name.split('_')[0]     # class key is the prefix before '_', e.g. 'DCIS'
        datalist.append(img)
        claslist.append(dic[clas])
    return np.array(datalist), np.array(claslist)
def RNNnetwork(datalist, claslist):
    """Train a SimpleRNN classifier on flattened 51x51 cell images.

    datalist: float array of shape (n_samples, 2601), pixels in [0, 1].
    claslist: int labels in {0, 1, 2} (carcinoma / normal / TIL).

    Returns the [loss, accuracy] pair from evaluating on the training data.
    """
    # Feed each image to the RNN as a sequence of 51 rows of 51 pixels.
    # (The original routed the float pixels through Embedding(input_dim=1),
    # which floors every value to index 0 and destroys the image content.)
    datalist = np.asarray(datalist).reshape((-1, 51, 51))

    model = Sequential()
    model.add(SimpleRNN(16, input_shape=(51, 51)))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    # Three classes require 3 softmax outputs: the original Dense(1, sigmoid)
    # cannot represent labels {0, 1, 2} and is incompatible with
    # sparse_categorical_crossentropy.
    model.add(Dense(3, activation='softmax'))
    model.summary()
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(datalist, claslist, epochs=10, batch_size=100)
    # NOTE(review): the original opened a tf.Session and ran
    # global_variables_initializer() AFTER fit(), which under TF1 Keras
    # re-initializes (clobbers) the trained weights; removed.
    return model.evaluate(datalist, claslist, verbose=2)
# ———————————————————————————————— main ————————————————————————————————
set_gpus(0)
t0 = time.time()
train_dir = 'C:\\Users\\Administrator\\Desktop\\Tensorflow\\cells\\train'
# Gather the image file names and their full paths, then load the data.
names, full_paths = get_path(train_dir)
datalist, claslist = read_file(train_dir, names, full_paths, Use_smote=False)
print('读入数据完毕,开始训练')
# Train and evaluate the RNN; report [loss, accuracy] and elapsed time.
results = RNNnetwork(datalist, claslist)
print('RNN的loss和正确率分别为:', results)
print('耗时:', time.time() - t0, 's')