# Code:
import numpy as np
import h5py
import tensorflow as tf
import time
from sklearn import preprocessing
import matplotlib.pyplot as plt
import math
def weight_variable(shape):
    """Create a trainable weight tensor of the given shape.

    Values are drawn from a truncated normal distribution with
    standard deviation 0.1 (TF1-style initialization).
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, filled with 0.1.

    The small positive constant avoids dead ReLU units at the start of
    training (common TF1 convention).
    """
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv3d(x, W):
    """Apply a 3-D convolution of kernel W over input x.

    Unit stride on every dimension and 'VALID' padding (no zero-padding,
    so spatial dims shrink by kernel_size - 1).
    """
    unit_strides = [1, 1, 1, 1, 1]
    return tf.nn.conv3d(input=x, filter=W, strides=unit_strides, padding='VALID')
def max_pool_3d(x):
    """Downsample x by 2 along each spatial dimension via 3-D max pooling.

    Window and stride are both 2x2x2 (batch and channel dims untouched);
    'SAME' padding rounds odd spatial sizes up.
    """
    window = [1, 2, 2, 2, 1]
    return tf.nn.max_pool3d(input=x, ksize=window, strides=window, padding='SAME')
# load data
# Open the train/test HDF5 files read-only. NOTE(review): handles are kept
# open for the lifetime of the script (datasets below are lazy h5py views).
cube_f_train = h5py.File("./data/cube_train_non_1159.hdf5", "r")
cube_f_test = h5py.File("./data/cube_test_non_1159.hdf5", "r")
# Lazy h5py dataset handles; actual reads happen on slicing.
cube_train_data = cube_f_train['data']
train_labels = cube_f_train['labels']
# Standardize labels (zero mean, unit variance) using TRAIN statistics only.
# assumes 'labels' is 2-D (n_samples, n_targets) as StandardScaler requires — TODO confirm
scaler = preprocessing.StandardScaler().fit(train_labels)
train_labels_n = scaler.transform(train_labels)
cube_test_data = cube_f_test['data']
test_labels = cube_f_test['labels']
# Test labels are transformed with the train-fitted scaler (no leakage).
test_labels_n = scaler.transform(test_labels)
# batch data
def get_batch_data(data, batch, batch_size):
if data == 'train':
batch_cube_xs = cube_train_data[batch * batch_size: (batch + 1) * batch_size]
batch_ys = train_labe