CNN for CT Images

# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange
#get_ipython().magic(u'matplotlib inline')
train_dataset = np.load('/home/sys-04/tensorflow/train_dataset.npy')
train_labels = np.load('/home/sys-04/tensorflow/train_labels.npy')
test_dataset = np.load('/home/sys-04/tensorflow/test_dataset.npy')
test_labels = np.load('/home/sys-04/tensorflow/test_labels.npy')
map = {key:val for key, val in enumerate('hs')}
def plot_check(matrix, key):
    plt.imshow(matrix)
    plt.show()
    print('the picture should be  ', map[key])
    return None
"""
length = test_dataset.shape[0]-1
for _ in range(10):
    index = np.random.randint(length)
    plot_check(test_dataset[index,:,:], test_labels[index])
 """ 

image_height = 2000
image_width = 1600
num_channels = 1
num_labels = 2
x = tf.placeholder(tf.float32, shape=[None, image_height, image_width,
                                      num_channels])
y_ = tf.placeholder(tf.float32, shape=[None, 2])

# A convolutional network expects 4-D input data and one-hot labels
def reformat(dataset, labels):  
    dataset = dataset.reshape((-1, image_height, image_width, num_channels)).astype(np.float32)  
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)  
    return dataset, labels  
train_dataset, train_labels = reformat(train_dataset, train_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print(train_dataset.shape, train_labels.shape)
print(test_dataset.shape, test_labels.shape)
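# Note (added): the one-hot conversion above works by broadcasting; for example, with num_labels = 2,
# (np.arange(2) == np.array([0, 1, 0])[:, None]) gives [[True, False], [False, True], [True, False]],
# which astype(np.float32) turns into [[1., 0.], [0., 1.], [1., 0.]].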

sess = tf.InteractiveSession()
#dataset = tf.placeholder(tf.float32, shape = [None, image_height, image_width, 1])
#labels = tf.placeholder(tf.float32, shape = [None, 2])

def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1, name = name)
    return tf.Variable(initial)

def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape, name = name)
    return tf.Variable(initial)

def conv2d(x, W, name):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name = name)

def max_pool_2x2(x, name):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name = name)


# Build the computation graph
#conv-pool-1
with tf.name_scope('conv-pool-1'):
    W_conv1 = weight_variable([5,5,1,32], 'W_conv1')
    b_conv1 = bias_variable([32], 'b_conv1')
    h_conv1 = tf.nn.relu(conv2d(x, W_conv1, 'h_conv1') + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1, 'h_pool1')

#conv-pool-2
with tf.name_scope('conv-pool-2'):
    W_conv2 = weight_variable([5,5,32,64], 'W_conv2')
    b_conv2 = bias_variable([64], 'b_conv2')
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 'h_conv2') + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2, 'h_pool2')

#conv-pool-3
with tf.name_scope('conv-pool-3'):
    W_conv3 = weight_variable([5,5,64,128], 'W_conv3')
    b_conv3 = bias_variable([128], 'b_conv3')
    h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3, 'h_conv3') + b_conv3)
    h_pool3 = max_pool_2x2(h_conv3, 'h_pool3')

#conv-pool-4
with tf.name_scope('conv-pool-4'):
    W_conv4 = weight_variable([5,5,128,256], 'W_conv4')
    b_conv4 = bias_variable([256], 'b_conv4')
    h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4, 'h_conv4') + b_conv4)
    h_pool4 = max_pool_2x2(h_conv4, 'h_pool4')
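# Note (added): with 'SAME' padding and stride 2, each max-pool halves both spatial dimensions,
# so after four pool layers the 2000 x 1600 input shrinks to 2000/2^4 x 1600/2^4 = 125 x 100,
# and conv-pool-4 outputs 256 feature maps, which is where the 125 * 100 * 256 size below comes from.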

# Fully connected layer
with tf.name_scope('Densely-Layer'):
    W_fc1 = weight_variable([125 * 100 * 256, 1024], 'W_fc1')
    b_fc1 = bias_variable([1024], 'b_fc1')

    h_pool4_flat = tf.reshape(h_pool4, [-1, 125 * 100 * 256])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1, name = 'h_fc1')

# Dropout to reduce overfitting
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
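# Note (added): keep_prob is fed as 0.5 during training and 1.0 when evaluating on the test set
# (see the feed dicts below), so dropout is only active while training.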

# Output layer
with tf.name_scope('output'):
    W_fc2 = weight_variable([1024, 2], 'W_fc2')
    b_fc2 = bias_variable([2], 'b_fc2')

    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2



# Train and evaluate the model
# Cross entropy as the loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y_conv)) 
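# Note (added): softmax_cross_entropy_with_logits applies the softmax internally, so y_conv is
# passed in as raw logits rather than probabilities.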

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)  # Adam optimizer to reduce the loss
# tf.argmax(y_conv, 1) is the most likely predicted class, tf.argmax(y_, 1) the true label;
# tf.equal gives a list of booleans
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
# Cast the booleans to numbers and take the mean to estimate the model's accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # compute accuracy

sess.run(tf.global_variables_initializer())
print('initialized')
train_batch_size = 20
test_batch_size = 10
for i in range(801):
    print('step %s' % i)
    offset = (i * train_batch_size) % (train_labels.shape[0] - train_batch_size)
    xs = train_dataset[offset:(offset + train_batch_size), :, :, :]
    ys = train_labels[offset:(offset + train_batch_size), :]
    feed_dict = {x: xs, y_: ys, keep_prob: 0.5}
    if i % 10 == 0:
        train_accuracy = accuracy.eval(feed_dict=feed_dict)
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict=feed_dict)

print("test accuracy %g"%accuracy.eval(feed_dict={
   dataset:test_dataset, labels:test_labels, keep_prob:1.0}))

Note: feed the data in batches of an appropriate size so that you don't run out of memory, e.g.

xs = train_dataset[offset:(offset + train_batch_size), :, :, :]
ys = train_labels[offset:(offset + train_batch_size), :]
Also note that the feed_dict keys must be the placeholders x and y_.
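If the whole test set is also too large to feed in one go, the same batching idea applies to the final evaluation. A minimal sketch (it reuses the test_batch_size defined above and simply averages the per-batch accuracy, assuming the number of test samples is a multiple of test_batch_size):

test_accs = []
for j in range(0, test_labels.shape[0], test_batch_size):
    test_accs.append(accuracy.eval(feed_dict={
        x: test_dataset[j:j + test_batch_size],
        y_: test_labels[j:j + test_batch_size],
        keep_prob: 1.0}))
print("test accuracy %g" % np.mean(test_accs))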

It finally runs. The accuracy is still a bit low, but I'm happy @_@
Sure enough, only after putting something down for a while can you pick it back up and analyze it with a calm mind.
