用DCGAN训练并生成自己的图像集(含tensorflow代码)

第一片代码model_CT.py用于G和D的构造

# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 20:33:14 2018
E-mail: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""


import tensorflow as tf
import numpy as np


# pooling and convolution definition
def conv2d(x, W):
    """Apply a stride-1, SAME-padded 2-D convolution of filter W over x."""
    conv = tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME')
    return conv

def avg_pool_2x2(x):
    """Downsample x by 2 in each spatial dimension with 2x2 average pooling."""
    pooled = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return pooled

def xavier_init(size):
    """Draw Xavier-style initial weights of the given shape.

    Uses stddev = 1 / sqrt(fan_in / 2), with fan_in taken from size[0].
    """
    fan_in = size[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=stddev)
def sample_z(shape):
    """Sample a latent array uniformly from [-1, 1) with the given shape."""
    return np.random.uniform(low=-1., high=1., size=shape)

# discriminator
def discriminator(x_image, reuse=False):
    """DCGAN discriminator: score a batch of single-channel images as real/fake.

    Four conv + average-pool stages (each pool halves height and width,
    rounding up) followed by three fully connected layers. The flatten size
    14*12*64 implies a 221x181x1 input (221->111->56->28->14, 181->91->46->23->12).

    Args:
        x_image: image batch, NHWC layout with 1 channel.
        reuse: when True, reuse the existing 'discriminator' scope variables
            (needed for the second call on generated images).

    Returns:
        y_conv: raw logits of shape [batch, 1]. No sigmoid is applied, so
        pair this with tf.nn.sigmoid_cross_entropy_with_logits.
    """
    with tf.variable_scope('discriminator') as scope:
        if (reuse):
            tf.get_variable_scope().reuse_variables()
        # First conv + pool stage: 1 -> 8 channels, spatial size halved.
        W_conv1 = tf.get_variable('d_wconv1', shape = [5, 5, 1, 8], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv1 = tf.get_variable('d_bconv1', shape = [8], initializer=tf.constant_initializer(0))
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)        
        h_pool1 = avg_pool_2x2(h_conv1)

        # Second conv + pool stage: 8 -> 16 channels.
        W_conv2 = tf.get_variable('d_wconv2', shape = [5, 5, 8, 16], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv2 = tf.get_variable('d_bconv2', shape = [16], initializer=tf.constant_initializer(0))
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = avg_pool_2x2(h_conv2)

        # Third conv + pool stage: 16 -> 32 channels.
        W_conv3 = tf.get_variable('d_wconv3', shape = [5, 5, 16, 32], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv3 = tf.get_variable('d_bconv3', shape = [32], initializer=tf.constant_initializer(0))
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
        h_pool3 = avg_pool_2x2(h_conv3)

        # Fourth conv + pool stage: 32 -> 64 channels, leaving a 14x12x64 map.
        W_conv4 = tf.get_variable('d_wconv4', shape = [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_conv4 = tf.get_variable('d_bconv4', shape = [64], initializer=tf.constant_initializer(0))
        h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
        h_pool4 = avg_pool_2x2(h_conv4)

        # First fully connected layer: flatten 14x12x64 and project to 320.
        W_fc1 = tf.get_variable('d_wfc1', [14 * 12 * 64, 320], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc1 = tf.get_variable('d_bfc1', [320], initializer=tf.constant_initializer(0))
        h_pool4_flat = tf.reshape(h_pool4, [-1, 14 * 12 * 64]) # flatten conv features into vectors
        h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)

        # Second fully connected layer: 320 -> 80.
        W_fc2 = tf.get_variable('d_wfc2', [320, 80], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc2 = tf.get_variable('d_bfc2', [80], initializer=tf.constant_initializer(0))
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)


        # Output layer: 80 -> 1 logit, deliberately without an activation.
        W_fc3 = tf.get_variable('d_wfc3', [80, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
        b_fc3 = tf.get_variable('d_bfc3', [1], initializer=tf.constant_initializer(0))

        # Final layer
        y_conv=(tf.matmul(h_fc2, W_fc3) + b_fc3)
    return y_conv


# generator from DCGAN: take a d-dimensional vector as input and upsample it to become a 221*181 image
# the structure is from https://arxiv.org/pdf/1511.06434v2.pdf
def generator(z, batch_size, z_dim, reuse = False):
    """DCGAN generator: upsample a latent vector into a 221x181x1 image batch.

    The structure follows https://arxiv.org/pdf/1511.06434v2.pdf: the latent
    vector is reshaped into a small 4x3x64 spatial map, then six
    fractionally-strided (transposed) convolutions roughly double the spatial
    size at each step (4x3 -> 7x6 -> 14x12 -> 28x23 -> 56x46 -> 111x91 ->
    221x181), with batch norm + ReLU between layers and tanh at the output.

    Args:
        z: latent tensor; must be reshapable to [batch_size, 4, 3, 64],
            i.e. z_dim is expected to equal 4*3*64 = 768.
        batch_size: number of images to generate (fixed into output shapes).
        z_dim: latent dimensionality (kept for interface compatibility; the
            reshape below fixes the actual expected size).
        reuse: when True, reuse the existing 'generator' scope variables.

    Returns:
        H_conv6: generated images in [-1, 1], shape [batch_size, 221, 181, 1].
    """
    with tf.variable_scope('generator') as scope:
        if (reuse):
            tf.get_variable_scope().reuse_variables()
        g_dim = 64  # channel count of the first (smallest) feature map
        c_dim = 1   # output channels (grayscale)
        s_w = 221   # output image width
        s_h = 181   # output image height
        # Repeatedly halved sizes; "+1" below compensates for int() truncation
        # so each transposed conv exactly doubles into the next stage's size.
        s_w2, s_w4, s_w8, s_w16, s_w32, s_w64 = int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16), int(s_w/32), int(s_w/64)
        s_h2, s_h4, s_h8, s_h16, s_h32, s_h64 = int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16), int(s_h/32), int(s_h/64)

        # h0: [batch_size, 4, 3, 64] (s_w64+1 = 4, s_h64+1 = 3)
        h0 = tf.reshape(z, [batch_size, s_w64+1, s_h64+1, g_dim])
        h0 = tf.nn.relu(h0)

        # First deconvolution (fractionally-strided convolution): 4x3 -> 7x6.
        output1_shape = [batch_size, s_w32+1, s_h32+1, c_dim*256]
        # Transposed-conv filters are [height, width, out_channels, in_channels].
        W_conv1 = tf.get_variable('g_wconv1', shape = [5,5,output1_shape[-1],int(h0.get_shape()[-1])],
                                    initializer=tf.truncated_normal_initializer(stddev = 0.1)
                                    )
        b_conv1 = tf.get_variable('g_bconv1', shape = [output1_shape[-1]], initializer=tf.constant_initializer(.1))
        H_conv1 = tf.nn.conv2d_transpose(h0, W_conv1, output_shape = output1_shape, strides = [1,2,2,1],
                                         padding = 'SAME')
        H_conv1 = tf.add(H_conv1, b_conv1)
        H_conv1 = tf.contrib.layers.batch_norm(inputs = H_conv1, center=True, scale=True, is_training=True, scope="g_bn1")
        H_conv1 = tf.nn.relu(H_conv1)

        # Second deconvolution: 7x6 -> 14x12.
        output2_shape = [batch_size, s_w16+1, s_h16+1, c_dim*128]
        W_conv2 = tf.get_variable('g_wconv2', shape = [5,5,output2_shape[-1], int(H_conv1.get_shape()[-1])],
                                  initializer=tf.truncated_normal_initializer(stddev = 0.1))
        # Bug fix: this bias used truncated_normal_initializer(0.1), whose first
        # positional argument is the *mean* (stddev stayed 1.0). Use a constant
        # 0.1 like every other bias in this model.
        b_conv2 = tf.get_variable('g_bconv2', shape = [output2_shape[-1]], initializer=tf.constant_initializer(.1))
        H_conv2 = tf.nn.conv2d_transpose(H_conv1, W_conv2, output_shape = output2_shape, strides = [1,2,2,1],
                               padding = 'SAME')
        H_conv2 = tf.add(H_conv2, b_conv2)
        H_conv2 = tf.contrib.layers.batch_norm(inputs = H_conv2, center=True, scale=True, is_training=True, scope="g_bn2")
        H_conv2 = tf.nn.relu(H_conv2)

        # Third deconvolution: 14x12 -> 28x23.
        output3_shape = [batch_size, s_w8+1, s_h8+1, c_dim*64]
        W_conv3 = tf.get_variable('g_wconv3', [5, 5, output3_shape[-1], int(H_conv2.get_shape()[-1])],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv3 = tf.get_variable('g_bconv3', [output3_shape[-1]], initializer=tf.constant_initializer(.1))
        H_conv3 = tf.nn.conv2d_transpose(H_conv2, W_conv3, output_shape=output3_shape, strides=[1, 2, 2, 1],
                                         padding='SAME')
        H_conv3 = tf.add(H_conv3, b_conv3)
        H_conv3 = tf.contrib.layers.batch_norm(inputs = H_conv3, center=True, scale=True, is_training=True, scope="g_bn3")
        H_conv3 = tf.nn.relu(H_conv3)

        # Fourth deconvolution: 28x23 -> 56x46.
        output4_shape = [batch_size, s_w4+1, s_h4+1, c_dim*32]
        W_conv4 = tf.get_variable('g_wconv4', [5, 5, output4_shape[-1], int(H_conv3.get_shape()[-1])],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv4 = tf.get_variable('g_bconv4', [output4_shape[-1]], initializer=tf.constant_initializer(.1))
        H_conv4 = tf.nn.conv2d_transpose(H_conv3, W_conv4, output_shape=output4_shape, strides=[1, 2, 2, 1],
                                         padding='SAME')
        H_conv4 = tf.add(H_conv4, b_conv4)
        H_conv4 = tf.contrib.layers.batch_norm(inputs = H_conv4, center=True, scale=True, is_training=True, scope="g_bn4")
        H_conv4 = tf.nn.relu(H_conv4)

        # Fifth deconvolution: 56x46 -> 111x91.
        output5_shape = [batch_size, s_w2+1, s_h2+1, c_dim*16]
        W_conv5 = tf.get_variable('g_wconv5', [5, 5, output5_shape[-1], int(H_conv4.get_shape()[-1])],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv5 = tf.get_variable('g_bconv5', [output5_shape[-1]], initializer=tf.constant_initializer(.1))
        H_conv5 = tf.nn.conv2d_transpose(H_conv4, W_conv5, output_shape=output5_shape, strides=[1, 2, 2, 1],
                                         padding='SAME')
        H_conv5 = tf.add(H_conv5, b_conv5)
        H_conv5 = tf.contrib.layers.batch_norm(inputs = H_conv5, center=True, scale=True, is_training=True, scope="g_bn5")
        H_conv5 = tf.nn.relu(H_conv5)

        # Sixth deconvolution: 111x91 -> 221x181, tanh output in [-1, 1]
        # (no batch norm on the output layer, per the DCGAN paper).
        output6_shape = [batch_size, s_w, s_h, c_dim]
        W_conv6 = tf.get_variable('g_wconv6', [5, 5, output6_shape[-1], int(H_conv5.get_shape()[-1])],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv6 = tf.get_variable('g_bconv6', [output6_shape[-1]], initializer=tf.constant_initializer(.1))
        H_conv6 = tf.nn.conv2d_transpose(H_conv5, W_conv6, output_shape=output6_shape, strides=[1, 2, 2, 1],
                                         padding='SAME')
        H_conv6 = tf.add(H_conv6, b_conv6)
        H_conv6 = tf.nn.tanh(H_conv6)
        return H_conv6


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
第二片代码data_generate_CT.py设计pipeline用于读取batch数据:
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 15:40:11 2018
E-mail: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""

import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt


def get_files(file_dir):
    """List every file under file_dir and pair each with the constant label 1.

    Args:
        file_dir: directory path; file names are appended directly, so it is
            expected to end with a path separator.

    Returns:
        (image_list, label_list): image paths and int labels, shuffled in
        unison.
    """
    # Bug fix: the original built np.hstack image/label arrays that were
    # immediately overwritten below — dead code, removed.
    lung_img = [file_dir + name for name in os.listdir(file_dir)]
    label_lung_img = [1] * len(lung_img)

    # Shuffle paths and labels together by pairing them into one array.
    # (np.array stringifies the labels, so they are cast back to int below.)
    temp = np.array([lung_img, label_lung_img]).T
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = [int(i) for i in temp[:, 1]]
    return image_list, label_list
#       
#将上面生成的List传入get_batch() ,转换类型,产生一个输入队列queue,因为img和lab  
#是分开的,所以使用tf.train.slice_input_producer(),然后用tf.read_file()从队列中读取图像
def get_batch(image,label,batch_size):
    """Build a TF queue pipeline that yields shuffled batches of images/labels.

    Args:
        image: python list of image file paths.
        label: python list of int labels, same length as image.
        batch_size: number of samples per batch.

    Returns:
        image_batch: float32 tensor [batch_size, 221, 181, 1], standardized.
        label_batch: int32 label batch tensor.
    """

    image_W, image_H = 221, 181 

    # Convert the python lists into tensors the TF queues can consume.
    image=tf.cast(image,tf.string)
    label=tf.cast(label,tf.int32)

    # Build the input queue; images and labels are separate lists, so
    # tf.train.slice_input_producer pairs them up. num_epochs bounds the
    # queue so it cannot loop forever.
    epoch_num = 50
    input_queue=tf.train.slice_input_producer([image,label], num_epochs=epoch_num)

    label=input_queue[1]
    image_contents=tf.read_file(input_queue[0])
    # Decode the image bytes. Do not mix formats in one dataset: use only
    # jpeg, or only png, etc. (PNG is assumed here.)
    image=tf.image.decode_png(image_contents,channels=1)

    # Preprocess: crop/pad to a fixed size, then standardize each image to
    # zero mean / unit variance so the model gets stable inputs.
    image=tf.image.resize_image_with_crop_or_pad(image,image_W,image_H)
    image=tf.image.per_image_standardization(image)

    # Assemble shuffled batches.
    min_after_dequeue=1000
    capacity=min_after_dequeue+300*batch_size
    image_batch,label_batch=tf.train.shuffle_batch([image,label],batch_size=batch_size,num_threads=1024,capacity=capacity,min_after_dequeue=min_after_dequeue)

    # Reshape into [batch_size, W, H, C] and cast to float32.
#    label_batch=tf.reshape(label_batch,[batch_size])
    image_batch = tf.reshape(image_batch,[batch_size,image_W,image_H,1])
    image_batch=tf.cast(image_batch,np.float32)

    return image_batch, label_batch

if __name__ == "__main__":
    # Smoke test: read a few batches from the pipeline and display one image
    # per batch.
    file_dir='D:\\CT_data\\Data_preprocessing\\' # local directory holding the training images
    image_list, label_list = get_files(file_dir)
    image_batch, label_batch = get_batch(image_list, label_list, 28)
    with tf.Session() as sess:
        # Initialise global AND local variables (local ones back the
        # num_epochs counter inside slice_input_producer).
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        j = 0
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            while not coord.should_stop() and j<5: # j<5 force-stops this loop early; the file queue itself only ends after epoch_num epochs
                img, label = sess.run([image_batch, label_batch])

                # Just test one batch: show its first image.
                plt.imshow(img[0,:,:,0])
                plt.show()
                j+=1

        except tf.errors.OutOfRangeError:
            print('done!')
        finally:
            coord.request_stop()
            print('-----------')
        coord.join(threads)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
第三片代码train_CT.py用于训练GAN
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 14:57:23 2018

@author: DidiLv
"""

# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 09:42:35 2018
E-mail: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""
import model_CT

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
#import random
import data_generate_CT


# Local directory holding the training images.
file_dir='D:\\CT_data\\Data_preprocessing\\'

# Reset the default graph so repeated runs start from a clean slate.
tf.reset_default_graph()
batch_size = 10
image_W = 221
image_H = 181
image_C = 1
# Latent dimension must match the 4x3x64 tensor the generator reshapes z into.
z_dimensions = 4*3*64

image_list, label_list = data_generate_CT.get_files(file_dir)
image_batch, _ = data_generate_CT.get_batch(image_list, label_list, batch_size)

# Real images come straight from the input pipeline tensor.
x_placeholder = image_batch
# NOTE(review): np.random.normal(-1, 1, ...) samples N(mean=-1, std=1) ONCE as
# a fixed variable; the companion helper sample_z draws Uniform(-1, 1) — confirm
# which latent distribution is intended.
z_placeholder = tf.Variable(np.random.normal(-1, 1, size=[batch_size, z_dimensions]), dtype = tf.float32)

Dx = model_CT.discriminator(x_placeholder)                        # D logits for real images
Gz = model_CT.generator(z_placeholder, batch_size, z_dimensions)  # generated images
Dg = model_CT.discriminator(Gz, reuse=True)                       # D logits for fakes

# Generator wants D to call fakes real; D wants real -> 1 and fake -> 0.
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.ones_like(Dg)))
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dx, labels = tf.ones_like(Dx)))
# Bug fix: the fake-label tensor must mirror the Dg logits (was tf.zeros_like(Dx),
# which only worked because the two tensors happen to share a shape).
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels = tf.zeros_like(Dg)))
d_loss = d_loss_real + d_loss_fake

# Split trainable variables by the d_/g_ name prefixes used in model_CT so
# each optimizer only updates its own network.
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]

with tf.variable_scope(tf.get_variable_scope(), reuse = False):
    # var_list restricts which tf.Variables each minimize() call updates.
    trainerD = tf.train.AdadeltaOptimizer(learning_rate = 1e-3).minimize(d_loss, var_list = d_vars)
    trainerG = tf.train.AdadeltaOptimizer(learning_rate = 1e-3).minimize(g_loss, var_list = g_vars)

iterations = 3000
with tf.Session() as sess:
    # Initialise global AND local variables (local ones back the num_epochs
    # counter inside the input pipeline's slice_input_producer).
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    i = 0
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    try:
        # i<iterations force-stops training early; the file queue itself only
        # ends (raising OutOfRangeError) after epoch_num epochs.
        while not coord.should_stop() and i<iterations:
            print(i)
            _,dLoss = sess.run([trainerD, d_loss]) #Update the discriminator
            _,gLoss = sess.run([trainerG, g_loss]) #Update the generator
            print((dLoss+gLoss))
            i+=1

    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        coord.request_stop()
        print('-----------')
    coord.join(threads)

--------------------- 
作者:Eric2016_Lv 
来源:CSDN 
原文:https://blog.csdn.net/eric2016_lv/article/details/81239585 
版权声明:本文为博主原创文章,转载请附上博文链接!

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值