matlabe基于残差学习的图像去噪_tensorflow写CAE用残差学习图像去噪无效果,求大神帮忙找出问题...

该楼层疑似违规已被系统折叠 隐藏此楼查看此楼

import tensorflow as tf

import os

import glob

import matplotlib.pyplot as plt

import numpy as np

#config.gpu_options.per_process_gpu_memory_fraction = 0.3

#set_session(tf.Session(config=config))

# images_dir 下存放着需要预处理的图像

images_dir = 'D:/zhtdata/data/BSR/BSDS500/data/images/train/'

# 查找图片文件, 根据具体数据集自由添加各种图片格式(jpg, jpeg, png, bmp等等)

images_paths = glob.glob(images_dir+'*.jpg')

images_paths += glob.glob(images_dir+'*.jpeg')

images_paths += glob.glob(images_dir+'*.png')

print('Find {} images, the first 10 image paths are:'.format(len(images_paths)))

for path in images_paths[:10]:

print(path)

# split training set and test data

test_split_factor = 0.2

n_test_path = int(len(images_paths)*test_split_factor)

# 转出numpy数据,方便使用

train_image_paths = np.asarray(images_paths[:-n_test_path])

test_image_paths = np.asarray(images_paths[-n_test_path:])

print('Number of train set is {}'.format(train_image_paths.shape[0]))

print('Number of test set is {}'.format(test_image_paths.shape[0]))

def gaussian_noise_layer(input_image, std):

noise = tf.random_normal(shape=tf.shape(input_image), mean=0.0, stddev=std, dtype=tf.float32)

noise_image = tf.cast(input_image, tf.float32) + noise

noise_image = tf.clip_by_value(noise_image, 0, 1.0)

return noise_image

def parse_data(filename):

'''

导入数据,进行预处理,输出两张图像,

分别是输入图像和目标图像(例如,在图像去噪中,输入的是一张带噪声图像,目标图像是无噪声图像)

Args:

filaneme, 图片的路径

Returns:

输入图像,目标图像

'''

# 读取图像

image = tf.read_file(filename)

# 解码图片

image = tf.image.decode_image(image)

# 数据预处理,或者数据增强,这一步根据需要自由发挥

# 随机提取patch

image = tf.random_crop(image, size=(128,128,3))

# 数据增强,随机水平翻转图像

image = tf.image.random_flip_left_right(image)

# 图像归一化

image = tf.cast(image, tf.float32) / 255.0

# 加噪声

n_image =gaussian_noise_layer(image, 0.4)

return n_image, image

def train_generator(batchsize, shuffle=True):

'''

生成器,用于生产训练数据

Args:

batchsize,训练的batch size

shuffle, 是否随机打乱batch

Returns:

训练需要的数据

'''

with tf.Session() as sess:

# 创建数据库

train_dataset = tf.data.Dataset().from_tensor_slices((train_image_paths))

# 预处理数据

train_dataset = train_dataset.map(parse_data)

# 设置 batch size

train_dataset = train_dataset.batch(batchsize)

# 无限重复数据

train_dataset = train_dataset.repeat()

# 洗牌,打乱

if shuffle:

train_dataset = train_dataset.shuffle(buffer_size=4)

# 创建迭代器

train_iterator = train_dataset.make_initializable_iterator()

sess.run(train_iterator.initializer)

train_batch = train_iterator.get_next()

# 开始生成数据

while True:

try:

x_batch, y_batch = sess.run(train_batch)

yield (x_batch, y_batch)

except:

# 如果没有 train_dataset = train_dataset.repeat()

# 数据遍历完就到end了,就会抛出异常

train_iterator = train_dataset.make_initializable_iterator()

sess.run(train_iterator.initializer)

train_batch = train_iterator.get_next()

x_batch, y_batch = sess.run(train_batch)

yield (x_batch, y_batch)

#%matplotlib inline

#%config InlineBackend.figure_format='retina'

def test_generator(batchsize, shuffle=True):

'''

生成器,用于生产测试数据

Args:

batchsize,训练的batch size

shuffle, 是否随机打乱batch

Returns:

训练需要的数据

'''

with tf.Session() as sess:

# 创建数据库

train_dataset = tf.data.Dataset().from_tensor_slices((test_image_paths))

# 预处理数据

train_dataset = train_dataset.map(parse_data)

# 设置 batch size

train_dataset = train_dataset.batch(batchsize)

# 无限重复数据

train_dataset = train_dataset.repeat()

# 洗牌,打乱

if shuffle:

train_dataset = train_dataset.shuffle(buffer_size=4)

# 创建迭代器

train_iterator = train_dataset.make_initializable_iterator()

sess.run(train_iterator.initializer)

train_batch = train_iterator.get_next()

# 开始生成数据

while True:

try:

x_batch, y_batch = sess.run(train_batch)

yield (x_batch, y_batch)

except:

# 如果没有 train_dataset = train_dataset.repeat()

# 数据遍历完就到end了,就会抛出异常

train_iterator = train_dataset.make_initializable_iterator()

sess.run(train_iterator.initializer)

train_batch = train_iterator.get_next()

x_batch, y_batch = sess.run(train_batch)

yield (x_batch, y_batch)

# 显示图像

def view_samples(samples, nrows, ncols, figsize=(5,5)):

fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize, sharey=True, sharex=True)

for ax, img in zip(axes.flatten(), samples):

ax.axis('off')

ax.set_adjustable('box-forced')

im = ax.imshow(img, aspect='equal')

plt.subplots_adjust(wspace=0, hspace=0)

plt.show()

return fig, axes

train_gen = train_generator(4)

test_gen = test_generator(4)

inputs_ = tf.placeholder(tf.float32,(None,128,128,3),name = 'inputs_')

targets_ = tf.placeholder(tf.float32,(None,128,128,3),name = 'targets_')

conv1 = tf.layers.conv2d(inputs_, 128, (3,3), padding='same', activation=tf.nn.relu)

conv2 = tf.layers.conv2d(conv1, 128, (3,3), padding='same', activation=tf.nn.relu)

conv3 = tf.layers.conv2d(conv2, 128, (3,3), padding='same', activation=tf.nn.relu)

conv4 = tf.layers.conv2d(conv3, 128, (3,3), padding='same', activation=tf.nn.relu)

conv5 = tf.layers.conv2d(conv4, 128, (3,3), padding='same', activation=tf.nn.relu)

conv6 = tf.layers.conv2d(conv5, 128, (3,3), padding='same', activation=tf.nn.relu)

conv7 = tf.layers.conv2d(conv6, 128, (3,3), padding='same', activation=tf.nn.relu)

conv8 = tf.layers.conv2d(conv7, 128, (3,3), padding='same', activation=tf.nn.relu)

conv9 = tf.layers.conv2d(conv8, 128, (3,3), padding='same', activation=tf.nn.relu)

conv10 = tf.layers.conv2d(conv9, 128, (3,3), padding='same', activation=tf.nn.relu)

conv11 = tf.layers.conv2d(conv10, 128, (3,3), padding='same', activation=tf.nn.relu)

conv12 = tf.layers.conv2d(conv11, 128, (3,3), padding='same', activation=tf.nn.relu)

conv13 = tf.layers.conv2d(conv12, 128, (3,3), padding='same', activation=tf.nn.relu)

conv14 = tf.layers.conv2d(conv13, 128, (3,3), padding='same', activation=tf.nn.relu)

conv15 = tf.layers.conv2d(conv14, 128, (3,3), padding='same', activation=tf.nn.relu)

deconv1 = conv14 + tf.layers.conv2d_transpose(conv15,128,(3,3), padding='same', activation=tf.nn.relu)

deconv2 = tf.layers.conv2d_transpose(deconv1,128,(3,3), padding='same', activation=tf.nn.relu)

deconv3 = conv12+tf.layers.conv2d_transpose(deconv2,128,(3,3), padding='same', activation=tf.nn.relu)

deconv4 =tf.layers.conv2d_transpose(deconv3,128,(3,3), padding='same', activation=tf.nn.relu)

deconv5 = conv10+tf.layers.conv2d_transpose(deconv4,128,(3,3), padding='same', activation=tf.nn.relu)

deconv6 = tf.layers.conv2d_transpose(deconv5,128,(3,3), padding='same', activation=tf.nn.relu)

deconv7 = conv8 + tf.layers.conv2d_transpose(deconv6,128,(3,3), padding='same', activation=tf.nn.relu)

deconv8 = tf.layers.conv2d_transpose(deconv7,128,(3,3), padding='same', activation=tf.nn.relu)

deconv9 = conv6 + tf.layers.conv2d_transpose(deconv8,128,(3,3), padding='same', activation=tf.nn.relu)

deconv10 = tf.layers.conv2d_transpose(deconv9,128,(3,3), padding='same', activation=tf.nn.relu)

deconv11 =conv4 + tf.layers.conv2d_transpose(deconv10,128,(3,3), padding='same', activation=tf.nn.relu)

deconv12 =tf.layers.conv2d_transpose(deconv11,128,(3,3), padding='same', activation=tf.nn.relu)

deconv13 =conv2 + tf.layers.conv2d_transpose(deconv12,128,(3,3), padding='same', activation=tf.nn.relu)

deconv14 = tf.layers.conv2d_transpose(deconv13,128,(3,3), padding='same', activation=tf.nn.relu)

deconv15 = tf.layers.conv2d_transpose(deconv14,3,(3,3), padding='same', activation=None)

outputs_ = tf.nn.relu(deconv15)

loss = tf.square(targets_-outputs_)

cost = tf.reduce_mean(loss) #整体求均值,可求行,列的均值

optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

sess = tf.Session()

epochs = 10

batch_size = 4

iteration = 40

sess.run(tf.global_variables_initializer())

for e in range(epochs):

for idx in range(iteration):

noise_x,x = next(train_gen)

test_noise,test_clean = next(test_gen)

batch_cost, _ = sess.run([cost, optimizer],

feed_dict={inputs_: noise_x,

targets_: noise_x-x})

print("Epoch: {}/{} ".format(e+1, epochs),

"Training loss: {:.4f}".format(batch_cost))

y_pre = sess.run(logits_, feed_dict={inputs_: test_noise})

_ = view_samples(test_noise, 1, 4)

_ = view_samples(test_clean, 1, 4)

y = test_noise-y_pre

_ = view_samples(y, 1,4 )

_ = view_samples(y_pre, 1,4 )

#==============================================================================

# for i in range(iteration):

# noise_x, x = next(train_gen)

#

# noise_x = noise_x.reshape(-1,40,40,1)

#

# batch_cost, _ = sess.run([cost, optimizer], feed_dict={inputs_: noise_x,

# targets_: x})

# print("Epoch: {}/{}...".format(e+1, epochs),

# "Training loss: {:.4f}".format(batch_cost))

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值