EMFusion: An unsupervised enhanced medical image fusion network (surface constraints: saliency and abundance; deep constraints: the unique channels of the encoding)

1. Main framework of the network

2. Fusion framework

A network named FusionNet is trained to learn the mapping from the source images to the fusion result. It takes the channel-wise concatenation of the two source images as input and directly outputs the fused image:

import tensorflow as tf  # TensorFlow 1.x

WEIGHT_INIT_STDDEV = 0.05  # assumed value; the constant is not shown in the original post


class fuse(object):
	def __init__(self, scope_name):
		self.scope = scope_name
		self.weight_vars = []

		with tf.variable_scope(self.scope):
			with tf.variable_scope('fuse'):
				# The growing input-channel counts (64, 96, 128) indicate dense
				# connections: conv3, conv4 and conv6 each take the concatenation
				# of all preceding 32-channel feature maps as input.
				self.weight_vars.append(self._create_variables(2, 32, 3, scope = 'conv1'))
				self.weight_vars.append(self._create_variables(32, 32, 3, scope = 'conv2'))
				self.weight_vars.append(self._create_variables(64, 32, 3, scope = 'conv3'))
				self.weight_vars.append(self._create_variables(96, 32, 3, scope = 'conv4'))
				self.weight_vars.append(self._create_variables(128, 64, 3, scope = 'conv6'))
				self.weight_vars.append(self._create_variables(64, 32, 3, scope = 'conv7'))
				self.weight_vars.append(self._create_variables(32, 1, 3, scope = 'conv8'))

	# _create_variables is identical to Encoder._create_variables shown further below.
 
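The class above only declares the convolution kernels; the post does not show the forward pass. Below is a minimal sketch of what it might look like, assuming the dense connections implied by the input-channel counts (conv3 takes 32 + 32 = 64 channels, conv4 takes 96, conv6 takes 128), the conv2d helper defined at the end of this post, and a final tanh rescaling as in Decoder.decode; the method name fusion is hypothetical.

	# Hypothetical forward pass (not shown in the original post).
	def fusion(self, I_cat, is_training, reuse):
		# I_cat: channel-wise concatenation of the two source images, [N, H, W, 2].
		dense_outs = []
		out = I_cat
		for i in range(4):  # conv1 .. conv4: densely connected 32-channel layers
			kernel, bias = self.weight_vars[i]
			out = conv2d(out, kernel, bias, use_lrelu = True, is_training = is_training,
			             reuse = reuse, Scope = self.scope + '/fuse/b' + str(i))
			dense_outs.append(out)
			out = tf.concat(dense_outs, axis = -1)  # input of the next dense layer
		# out now has 4 * 32 = 128 channels, matching conv6's declared input count
		for i in range(4, 7):  # conv6 .. conv8
			kernel, bias = self.weight_vars[i]
			out = conv2d(out, kernel, bias, use_lrelu = (i != 6), is_training = is_training,
			             reuse = reuse, Scope = self.scope + '/fuse/b' + str(i))
		return tf.nn.tanh(out) / 2 + 0.5  # assumed output range [0, 1]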

3. Deep-level constraint: unique information channels

A network named TransNet is trained to learn …

Network structure:

4. Encoder-decoder network

class pC_ED(object):
	def __init__(self, sco):
		self.encoder = Encoder(sco)
		self.decoder = Decoder(sco)
		self.var_list = []
		self.features = None

	def transform(self, I, is_training, reuse):
		# Encode the input image, cache the feature maps (the "code"),
		# then decode them back into an image.
		code = self.encoder.encode(I, is_training, reuse)
		self.features = code
		I2 = self.decoder.decode(code, is_training, reuse)
		return I2


class Encoder(object):
	def __init__(self, scope_name):
		self.scope = scope_name
		self.var_list = []
		self.weight_vars = []

		with tf.variable_scope(self.scope):
			with tf.variable_scope('encoder'):
				# Three 3x3 convolutions map a 1-channel image to a 96-channel code.
				self.weight_vars.append(self._create_variables(1, 32, 3, scope = 'conv1'))
				self.weight_vars.append(self._create_variables(32, 64, 3, scope = 'conv2'))
				self.weight_vars.append(self._create_variables(64, 96, 3, scope = 'conv3'))

	def _create_variables(self, input_filters, output_filters, kernel_size, scope):
		shape = [kernel_size, kernel_size, input_filters, output_filters]
		with tf.variable_scope(scope):
			kernel = tf.Variable(tf.truncated_normal(shape, stddev = WEIGHT_INIT_STDDEV),
			                     name = 'kernel')
			bias = tf.Variable(tf.zeros([output_filters]), name = 'bias')
			self.var_list.append(kernel)
			self.var_list.append(bias)
		return (kernel, bias)

	def encode(self, image, is_training, reuse):
		out = image
		for i in range(len(self.weight_vars)):
			kernel, bias = self.weight_vars[i]
			out = conv2d(out, kernel, bias, use_lrelu = True, is_training = is_training,
			             reuse = reuse, Scope = self.scope + '/encoder/b' + str(i))
		return out


class Decoder(object):
	def __init__(self, scope_name):
		self.weight_vars = []
		self.var_list = []
		self.scope = scope_name
		# Use variable_scope (not name_scope) so the decoder variables live
		# under the same outer scope as the encoder's.
		with tf.variable_scope(self.scope):
			with tf.variable_scope('decoder'):
				# Three 3x3 convolutions map the 96-channel code back to a 1-channel image.
				self.weight_vars.append(self._create_variables(96, 64, 3, scope = 'conv2_1'))
				self.weight_vars.append(self._create_variables(64, 32, 3, scope = 'conv2_2'))
				self.weight_vars.append(self._create_variables(32, 1, 3, scope = 'conv2_3'))

	def _create_variables(self, input_filters, output_filters, kernel_size, scope):
		with tf.variable_scope(scope):
			shape = [kernel_size, kernel_size, input_filters, output_filters]
			kernel = tf.Variable(tf.truncated_normal(shape, stddev = WEIGHT_INIT_STDDEV), name = 'kernel')
			bias = tf.Variable(tf.zeros([output_filters]), name = 'bias')
			self.var_list.append(kernel)
			self.var_list.append(bias)
		return (kernel, bias)

	def decode(self, image, is_training, reuse):
		final_layer_idx = len(self.weight_vars) - 1

		out = image
		for i in range(len(self.weight_vars)):
			kernel, bias = self.weight_vars[i]
			if i == final_layer_idx:
				# Last layer: no leaky ReLU; tanh rescaled from [-1, 1] to [0, 1].
				out = conv2d(out, kernel, bias, use_lrelu = False,
				             Scope = self.scope + '/decoder/b' + str(i), is_training = is_training, reuse = reuse)
				out = tf.nn.tanh(out) / 2 + 0.5
			else:
				out = conv2d(out, kernel, bias, use_lrelu = True,
				             Scope = self.scope + '/decoder/b' + str(i), is_training = is_training, reuse = reuse)
		return out



def conv2d(x, kernel, bias, use_lrelu = True, Scope = None, stride = 1, is_training = False, reuse = False):
	# Pad with reflection so a 3x3 / stride-1 convolution preserves spatial size.
	# (The [1, 1] padding matches the 3x3 kernels used throughout this post.)
	x_padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode = 'REFLECT')
	# Convolve and add the bias.
	out = tf.nn.conv2d(input = x_padded, filter = kernel, strides = [1, stride, stride, 1], padding = 'VALID')
	out = tf.nn.bias_add(out, bias)
	# Scope / is_training / reuse are kept for an optional batch-norm branch
	# that is disabled in this version of the code.
	if use_lrelu:
		# Leaky ReLU with slope 0.1.
		out = tf.maximum(out, 0.1 * out)
	return out
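To make the encoder-decoder code concrete, here is a minimal pre-training sketch for pC_ED as a self-reconstruction autoencoder, followed by a per-channel activation statistic of the 96-channel code. The MSE loss, Adam optimizer, patch size, random stand-in data, and the channel statistic are all assumptions for illustration, not taken from the original post.

import numpy as np

PATCH_SIZE = 256  # assumed patch size
I_in = tf.placeholder(tf.float32, [None, PATCH_SIZE, PATCH_SIZE, 1], name = 'I_in')

ed = pC_ED('CT')  # one pC_ED per modality (e.g. a second instance scoped 'MRI')
I_rec = ed.transform(I_in, is_training = True, reuse = False)

# Self-reconstruction objective (assumed: plain mean squared error).
recon_loss = tf.reduce_mean(tf.square(I_rec - I_in))
train_op = tf.train.AdamOptimizer(1e-4).minimize(recon_loss)

# Mean activation per code channel, shape [96]. Comparing this statistic
# between the CT and MRI encoders is one (assumed) way to single out the
# channels that carry information unique to one modality.
channel_activity = tf.reduce_mean(ed.features, axis = [0, 1, 2])

with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())
	for step in range(1000):  # assumed step count
		# Random patches as a stand-in for a real CT data loader.
		batch = np.random.rand(8, PATCH_SIZE, PATCH_SIZE, 1).astype(np.float32)
		_, loss = sess.run([train_op, recon_loss], feed_dict = {I_in: batch})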
