运行这个深度谱聚类网络的python程序 不报错也不停止啊 求大神指教~

代码 专栏收录该内容
2 篇文章 0 订阅

运行这个深度谱聚类网络的python程序 不报错也不停止啊 求大神指教~

from __future__ import division, print_function, absolute_import

import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import tensorflow as tf
from munkres import Munkres
from scipy.optimize import linear_sum_assignment
from scipy.sparse.linalg import svds
from sklearn import cluster
from sklearn.preprocessing import normalize
from tensorflow.contrib import layers
from tensorflow.examples.tutorials.mnist import input_data

class ConvAE(object):
    """Convolutional auto-encoder with a self-expressive layer for deep
    subspace clustering (single conv encoder layer + single deconv decoder
    layer), TensorFlow 1.x graph-mode style.

    Fixes vs. the pasted original: ``def init`` is now the real
    ``__init__`` constructor, the curly-quoted ``logs_path`` default is a
    valid string literal, and ``_initialize_weights`` uses
    ``self.n_hidden`` instead of leaking the module-level ``n_hidden``.
    """

    def __init__(self, n_input, kernel_size, n_hidden, reg_const1=1.0,
                 reg_const2=1.0, reg=None, batch_size=256, denoise=False,
                 model_path=None, logs_path='./logs/COIL20-logs'):
        # n_hidden is an array with the number of feature maps per layer.
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.reg = reg
        self.model_path = model_path
        self.kernel_size = kernel_size
        self.iter = 0
        self.batch_size = batch_size
        weights = self._initialize_weights()

        # model inputs
        self.x = tf.placeholder(tf.float32, [None, self.n_input[0], self.n_input[1], 1])
        self.learning_rate = tf.placeholder(tf.float32, [])

        if denoise == False:
            x_input = self.x
            latent, shape = self.encoder(x_input, weights)
        else:
            # Denoising variant: corrupt the input with additive Gaussian noise.
            x_input = tf.add(self.x, tf.random_normal(shape=tf.shape(self.x),
                                                      mean=0,
                                                      stddev=0.2,
                                                      dtype=tf.float32))
            latent, shape = self.encoder(x_input, weights)

        # Flatten conv features to (batch, features) for self-expression.
        self.z_conv = tf.reshape(latent, [batch_size, -1])
        self.z_ssc, Coef = self.selfexpressive_moduel(batch_size)
        self.Coef = Coef
        latent_de_ft = tf.reshape(self.z_ssc, tf.shape(latent))
        self.x_r_ft = self.decoder(latent_de_ft, weights, shape)

        # Exclude the self-expressive coefficient matrix "Coef" from the
        # checkpoint so the pretrained auto-encoder weights can be restored
        # into graphs built with a different batch size.
        self.saver = tf.train.Saver(
            [v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))])

        # Losses: self-expression error, reconstruction error, and the
        # Frobenius-norm regularizer on Coef.
        self.cost_ssc = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.z_conv, self.z_ssc), 2))
        self.recon_ssc = tf.reduce_sum(tf.pow(tf.subtract(self.x_r_ft, self.x), 2.0))
        self.reg_ssc = tf.reduce_sum(tf.pow(self.Coef, 2))
        tf.summary.scalar("ssc_loss", self.cost_ssc)
        tf.summary.scalar("reg_lose", self.reg_ssc)

        self.loss_ssc = self.cost_ssc * reg_const2 + reg_const1 * self.reg_ssc + self.recon_ssc

        self.merged_summary_op = tf.summary.merge_all()
        self.optimizer_ssc = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.loss_ssc)
        self.init = tf.global_variables_initializer()
        self.sess = tf.InteractiveSession()
        self.sess.run(self.init)
        self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

    def _initialize_weights(self):
        """Create encoder/decoder conv kernels and biases.

        Xavier initialization keeps gradient magnitudes roughly equal
        across layers.
        """
        all_weights = dict()
        all_weights['enc_w0'] = tf.get_variable(
            "enc_w0",
            shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
            initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
        all_weights['enc_b0'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype=tf.float32))

        all_weights['dec_w0'] = tf.get_variable(
            "dec_w0",
            shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
            initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
        all_weights['dec_b0'] = tf.Variable(tf.zeros([1], dtype=tf.float32))
        return all_weights

    # Building the encoder
    def encoder(self, x, weights):
        """Single strided conv + ReLU; also records the input shape so the
        decoder can invert the stride-2 convolution."""
        shapes = []
        shapes.append(x.get_shape().as_list())
        layer1 = tf.nn.bias_add(
            tf.nn.conv2d(x, weights['enc_w0'], strides=[1, 2, 2, 1], padding='SAME'),
            weights['enc_b0'])
        layer1 = tf.nn.relu(layer1)
        return layer1, shapes

    # Building the decoder
    def decoder(self, z, weights, shapes):
        """Transposed convolution back to the recorded input shape + ReLU."""
        shape_de1 = shapes[0]
        layer1 = tf.add(
            tf.nn.conv2d_transpose(
                z, weights['dec_w0'],
                tf.stack([tf.shape(self.x)[0], shape_de1[1], shape_de1[2], shape_de1[3]]),
                strides=[1, 2, 2, 1], padding='SAME'),
            weights['dec_b0'])
        layer1 = tf.nn.relu(layer1)
        return layer1

    def selfexpressive_moduel(self, batch_size):
        """Self-expressive layer: z_ssc = Coef @ z_conv, where Coef is a
        trainable (batch_size x batch_size) coefficient matrix.

        NOTE(review): method name keeps the original spelling
        ("moduel") so external callers are not broken.
        """
        Coef = tf.Variable(1.0e-8 * tf.ones([self.batch_size, self.batch_size], tf.float32),
                           name='Coef')
        z_ssc = tf.matmul(Coef, self.z_conv)
        return z_ssc, Coef

    def finetune_fit(self, X, lr):
        """Run one optimization step and log summaries; returns the current
        coefficient matrix and the two loss components."""
        C, l1_cost, l2_cost, summary, _ = self.sess.run(
            (self.Coef, self.reg_ssc, self.cost_ssc, self.merged_summary_op, self.optimizer_ssc),
            feed_dict={self.x: X, self.learning_rate: lr})
        self.summary_writer.add_summary(summary, self.iter)
        self.iter = self.iter + 1
        return C, l1_cost, l2_cost

    def initlization(self):
        """Re-run the variable initializer (name kept from the original for
        caller compatibility). The session still holds the graph built in
        __init__, so self.init remains runnable after the reset."""
        tf.reset_default_graph()
        self.sess.run(self.init)

    def transform(self, X):
        """Return the flattened encoder features for a batch X."""
        return self.sess.run(self.z_conv, feed_dict={self.x: X})

    def save_model(self):
        save_path = self.saver.save(self.sess, self.model_path)
        print("model saved in file: %s" % save_path)

    def restore(self):
        self.saver.restore(self.sess, self.model_path)
        print("model restored")

def best_map(L1, L2):
    """Permute cluster labels L2 to best match ground-truth labels L1.

    Builds the confusion matrix G (G[i, j] = #samples with true class
    Label1[i] and predicted class Label2[j]) and solves the optimal
    assignment that maximizes total overlap, then relabels L2 accordingly.

    Replaces the third-party Munkres solver with
    scipy.optimize.linear_sum_assignment (same optimum, scipy is already a
    dependency of this file).
    """
    # L1 should be the labels and L2 should be the clustering number we got
    Label1 = np.unique(L1)
    nClass1 = len(Label1)
    Label2 = np.unique(L2)
    nClass2 = len(Label2)
    nClass = np.maximum(nClass1, nClass2)
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        ind_cla1 = (L1 == Label1[i]).astype(float)
        for j in range(nClass2):
            ind_cla2 = (L2 == Label2[j]).astype(float)
            G[i, j] = np.sum(ind_cla2 * ind_cla1)
    # Minimize -overlap on G.T == maximize overlap; row k of G.T is the
    # k-th predicted class, so c[k] is its matched true-class index.
    _, c = linear_sum_assignment(-G.T)
    newL2 = np.zeros(L2.shape)
    for i in range(nClass2):
        newL2[L2 == Label2[i]] = Label1[c[i]]
    return newL2

def thrC(C, ro):
    """Threshold coefficient matrix C column-wise, keeping the largest
    entries (by absolute value) until their sum reaches fraction `ro` of
    the column's total absolute mass. ro >= 1 returns C unchanged.

    Fixes the scrape-garbled `np.abs(C)` calls (rendered as `np.abs©`).
    """
    if ro < 1:
        N = C.shape[1]
        Cp = np.zeros((N, N))
        S = np.abs(np.sort(-np.abs(C), axis=0))   # abs values, descending
        Ind = np.argsort(-np.abs(C), axis=0)      # matching row indices
        for i in range(N):
            cL1 = np.sum(S[:, i]).astype(float)
            stop = False
            csum = 0
            t = 0
            while (stop == False):
                csum = csum + S[t, i]
                if csum > ro * cL1:
                    stop = True
                    # keep the top t+1 entries of column i, zero the rest
                    Cp[Ind[0:t + 1, i], i] = C[Ind[0:t + 1, i], i]
                t = t + 1
    else:
        Cp = C

    return Cp

def post_proC(C, K, d, alpha):
    """Spectral clustering on the learned coefficient matrix.

    C: coefficient matrix, K: number of clusters, d: dimension of each
    subspace, alpha: exponent sharpening the affinity. Returns cluster
    labels in 1..K and the affinity matrix L.

    Fixes vs. the pasted original: garbled `np.diag(C)` call restored,
    curly quotes in the sklearn keyword arguments replaced with valid
    string literals, and the redundant `spectral.fit(L)` before
    `fit_predict(L)` removed (fit_predict fits internally).
    """
    n = C.shape[0]
    C = 0.5 * (C + C.T)
    # for sparse C, this step will make the algorithm more numerically stable
    C = C - np.diag(np.diag(C)) + np.eye(n, n)
    r = d * K + 1
    U, S, _ = svds(C, r, v0=np.ones(n))
    U = U[:, ::-1]                   # svds returns ascending order; reverse
    S = np.sqrt(S[::-1])
    S = np.diag(S)
    U = U.dot(S)
    # L2-normalize the rows of the embedding
    U = normalize(U, norm='l2', axis=1)
    Z = U.dot(U.T)
    Z = Z * (Z > 0)                  # keep non-negative similarities only
    L = np.abs(Z ** alpha)
    L = L / L.max()
    L = 0.5 * (L + L.T)              # re-symmetrize after the power
    spectral = cluster.SpectralClustering(n_clusters=K, eigen_solver='arpack',
                                          affinity='precomputed',
                                          assign_labels='discretize')
    grp = spectral.fit_predict(L) + 1
    return grp, L

def err_rate(gt_s, s):
    """Clustering error rate: fraction of samples misassigned after
    optimally permuting the predicted labels onto the ground truth."""
    mapped = best_map(gt_s, s)
    n_wrong = np.sum(gt_s[:] != mapped[:])
    return n_wrong.astype(float) / (gt_s.shape[0])

def test_face(Img, Label, CAE, num_class):
    """Fine-tune the pretrained auto-encoder and evaluate clustering
    accuracy on COIL20.

    Img: image tensor (n, 32, 32, 1); Label: ground-truth labels;
    CAE: a ConvAE instance with a restorable checkpoint; num_class:
    number of clusters. Returns (mean error, median error).
    """
    alpha = 0.04  # sparsity threshold passed to thrC
    print(alpha)

    acc_ = []
    for i in range(0, 1):
        coil20_all_subjs = Img
        coil20_all_subjs = coil20_all_subjs.astype(float)
        label_all_subjs = Label
        # shift labels to start at 1 and drop singleton dimensions
        label_all_subjs = label_all_subjs - label_all_subjs.min() + 1
        label_all_subjs = np.squeeze(label_all_subjs)

        CAE.initlization()
        CAE.restore()

        max_step = 30  # 50 + num_class*25# 100+num_class*20
        display_step = max_step  # 10
        lr = 1.0e-3
        # fine-tune network
        iter_ft = 0
        while iter_ft < max_step:
            iter_ft = iter_ft + 1
            C, l1_cost, l2_cost = CAE.finetune_fit(coil20_all_subjs, lr)
            # FIX: normalize by CAE.batch_size instead of a module-level
            # `batch_size` global (NameError when imported as a module).
            print("epoch: %.1d" % iter_ft,
                  "cost: %.8f" % (l1_cost / float(CAE.batch_size)))
            C = thrC(C, alpha)

            y_x, CKSym_x = post_proC(C, num_class, 12, 8)
            missrate_x = err_rate(label_all_subjs, y_x)

            acc = 1 - missrate_x
            print("experiment: %d" % i, "acc: %.4f" % acc)
        acc_.append(acc)

    acc_ = np.array(acc_)
    m = np.mean(acc_)
    me = np.median(acc_)
    print("%d subjects:" % num_class)
    print("Mean: %.4f%%" % ((1 - m) * 100))
    print("Median: %.4f%%" % ((1 - me) * 100))
    print(acc_)

    return (1 - m), (1 - me)

if __name__ == '__main__':
    # Load COIL20: 'fea' holds flattened images, 'gnd' the class labels.
    data = sio.loadmat('./Data/COIL20.mat')
    Img = data['fea']
    Label = data['gnd']
    Img = np.reshape(Img, (Img.shape[0], 32, 32, 1))

    n_input = [32, 32]
    kernel_size = [3]
    n_hidden = [15]
    batch_size = 20 * 72  # 20 objects x 72 views each

    all_subjects = [20]
    avg = []
    med = []

    iter_loop = 0
    while iter_loop < len(all_subjects):
        num_class = all_subjects[iter_loop]
        batch_size = num_class * 72
        reg1 = 1.0    # lambda1 in the paper (regularization coefficient)
        reg2 = 150.0  # lambda2 in the paper (regularization coefficient)

        model_path = './pretrain-model-COIL20/model.ckpt'
        ft_path = './pretrain-model-COIL20/model.ckpt'
        logs_path = './logs/COIL20-logs'

        # Start from a clean graph for each subject count.
        tf.reset_default_graph()
        CAE = ConvAE(n_input=n_input, n_hidden=n_hidden, reg_const1=reg1,
                     reg_const2=reg2, kernel_size=kernel_size,
                     batch_size=batch_size, model_path=model_path,
                     logs_path=logs_path)

        avg_i, med_i = test_face(Img, Label, CAE, num_class)
        avg.append(avg_i)
        med.append(med_i)
        iter_loop = iter_loop + 1

    iter_loop = 0
    while iter_loop < len(all_subjects):
        num_class = all_subjects[iter_loop]
        print('%d subjects:' % num_class)
        print('Mean: %.4f%%' % (avg[iter_loop] * 100),
              'Median: %.4f%%' % (med[iter_loop] * 100))
        iter_loop = iter_loop + 1

  • 0
    点赞
  • 2
    评论
  • 0
    收藏
  • 一键三连
    一键三连
  • 扫一扫,分享海报

©️2021 CSDN 皮肤主题: 1024 设计师:白松林 返回首页
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、C币套餐、付费专栏及课程。

余额充值