t-SNE Visualization of CNN Features

A quick note on how to implement t-SNE visualization of CNN features.

The code is below.

By working through this code, you can adapt the visualization to your own needs.

# Short version

The key to the visualization is the input features: once the features are fixed, the rest is just running a standard pipeline. A runnable sketch of that pipeline follows the snippet below.

    data, label, n_samples, n_features = get_data()    # fetch the feature matrix and labels
    print('Starting compute t-SNE Embedding...')
    # reduce the features to 2 dimensions with t-SNE
    ts = TSNE(n_components=2, init='pca', random_state=0)
    result = ts.fit_transform(data)
    # plot the embedding
    fig = plot_embedding(result, label, 't-SNE Embedding of digits')
    # show the figure
    plt.show()
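
As a self-contained illustration of that pipeline (a minimal sketch, not the author's data or model), the snippet below substitutes scikit-learn's built-in digits dataset for the CNN features; `get_data` and `plot_embedding` play the same roles as in the full version further down.

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn import datasets
    from sklearn.manifold import TSNE

    def get_data():
        # Stand-in for CNN features: the 8x8 digit images flattened to 64 features
        digits = datasets.load_digits(n_class=10)
        data = digits.data            # feature matrix, shape (n_samples, n_features)
        label = digits.target         # integer class labels
        n_samples, n_features = data.shape
        return data, label, n_samples, n_features

    def plot_embedding(data, label, title):
        # Min-max normalize the 2-D embedding so every point falls in [0, 1]
        x_min, x_max = np.min(data, 0), np.max(data, 0)
        data = (data - x_min) / (x_max - x_min)
        fig = plt.figure()
        for i in range(data.shape[0]):
            # Draw each sample as its class label, colored by class
            plt.text(data[i, 0], data[i, 1], str(label[i]),
                     color=plt.cm.Set1(label[i] / 10),
                     fontdict={'weight': 'bold', 'size': 7})
        plt.title(title, fontsize=14)
        return fig

    data, label, n_samples, n_features = get_data()
    ts = TSNE(n_components=2, init='pca', random_state=0)
    result = ts.fit_transform(data)
    plot_embedding(result, label, 't-SNE Embedding of digits')
    plt.show()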

# Full version

Here is the full pipeline; run it once and you can analyze the results yourself.

# -*- coding: utf-8 -*-
"""
Created on Wed Jul  7 11:55:08 2021

@author: 1
"""
import tensorflow as tf
from sklearn.manifold import TSNE
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils,plot_model
from sklearn.model_selection import cross_val_score,train_test_split,KFold
from sklearn.preprocessing import LabelEncoder
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.optimizers import SGD
from keras.layers import Dense,LSTM, Activation, Flatten, Convolution1D, Dropout,MaxPooling1D,BatchNormalization
from keras.models import load_model
from sklearn import preprocessing
# Load the data
df = pd.read_csv(r'C:/Users/1/Desktop/14改.csv')
X = np.expand_dims(df.values[:, 0:1024].astype(float), axis=2)
Y = df.values[:, 1024]
 
# Split into training and test sets (K = raw training labels, y = raw test labels)
X_train, X_test, K, y = train_test_split(X, Y, test_size=0.3, random_state=0)

# Encode the humidity class labels as integers, then one-hot encode them
encoder = LabelEncoder()
Y_encoded1 = encoder.fit_transform(K)
Y_train = np_utils.to_categorical(Y_encoded1)

Y_encoded2 = encoder.transform(y)  # reuse the label mapping fitted on the training labels
Y_test = np_utils.to_categorical(Y_encoded2)

# Define the neural network
def baseline_model():
    model = Sequential()
    model.add(Convolution1D(16, 64, strides=16, padding='same', input_shape=(1024, 1), activation='relu'))  # first convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(BatchNormalization())  # all arguments left at their Keras defaults

    model.add(Convolution1D(32, 3, padding='same', activation='relu'))  # second convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(BatchNormalization())

    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # third convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(BatchNormalization())

    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # fourth convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(BatchNormalization())

    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # fifth convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(BatchNormalization())

    model.add(Convolution1D(64, 3, padding='same', activation='relu'))  # sixth convolutional layer
    model.add(MaxPooling1D(2, strides=2, padding='same'))
    model.add(BatchNormalization())


    model.add(Dense(100, activation='relu'))        # dense layer applied to each timestep
    model.add(LSTM(64, return_sequences=True))      # first LSTM layer, keeps the sequence
    model.add(Dropout(0.5))
    model.add(LSTM(32))                             # second LSTM layer, returns only the last state
    model.add(Flatten())
    model.add(Dense(9, activation='softmax'))       # 9 output classes
    model.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
 
# Train the classifier
estimator = KerasClassifier(build_fn=baseline_model, epochs=3000, batch_size=128, verbose=1)
history=estimator.fit(X_train, Y_train, validation_data=(X_test, Y_test))

# Visualize an intermediate layer of the CNN and its t-SNE embedding
def visual(model, data, num_layer=1):
    # Build a function mapping the network input to the output of layer num_layer
    layer = keras.backend.function([model.layers[0].input], [model.layers[num_layer].output])
    f1 = layer([data])[0]
    np.set_printoptions(threshold=np.inf)
    print(f1.shape)
    print(f1)
    # Flatten the layer output into a 2-D feature matrix
    # (6034 training samples x 64 features, hard-coded for the author's data)
    f2 = f1.reshape(6034, 64)
    print(f2)
    num = f1.shape[-1]
    print(num)

    # Show each channel of the layer output as an image
    plt.figure(figsize=(6, 12), dpi=150)
    side = int(np.ceil(np.sqrt(num)))
    for i in range(num):
        plt.subplot(side, side, i + 1)
        plt.imshow(f1[:, :, i] * 255, cmap='prism')
        plt.axis('off')
    plt.show()

    def get_data():
        data = f2          # flattened layer output used as features
        label = K          # raw training labels
        n_samples = 6034   # number of training samples
        n_features = 64    # number of features per sample
        return data, label, n_samples, n_features

    # Normalize the embedding and draw each sample as its class label
    def plot_embedding(data, label, title):
        x_min, x_max = np.min(data, 0), np.max(data, 0)
        data = (data - x_min) / (x_max - x_min)     # min-max normalize to [0, 1]
        fig = plt.figure()                          # create the figure
        ax = plt.subplot(111)                       # create a subplot
        # Draw each data point as its label text, colored by class
        for i in range(data.shape[0]):
            plt.text(data[i, 0], data[i, 1], str(label[i]), color=plt.cm.Set1(label[i] / 10),
                     fontdict={'weight': 'bold', 'size': 7})
        plt.xticks()        # default tick locations
        plt.yticks()
        plt.title(title, fontsize=14)
        return fig

    # Fetch the features and labels, run t-SNE, and plot the embedding
    data, label, n_samples, n_features = get_data()
    print('Starting compute t-SNE Embedding...')
    ts = TSNE(n_components=2, init='pca', random_state=0)
    result = ts.fit_transform(data)
    fig = plot_embedding(result, label, 't-SNE Embedding of digits')
    plt.show()
    


# Visualize layer 20 of the trained model and its t-SNE embedding
visual(estimator.model, X_train, 20)
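
A note on the design: `keras.backend.function` wires the network input directly to an intermediate layer's output. An equivalent and arguably more readable alternative (a sketch under the assumption of the same Sequential model; `layer_features` is a hypothetical helper, not part of the original code) wraps the intermediate layer in a new `Model` and calls `predict`:

    from keras.models import Model

    def layer_features(model, data, num_layer):
        # Sub-model mapping the original input to the output of layer num_layer
        feature_model = Model(inputs=model.input, outputs=model.layers[num_layer].output)
        features = feature_model.predict(data)
        # Flatten everything after the sample axis into one feature vector per sample
        return features.reshape(features.shape[0], -1)

    # e.g. feed the flattened features of layer 20 straight into t-SNE:
    # feats = layer_features(estimator.model, X_train, 20)
    # embedding = TSNE(n_components=2, init='pca', random_state=0).fit_transform(feats)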

