keras多输出和data_generator使用总结

https://stackoverflow.com/questions/44036971/multiple-outputs-in-keras

多输出loss

from sklearn.datasets import load_iris
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input, Model
import tensorflow as tf

# Iris: the first three features are the inputs; predict the fourth feature
# (regression) and the species label (classification) from one shared trunk.
features, labels = load_iris(return_X_y=True)
X = features[:, (0, 1, 2)]
Y = features[:, 3]
Z = labels

# Shared trunk with two heads: a single linear unit for the continuous
# target and a 3-way softmax for the class label.
inputs = Input(shape=(3,), name='input')
hidden = Dense(16, activation='relu', name='16')(inputs)
hidden = Dense(32, activation='relu', name='32')(hidden)
output1 = Dense(1, name='cont_out')(hidden)
output2 = Dense(3, activation='softmax', name='cat_out')(hidden)

model = Model(inputs=inputs, outputs=[output1, output2])

# Per-output losses and metrics are keyed by the output-layer names above.
model.compile(loss={'cont_out': 'mean_absolute_error',
                    'cat_out': 'sparse_categorical_crossentropy'},
              optimizer='adam',
              metrics={'cat_out': tf.metrics.SparseCategoricalAccuracy(name='acc')})

# Targets are likewise passed as a dict keyed by output-layer name.
history = model.fit(X, {'cont_out': Y, 'cat_out': Z}, epochs=10, batch_size=8)

结果如下
（原文此处为训练输出截图，图略）

使用案例2

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn.datasets import load_iris

# Keras defaults to float32; match NumPy's float64 so no casts are needed.
tf.keras.backend.set_floatx('float64')

features, labels = load_iris(return_X_y=True)

X = features[:, :3]   # first three measurements -> model input
y = features[:, 3]    # fourth measurement -> regression target
z = labels            # species id -> classification target

# One dataset yielding (input, regression target, class target) triples.
ds = tf.data.Dataset.from_tensor_slices((X, y, z)).shuffle(150).batch(8)

class MyModel(Model):
    """Shared two-layer trunk with a regression head and a 3-way softmax head."""

    def __init__(self):
        super().__init__()
        self.d0 = Dense(16, activation='relu')
        self.d1 = Dense(32, activation='relu')
        self.d2 = Dense(1)                        # regression head
        self.d3 = Dense(3, activation='softmax')  # classification head

    def call(self, x, training=None, **kwargs):
        # Run the shared trunk once, then branch into the two heads.
        trunk = self.d1(self.d0(x))
        return self.d2(trunk), self.d3(trunk)

# Model and optimizer.
model = MyModel()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

# One loss object per head.
loss_obj_reg = tf.keras.losses.MeanAbsoluteError()
loss_obj_cat = tf.keras.losses.SparseCategoricalCrossentropy()

# Epoch-level running means of each loss, plus per-head quality metrics.
loss_reg = tf.keras.metrics.Mean(name='regression loss')
loss_cat = tf.keras.metrics.Mean(name='categorical loss')
error_reg = tf.keras.metrics.MeanAbsoluteError()
error_cat = tf.keras.metrics.SparseCategoricalAccuracy()

@tf.function
def train_step(inputs, y_reg, y_cat):
    """Run one optimizer step on a batch and update the running metrics."""
    with tf.GradientTape() as tape:
        pred_reg, pred_cat = model(inputs)
        reg_loss = loss_obj_reg(y_reg, pred_reg)
        cat_loss = loss_obj_cat(y_cat, pred_cat)
        # Differentiating the sum is equivalent to summing per-loss gradients.
        total_loss = reg_loss + cat_loss

    grads = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    # Fold this batch into the epoch-level running metrics.
    loss_reg(reg_loss)
    loss_cat(cat_loss)
    error_reg(y_reg, pred_reg)
    error_cat(y_cat, pred_cat)


for epoch in range(50):
    # One full pass over the shuffled, batched dataset.
    for xx, yy, zz in ds:
        train_step(xx, yy, zz)

    # Epoch summary: categorical loss, regression MAE, classification accuracy.
    template = 'Epoch {:>2}, SCCE: {:>5.2f},' \
               ' MAE: {:>4.2f}, SAcc: {:>5.1%}'
    print(template.format(epoch+1,
                        loss_cat.result(),
                        error_reg.result(),
                        error_cat.result()))

    # Clear the running metrics so each epoch starts from scratch.
    for metric in (loss_reg, loss_cat, error_reg, error_cat):
        metric.reset_states()

（原文此处为训练输出截图，图略）

使用案例3

# Multi-output model demo: MNIST classifier + autoencoder sharing one trunk.
#
# Fixes vs. the original:
# - '%matplotlib inline' is an IPython/Jupyter magic and a syntax error in a
#   plain .py file; it is kept only as a comment below.
# - removed the dead 'model = Sequential()' (the variable is rebuilt later
#   with the functional API and the Sequential instance was never used).
# - deduplicated the double 'Dense' import and dropped a stray semicolon.
import random

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline  # Jupyter-only magic -- enable it in a notebook, not here

import tensorflow
import tensorflow.keras as keras
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Dense, Activation, Dropout, Input
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.datasets import mnist

# Loads quickly from the local Keras cache once downloaded.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_train.shape=(60000, 28, 28), y_train.shape=(60000,)
# x_test.shape=(10000, 28, 28),  y_test.shape=(10000,)
# Labels are the digits 0..9 (there is no 10).

# Flatten each 28x28 image into a 784-dimensional vector.
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)

# Scale pixel values into [0, 1].
x_train = x_train / 255
x_test = x_test / 255

# One-hot encode the labels, e.g. 5 -> [0, 0, 0, 0, 0, 1, 0, 0, 0, 0].
y_train = tensorflow.keras.utils.to_categorical(y_train, 10)
y_test = tensorflow.keras.utils.to_categorical(y_test, 10)

# ----------------- small-sample switch ---------------------------
# rate < 1 trains on a random subset; rate = 1 keeps the full training set.
rate = 1
index = random.sample(range(0, x_train.shape[0]), int(rate * x_train.shape[0]))
x_train = x_train[index, :]
y_train = y_train[index, :]
# ----------------- small-sample switch ---------------------------
class LossHistory(keras.callbacks.Callback):
    """Keras callback that records the combined loss and the per-output
    losses/accuracy after every batch and every epoch, with simple plotting
    helpers.

    Note: ``losses`` and the other dicts are ordinary attributes created by
    this callback, not Keras built-ins -- any attribute names would work.
    """
    def on_train_begin(self, logs={}):
        # Nothing recorded yet at the start of training; each dict keeps a
        # per-batch series and a per-epoch series and is appended to as we go.
        self.losses = {'batch':[], 'epoch':[]}
        self.classification_loss={'batch':[], 'epoch':[]}
        self.classification_acc={'batch':[], 'epoch':[]}
        self.decode_loss={'batch':[], 'epoch':[]}
        # self.accuracy = {'batch':[], 'epoch':[]}
        # self.val_loss = {'batch':[], 'epoch':[]}
        # self.val_acc = {'batch':[], 'epoch':[]}
 
    def on_batch_end(self, batch, logs={}):
        # Log keys follow the output-layer names ('class_out', 'de_out').
        # NOTE(review): with metrics=['accuracy'], newer Keras versions log
        # 'class_out_accuracy' rather than 'class_out_acc' -- confirm against
        # the keys actually printed during training, otherwise this series
        # silently fills with None.
        self.losses['batch'].append(logs.get('loss'))
        self.classification_loss['batch'].append(logs.get('class_out_loss'))
        self.classification_acc['batch'].append(logs.get('class_out_acc'))
        self.decode_loss['batch'].append(logs.get('de_out_loss'))
        
        # self.accuracy['batch'].append(logs.get('acc'))
        # self.val_loss['batch'].append(logs.get('val_loss'))
        # self.val_acc['batch'].append(logs.get('val_acc'))
 
    def on_epoch_end(self, batch, logs={}):
        # Same bookkeeping as on_batch_end, but into the 'epoch' series.
        self.losses['epoch'].append(logs.get('loss'))
        self.classification_loss['epoch'].append(logs.get('class_out_loss'))
        self.classification_acc['epoch'].append(logs.get('class_out_acc'))
        self.decode_loss['epoch'].append(logs.get('de_out_loss'))
        # self.accuracy['epoch'].append(logs.get('acc'))
        # self.val_loss['epoch'].append(logs.get('val_loss'))
        # self.val_acc['epoch'].append(logs.get('val_acc'))
    # The first plotted quantity is the cross-entropy of the classifier head.
    def classification_plot(self, loss_type):
        # Plot the classification head's accuracy and cross-entropy loss for
        # the chosen series ('batch' or 'epoch').
        iters = range(len(self.classification_loss[loss_type]))
        plt.figure()
        # acc
        plt.plot(iters, self.classification_acc[loss_type], 'r', label='classification acc')
        # loss
        plt.plot(iters, self.classification_loss[loss_type], 'g', label='classification cross entropy loss')
        
        #if loss_type == 'epoch':
            # val_acc
            #plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
            # val_loss
            #plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
        
        plt.grid(True)
        plt.xlabel("epoch")
        plt.ylabel('classfication')
        plt.legend(loc="upper right")
        plt.show()  # all of the calls above are required before show() displays anything
    def decode_loss_plot(self, loss_type):
        # Plot the decoder (reconstruction) loss for the chosen series.
        iters = range(len(self.decode_loss[loss_type]))
        plt.figure()
        # acc
        #plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')
        # loss
        plt.plot(iters, self.decode_loss[loss_type], 'g', label='decode loss')
        
        #if loss_type == 'epoch':
            # val_acc
            #plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
            # val_loss
            #plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
        
        plt.grid(True)
        plt.xlabel("epoch")
        plt.ylabel('decode-loss')
        #plt.legend(loc="upper right")
        plt.show()  # all of the calls above are required before show() displays anything
    def loss_plot(self, loss_type):
        # Plot the combined (weighted-sum) training loss for the chosen series.
        iters = range(len(self.losses[loss_type]))
        plt.figure()
        # acc
        #plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')
        # loss
        plt.plot(iters, self.losses[loss_type], 'g', label='train loss')
        
        #if loss_type == 'epoch':
            # val_acc
            #plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
            # val_loss
            #plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
        
        plt.grid(True)
        plt.xlabel("epoch")
        plt.ylabel('loss')
        #plt.legend(loc="upper right")
        plt.show()  # all of the calls above are required before show() displays anything
        
        
# ---- build the shared-trunk model ----
inputs = Input(shape = (784,),name="input")
h1 = Dense(512, activation = 'relu')(inputs)
h2 = Dense(128, activation = 'relu')(h1)
bottleneck = Dense(32,name="decoder", activation = 'relu')(h2)

# Classification head on the 32-unit bottleneck.
classification_output = Dense(10, name="class_out",activation = 'softmax')(bottleneck)

# Decoder head, also fed from the bottleneck (NOT from the class output).
u1 = Dense(128, activation = 'relu')(bottleneck)
u2 = Dense(512, activation = 'relu')(u1)
decoded_outputs = Dense(784,name="de_out")(u2)

model = Model(inputs, [classification_output, decoded_outputs])
model.summary()

plot_model(model, to_file='model.png', show_shapes=True)
history = LossHistory()

m = 256       # batch size
n_epoch = 10
# Losses and weights are matched to the outputs by position:
# class_out -> categorical cross-entropy (1.0), de_out -> MSE (0.5).
model.compile(optimizer='adam', loss=['categorical_crossentropy', 'mse'], loss_weights = [1.0, 0.5], metrics = ['accuracy'])
# Targets: [one-hot labels, the inputs themselves (autoencoder target)].
model.fit(x_train,[y_train, x_train], epochs=n_epoch, batch_size=m, shuffle=True,callbacks=[history])

history.loss_plot('epoch')
history.decode_loss_plot('epoch')
history.classification_plot('epoch')

（原文此处为三张结果图：总 loss 曲线、解码 loss 曲线、分类 loss/acc 曲线，图略）

fit_generator使用

https://gist.github.com/twolodzko/aa4f4ad52f16c293df40342929b025a4

import tensorflow as tf
import numpy as np

import matplotlib.pyplot as plt

from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras import regularizers
from keras.datasets import mnist
from keras import backend as K
from keras.utils import Sequence

# Side effect only: reports whether a GPU device is visible.
tf.test.gpu_device_name()

# Labels are not needed for an autoencoder, so they are discarded.
(x_train, _), (x_test, _) = mnist.load_data()

# Scale to [0, 1] and flatten each 28x28 image into a 784-vector.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = x_train.reshape((len(x_train), -1))
x_test = x_test.reshape((len(x_test), -1))

class DataGenerator(Sequence):
    """Keras Sequence yielding (noisy, clean) batches for a denoising autoencoder.

    Parameters
    ----------
    data : np.ndarray
        Samples, one per row (e.g. flattened images scaled into [0, 1]).
    batch_size : int
        Number of rows per batch; the trailing remainder is dropped.
    noisy : bool
        If True, batch inputs are the data plus Gaussian noise, clipped back
        into [0, 1]; batch targets are always the clean data.
    noise_factor : float
        Standard deviation of the additive Gaussian noise.
    shuffle : bool
        Kept for interface compatibility; the sample order is reshuffled
        every epoch regardless (see on_epoch_end).
    """

    def __init__(self, data, batch_size=32, noisy=False, noise_factor=0.5, shuffle=True):
        self.data = data
        self.data_noisy = None
        self.index = list(range(self.data.shape[0]))

        self.batch_size = batch_size
        self.noisy = noisy
        self.noise_factor = noise_factor
        self.shuffle = shuffle
        # Build the first epoch's noisy view and sample ordering up front.
        self.on_epoch_end()

    def __len__(self):
        # Number of complete batches per epoch.
        return len(self.data) // self.batch_size

    def __getitem__(self, index):
        # BUG FIX: `index` is the *batch number*, so the slice must start at
        # index * batch_size. The original started at `index` itself, which
        # produced heavily overlapping batches and never reached most rows.
        start = index * self.batch_size
        inputs, targets = [], []
        for i in range(start, start + self.batch_size):
            inputs.append(self.data_noisy[self.index[i]])
            targets.append(self.data[self.index[i]])
        return np.array(inputs), np.array(targets)

    def on_epoch_end(self):
        # Fresh sample order and fresh noise every epoch.
        np.random.shuffle(self.index)
        if self.noisy:
            noise = np.random.normal(loc=0.0, scale=self.noise_factor, size=self.data.shape)
            # BUG FIX: np.clip returns a new array; the original discarded its
            # result, leaving noisy values outside [0, 1] untouched.
            self.data_noisy = np.clip(self.data + noise, 0., 1.)
        else:
            self.data_noisy = self.data
# Generators feeding (noisy input, clean target) pairs.
train_gen = DataGenerator(x_train, noisy=True)
test_gen = DataGenerator(x_test, noisy=True)

encoding_dim = 32  # size of the compressed representation

# Single-hidden-layer autoencoder with an L1 activity penalty on the code.
input_img = Input(shape=(784,))
sparsity_penalty = regularizers.l1(1e-4)
encoded = Dense(encoding_dim, activation='relu',
                activity_regularizer=sparsity_penalty)(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)

# Encoder-only view reusing the same layers.
encoder = Model(input_img, encoded)

# Standalone decoder built around the autoencoder's final layer.
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))

autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

autoencoder.summary()

结果如下
（原文此处为 model.summary() 输出截图，图略）

# Train from the Sequence generators; validation uses the noisy test set.
# NOTE(review): fit_generator is deprecated and removed in newer Keras
# releases -- model.fit accepts Sequence objects directly there.
history = autoencoder.fit_generator(generator=train_gen,
                                    validation_data=test_gen,
                                    epochs=20)

（原文此处为训练日志截图，图略）

# Training vs. validation loss curves, in that order.
for series in ('loss', 'val_loss'):
    plt.plot(history.history[series])
plt.show()

（原文此处为 loss 曲线图，图略）

# Corrupt the clean test set with fresh Gaussian noise, clipped into [0, 1].
noise_factor = 0.6
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)

decoded_imgs = autoencoder.predict(x_test_noisy)

# Top row: noisy inputs. Bottom row: the autoencoder's reconstructions.
n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for col in range(n):
    # noisy original
    axis = plt.subplot(2, n, col + 1)
    plt.imshow(x_test_noisy[col].reshape(28, 28))
    plt.gray()
    axis.get_xaxis().set_visible(False)
    axis.get_yaxis().set_visible(False)

    # reconstruction
    axis = plt.subplot(2, n, col + 1 + n)
    plt.imshow(decoded_imgs[col].reshape(28, 28))
    plt.gray()
    axis.get_xaxis().set_visible(False)
    axis.get_yaxis().set_visible(False)
plt.show()

（原文此处为去噪重建结果对比图，图略）

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值