Exploring Keras: losses, metrics, and the softmax layer

When first using Keras it is easy to be unsure how many of its functions actually compute their results, so the examples below verify the relevant Keras functions by re-implementing the calculations by hand.




'''
Notes:
1. Test1() checks how Keras computes acc and loss.
   Accuracy (acc) is the fraction of samples in the current batch that are predicted correctly.
2. metrics
   (1) The metric functions live in keras.metrics.
       The following functions test them:
       # Test_binary_accuracy()
       # Test_categorical_accuracy()
       # Test_sparse_categorical_accuracy()
       # Test_top_k_categorical_accuracy()
3. losses
   The loss functions live in keras.losses.
   # Test_mean_squared_error verifies the mean_squared_error computation.
   # Test1 verifies the categorical_crossentropy loss function.
   # The formula Keras uses is described in section 2.2 of https://blog.csdn.net/a984297068/article/details/81197893
   The Keras loss-function source code is at https://github.com/keras-team/keras/blob/master/keras/losses.py

4. softmax layer
   When the last layer uses the softmax activation, the previous layer's output is first mapped to pre-activations a_i = (w_i)x + b,
   and the layer then outputs exp(a_i) / (exp(a_1) + exp(a_2) + ... + exp(a_n)).
   Note that a = wx + b is a fully connected computation without weight sharing, and the input must be flattened (with a Flatten layer) before the softmax layer.

'''

import numpy as np

import matplotlib.pyplot as plt

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D,BatchNormalization,PReLU
from keras.layers import MaxPool2D, AveragePooling2D, Activation, Embedding
import keras.backend as K
from keras.utils import np_utils
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard

from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator

from keras.models import Model, Input
from keras.optimizers import SGD
from keras.applications.resnet50 import ResNet50

import math
import tensorflow as tf
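
# A minimal NumPy sketch of the softmax computation described in note 4 of the
# header comment. The pre-activation values below are made up for illustration;
# this helper is not called by any of the tests.
def Demo_Softmax():
    a = np.array([1.0, 2.0, 0.5])            # pre-activations a_i = (w_i)x + b
    softmax = np.exp(a) / np.sum(np.exp(a))  # exp(a_i) / sum_j exp(a_j)
    print("softmax", softmax, "sum", np.sum(softmax))  # the outputs sum to 1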

def ObtainLayerOutput(input_model,input_layer_name,input_data):
    # Build a sub-model from the original model's input to the named layer's output, then predict with it
    target_layer = Model(inputs=input_model.input, outputs=input_model.get_layer(input_layer_name).output)
    layer_output = target_layer.predict(input_data)
    return layer_output

def ObtainLayerWeightsAndBias(model,input_layer_name):
    # Get the weights (and bias, if any) of the named layer
    weights = model.get_layer(input_layer_name).get_weights()
    return weights

def Compute_Categorical_Crossentropy(labels,output):
    # Categorical cross-entropy averaged over the batch:
    # loss = -(1/N) * sum_i sum_j labels[i][j] * log(output[i][j])
    num=np.shape(labels)[0]
    c_log=np.log(output)
    A=np.multiply(labels,c_log)
    return A.sum()*(-1)/num
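
# A minimal hand check of Compute_Categorical_Crossentropy on made-up numbers;
# this helper is illustrative only and is not called by the tests below.
def Demo_Categorical_Crossentropy():
    # sample 1: label [1, 0], prediction [0.8, 0.2] -> contributes -log(0.8)
    # sample 2: label [0, 1], prediction [0.4, 0.6] -> contributes -log(0.6)
    demo_labels = np.array([[1.0, 0.0], [0.0, 1.0]])
    demo_output = np.array([[0.8, 0.2], [0.4, 0.6]])
    # expected: (-log(0.8) - log(0.6)) / 2, roughly 0.367
    print("demo categorical crossentropy", Compute_Categorical_Crossentropy(demo_labels, demo_output))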



def Test1():
    #1 Make the training data
    train_data = np.array([2, 2, 3,
                           2, 3, 4,
                           2, 4, 5,
                           1, 5, 7])
    train_data = train_data.astype('float32')
    train_data /= 10.0
    train_data = np.reshape(train_data, [4, 3, 1])
    labels = np.mat(([0], [1], [0], [1]))  # shape (4, 1); becomes (4, 2) after to_categorical
    labels_num = 2
    labels = keras.utils.to_categorical(labels, labels_num)  # the labels must be a column vector before this call

    #print(train_data.dtype, np.shape(train_data), train_data)
    #print(labels.dtype, np.shape(labels), labels)
    #return train_data, labels, labels_num

    #2 Build, train, and evaluate the model
    input_shape = (3, 1)
    model = Sequential()
    model.add(Dense(1, use_bias=False,
                    input_shape=input_shape))  # optionally add activation='relu' or 'sigmoid' here
    model.add(Flatten())

    model.add(Dense(labels_num, use_bias=False, activation='softmax'))  # second fully connected layer with a softmax output
    model.summary()

    # compile the model
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    # train the model

    model.fit(train_data, labels,
              batch_size=4,
              epochs=2,
              verbose=1)
    #model.save_weights("weight\\test1.h5")  # create an HDF5 weights file

    score = model.evaluate(train_data, labels, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    th1 = np.reshape(train_data[0], [1, 3, 1])
    th2 = np.reshape(train_data[1], [1, 3, 1])
    th3 = np.reshape(train_data[2], [1, 3, 1])
    th4 = np.reshape(train_data[3], [1, 3, 1])
    th1_out = model.predict(th1)
    th2_out = model.predict(th2)
    th3_out = model.predict(th3)
    th4_out = model.predict(th4)

    print("打印各个输入参数的预测值,用来验证准确率参数")
    print("th1", th1_out,np.shape( th1_out))
    print("th2", th2_out,np.shape( th2_out))
    print("th3", th3_out,np.shape( th3_out))
    print("th4", th4_out,np.shape( th4_out))
    print("---------------------------------------------")
    print("验证损失函数")
    print("输入参数", th1, np.shape(th1))

    flatten_1_output = ObtainLayerOutput(model, 'flatten_1', th1)
    print("flatten_1层输出", flatten_1_output, np.shape(flatten_1_output))

    m_weights_dense_2 = ObtainLayerWeightsAndBias(model, 'dense_2')
    print("m_weights_dense_2", m_weights_dense_2, np.shape(m_weights_dense_2))

    final_output=np.matmul(flatten_1_output, m_weights_dense_2[0])
    final_output_e=np.exp(final_output)

    v0 = final_output_e[0][0]/(final_output_e[0][1]+final_output_e[0][0])
    v1 = final_output_e[0][1] / (final_output_e[0][1] + final_output_e[0][0])
    print("final_output",v0,v1)
    print("softmax函数验证完成最终输出final_output与 th1相同")
    print("---------------------------------------------")
    print("对损失函数进行测试")
    all_out_put = np.append(th1_out, th2_out, axis=0)
    all_out_put = np.append(all_out_put, th3_out, axis=0)
    all_out_put = np.append(all_out_put, th4_out, axis=0)
    print("all_out_put",all_out_put,np.shape(all_out_put))

    m_loss=Compute_Categorical_Crossentropy(labels, all_out_put)
    print("m_loss",m_loss)
    print("验证集的损失Test loss和m_loss一致,验证成功")

def Test_binary_accuracy():
    y_true =np.array([0.0, 0.0, 1.0, 0.0])
    y_pred=np.array([0.0, 0.5, 0.83, 0.6])
    # This is the formula binary_accuracy uses: round the predictions and compare them with the labels.
    # (This line also adds a "Mean" op to the graph, which is why the metric tensor fetched below is "Mean_1:0".)
    K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
    print("y_true",np.shape(y_true))
    print("y_pred",np.shape(y_pred))
    m_binary_accuracy = keras.metrics.binary_accuracy(y_true, y_pred)
    print("binary_accuracy",m_binary_accuracy,np.shape(m_binary_accuracy))
    with tf.Session() as sess:
        print(sess.run(tf.get_default_graph().get_tensor_by_name("Mean_1:0")))
    # Output: 0.75
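
# The same binary_accuracy check in plain NumPy (a made-up helper, not part of
# the original test): round the predictions, compare with the labels, average.
def Demo_binary_accuracy_numpy():
    y_true = np.array([0.0, 0.0, 1.0, 0.0])
    y_pred = np.array([0.0, 0.5, 0.83, 0.6])
    acc = np.mean(np.equal(y_true, np.round(y_pred)))
    print("numpy binary_accuracy", acc)  # 0.75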

def Test_categorical_accuracy():
    y_true = np.array([0, 1, 0,#1
                       0, 0, 1,#2
                       1, 0, 0,#0
                       0, 1, 0])#1
    y_pred = np.array([0.3, 0.5, 0.2,#1
                       0.1, 0.8, 0.1,#1
                       0.6, 0.2, 0.2,#0
                       0.2,0.5, 0.3])#1
    y_true=np.reshape(y_true,(4,3))
    y_pred = np.reshape(y_pred,(4, 3))
    #print("y_true",y_true,np.shape(y_true))
    #print("y_pred", y_pred, np.shape(y_pred))
    m_categorical_accuracy = keras.metrics.categorical_accuracy(y_true, y_pred)
    #print("m_categorical_accuracy",m_categorical_accuracy)
    with tf.Session() as sess:
        print(sess.run(tf.get_default_graph().get_tensor_by_name("Cast:0")))
    # Output: [1. 0. 1. 1.]
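
# The same check in plain NumPy (a made-up helper, not part of the original test):
# categorical_accuracy compares the argmax of y_true with the argmax of y_pred per sample.
def Demo_categorical_accuracy_numpy():
    y_true = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
    y_pred = np.array([[0.3, 0.5, 0.2], [0.1, 0.8, 0.1], [0.6, 0.2, 0.2], [0.2, 0.5, 0.3]])
    acc = np.equal(np.argmax(y_true, axis=-1), np.argmax(y_pred, axis=-1)).astype(np.float32)
    print("numpy categorical_accuracy", acc)  # [1. 0. 1. 1.]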

'''
Test_sparse_categorical_accuracy
For sparse multi-class labels: here y_true is the integer index of the true class.
'''
def Test_sparse_categorical_accuracy():

    y_true = np.array([1.0,
                       2.0,
                       0.0,
                       1.0],dtype=np.float32)
    y_pred = np.array([0.3, 0.5, 0.2,  # 1
                       0.1, 0.8, 0.1,  # 1
                       0.6, 0.2, 0.2,  # 0
                       0.2, 0.5, 0.3])  # 1
    #y_true = np.reshape(y_true, (4, 1))
    y_pred = np.reshape(y_pred, (4, 3))


    print("y_true",y_true,np.shape(y_true))
    print("y_pred", y_pred, np.shape(y_pred))
    m_sparse_categorical_accuracy = keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
    print("m_sparse_categorical_accuracy",m_sparse_categorical_accuracy)

    with tf.Session() as sess:
       print(sess.run(tf.get_default_graph().get_tensor_by_name("Cast_1:0")))
    # Output: [1. 0. 1. 1.]
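
# The same check in plain NumPy (a made-up helper, not part of the original test):
# sparse_categorical_accuracy compares the integer label with the argmax of y_pred per sample.
def Demo_sparse_categorical_accuracy_numpy():
    y_true = np.array([1, 2, 0, 1])
    y_pred = np.array([[0.3, 0.5, 0.2], [0.1, 0.8, 0.1], [0.6, 0.2, 0.2], [0.2, 0.5, 0.3]])
    acc = np.equal(y_true, np.argmax(y_pred, axis=-1)).astype(np.float32)
    print("numpy sparse_categorical_accuracy", acc)  # [1. 0. 1. 1.]
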
'''
top_k_categorical_accuracy
Computes top-k accuracy: a prediction is counted as correct if the target class is among the k highest-scoring classes.
'''
def Test_top_k_categorical_accuracy():
    y_true = np.array([0, 1, 0,  # 1
                       0, 0, 1,  # 2
                       1, 0, 0,  # 0
                       0, 1, 0])  # 1
    y_pred = np.array([0.3, 0.5, 0.2,  # top-2: classes 1, 0 -> contains 1, correct
                       0.1, 0.6, 0.3,  # top-2: classes 1, 2 -> contains 2, correct
                       0.2, 0.8, 0.1,  # top-2: classes 1, 0 -> contains 0, correct
                       0.5, 0.0, 0.5])  # top-2: classes 0, 2 -> misses 1, wrong
    y_true = np.reshape(y_true, (4, 3))
    y_pred = np.reshape(y_pred, (4, 3))


    print("y_true",y_true,np.shape(y_true))
    print("y_pred", y_pred, np.shape(y_pred))
    m_top_k_categorical_accuracy = keras.metrics.top_k_categorical_accuracy(y_true, y_pred,2)
    print("m_top_k_categorical_accuracy",m_top_k_categorical_accuracy)

    with tf.Session() as sess:
       print(sess.run(tf.get_default_graph().get_tensor_by_name("Mean:0")))
    # Output: 0.75
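
# The same check in plain NumPy (a made-up helper, not part of the original test):
# a sample counts as correct if its true class is among the k highest-scoring predictions.
def Demo_top_k_categorical_accuracy_numpy(k=2):
    y_true = np.array([1, 2, 0, 1])
    y_pred = np.array([[0.3, 0.5, 0.2], [0.1, 0.6, 0.3], [0.2, 0.8, 0.1], [0.5, 0.0, 0.5]])
    top_k = np.argsort(-y_pred, axis=-1)[:, :k]  # indices of the k largest scores per row
    acc = np.mean([y_true[i] in top_k[i] for i in range(len(y_true))])
    print("numpy top_%d_categorical_accuracy" % k, acc)  # 0.75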

def Test_mean_squared_error():
    y_true = np.array([0, 1, 0,  # 1
                       0, 0, 1,  # 2
                       1, 0, 0,  # 0
                       0, 1, 0])  # 1
    y_pred = np.array([0, 0.9, 0,  # 1
                       0.1, 0.6, 0.3,  # 1
                       0.2, 0.8, 0.1,  # 0
                       0.5, 0.0, 0.5])  # 1
    y_true = np.reshape(y_true, (4, 3))
    y_pred = np.reshape(y_pred, (4, 3))

    print("y_true", y_true, np.shape(y_true))
    print("y_pred", y_pred, np.shape(y_pred))
    m_mean_squared_error = keras.losses.mean_squared_error(y_true, y_pred)
    print("m_mean_squared_error", m_mean_squared_error)
    with tf.Session() as sess:
        print(sess.run(tf.get_default_graph().get_tensor_by_name("Mean:0")))
    # Output: [0.00333333 0.28666667 0.43       0.5]
    # The first value is computed as ((1 - 0.9) ** 2 + 0 + 0) / 3
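
# The same check in plain NumPy (a made-up helper, not part of the original test):
# mean_squared_error averages the squared errors over the last axis, one value per sample.
def Demo_mean_squared_error_numpy():
    y_true = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]], dtype=np.float32)
    y_pred = np.array([[0, 0.9, 0], [0.1, 0.6, 0.3], [0.2, 0.8, 0.1], [0.5, 0.0, 0.5]], dtype=np.float32)
    mse = np.mean(np.square(y_pred - y_true), axis=-1)
    print("numpy mean_squared_error", mse)  # [0.00333333 0.28666667 0.43 0.5]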





if __name__ == '__main__':
    #Test1()
    #Test_binary_accuracy()
    #Test_categorical_accuracy()
    #Test_sparse_categorical_accuracy()
    #Test_top_k_categorical_accuracy()
    Test_mean_squared_error()


