Keras默认的自定义损失函数参数形式固定,一个为y_true,另一个为y_pred
例如:
# Example: the default Keras custom-loss signature is fixed to exactly
# these two arguments (y_true, y_pred) — no extra parameters allowed.
def myloss(y_true,y_pred):
    pass
# 自定义损失函数
def myloss(y_true, y_pred, neighbor_Y):
    """Weighted custom loss: plain MSE plus a neighbor-consistency term.

    loss = 0.7 * mean((y_pred - y_true)^2)
         + 0.3 * mean(|d1 - d2|)
    where, for each sample's (left, center, right) neighbor triple,
      d1 = (2*center - left - right)^2   (second difference of true values)
      d2 = (2*y_pred - left - right)^2   (same with the prediction as center)

    Parameters
    ----------
    y_true, y_pred : tensors of shape (batch, 1)  -- assumed; confirm at caller
    neighbor_Y     : tensor reshapeable to (batch, 3)

    Returns
    -------
    Scalar loss tensor.
    """
    lamb1 = 0.7  # weight of the MSE term
    lamb2 = 0.3  # weight of the neighbor-consistency term
    loss1 = K.mean(K.square(y_pred - y_true))
    # Use backend ops instead of instantiating Reshape Layer objects inside a
    # loss function: a Layer call creates new graph nodes on every invocation.
    neighbor_Y = K.reshape(neighbor_Y, (-1, 3))
    # Slicing with i:i+1 keeps the trailing axis, so all terms are (batch, 1)
    # and broadcast cleanly against y_pred.
    left = neighbor_Y[:, 0:1]
    center = neighbor_Y[:, 1:2]
    right = neighbor_Y[:, 2:3]
    d1 = K.square(2 * center - left - right)
    d2 = K.square(2 * y_pred - (left + right))
    loss2 = K.mean(K.abs(d1 - d2))
    return lamb1 * loss1 + lamb2 * loss2
def dice_loss(neighbor_Y):
    """Adapt the three-argument `myloss` to Keras's (y_true, y_pred) signature.

    The extra neighbor tensor is captured in a closure, so Keras can invoke
    the returned callable with only the two standard loss arguments.
    """
    def dice(y_true, y_pred):
        # Forward to the real loss, supplying the captured neighbor tensor.
        return myloss(y_true, y_pred, neighbor_Y)

    return dice
# Bind the extra neighbor tensor up front, producing a two-argument loss
# Keras will accept, then compile with it.
# NOTE(review): `model`, `optimizer` and `neighbor_true_Y` are defined
# elsewhere (outside this excerpt).
model_dice = dice_loss(neighbor_true_Y)
model.compile(loss = model_dice,optimizer= optimizer,metrics=['mse','mae'])
# Test code: verify the custom loss against a plain-Python reimplementation.
# -*- encoding = utf-8 -*-
# NOTE(review): the line above is not a valid coding declaration -- the
# canonical form is "# -*- coding: utf-8 -*-" and it only takes effect on
# line 1 or 2 of a file.
# Keras test script.
import numpy as np
from keras import backend as K
from keras.layers import LSTM,RepeatVector,Dense,\
Activation,Add,Reshape,Input,Lambda,Multiply,Concatenate,Dot
import tensorflow as tf
import pandas as pd
from matplotlib import pyplot as plt
# Print full arrays (no "..." truncation) when dumping results.
np.set_printoptions(threshold=np.inf)
def myloss(y_true, y_pred, neighbor_Y):
    """Custom loss: 0.7 * MSE + 0.3 * mean|d1 - d2| over neighbor triples.

    d1 is the squared second difference of each (left, center, right)
    neighbor triple; d2 is the same expression with y_pred as the center.
    """
    w_mse, w_smooth = 0.7, 0.3
    # Ordinary mean-squared-error component.
    mse_term = K.mean(K.square(y_pred - y_true))
    # Force the neighbor tensor to (batch, 3): one triple per sample.
    neighbor_Y = Reshape((3,))(neighbor_Y)
    print(K.int_shape(neighbor_Y))  # debug: confirm the reshaped static shape
    left = neighbor_Y[:, 0]
    center = neighbor_Y[:, 1]
    right = neighbor_Y[:, 2]
    # Squared second difference of the true triple, as a (batch, 1) column.
    d1 = Reshape((1,))(K.square(2 * center - left - right))
    # Same quantity with the prediction substituted for the center value.
    s = Reshape((1,))(left + right)
    d2 = Reshape((1,))(K.square(2 * y_pred - s))
    smooth_term = K.mean(K.abs(d1 - d2))
    return w_mse * mse_term + w_smooth * smooth_term
# Symbolic placeholders: one scalar target/prediction per sample, plus a
# 3-wide neighbor triple per sample.
y_true = Input(shape=(1,))
y_pred = Input(shape=(1,))
neighbor_Y = Input(shape = (3,))
# Build the symbolic loss graph once; it is evaluated below via sess.run.
loss_func = myloss(y_true,y_pred,neighbor_Y)
#
# Concrete test data: 48 samples (neib_y has 144 values -> 48 rows of 3).
yy_true = np.arange(10,58).reshape(-1,1)
yy_pred = np.arange(20,500,10).reshape(-1,1)
neib_y = np.arange(0.3,14.7,0.1).reshape(-1,3)
# TF1-style session setup (tf.Session and global_variables_initializer are
# TensorFlow 1.x APIs; this script will not run as-is on TF 2.x).
sess = tf.Session()
K.set_session(sess)
init_op = tf.global_variables_initializer()
sess.run(init_op)
#
def another_way_to_calcualte(y_true1, y_pred1, neib_y):
    """Plain-NumPy reference implementation of the custom loss, for cross-checking.

    loss = 0.7 * mean((y_true1 - y_pred1)^2)
         + 0.3 * mean(|(2*c - l - r)^2 - (2*y_pred - l - r)^2|)
    where each row of neib_y is an (l, c, r) neighbor triple.

    Parameters
    ----------
    y_true1, y_pred1 : array-like, flattened to one value per sample.
    neib_y : array-like, reshaped to (n_samples, 3).

    Returns
    -------
    float : the combined weighted loss.
    """
    weight1 = 0.7
    weight2 = 0.3
    y_true1 = np.asarray(y_true1, dtype=float).reshape(-1)
    y_pred1 = np.asarray(y_pred1, dtype=float).reshape(-1)
    neib_y = np.asarray(neib_y, dtype=float).reshape(-1, 3)
    # Vectorized over samples instead of the original Python accumulation loop.
    loss1 = np.mean((y_true1 - y_pred1) ** 2)
    d1 = (2 * neib_y[:, 1] - neib_y[:, 0] - neib_y[:, 2]) ** 2
    d2 = (2 * y_pred1 - neib_y[:, 0] - neib_y[:, 2]) ** 2
    loss2 = np.mean(np.abs(d1 - d2))
    return weight1 * loss1 + weight2 * loss2
#
# Evaluate the symbolic Keras loss and the plain-NumPy reference on the same
# data; the two printed values should agree up to float precision.
with sess.as_default():
    loss = sess.run(loss_func,feed_dict={y_true:yy_true,y_pred:yy_pred,neighbor_Y:neib_y})
    print(loss)
print(another_way_to_calcualte(yy_true,yy_pred,neib_y))
但是某些时候我们需要自定义损失函数,且其计算不止依赖 y_true 和 y_pred 这两个参数,这时就需要借助闭包(工厂函数)重新构造损失函数,这里需要一点 trick。参考
https://blog.csdn.net/qq_23269761/article/details/84134971,我完成了自定义损失函数,并用上面的测试代码验证了结果是正确的。
输出为:
140431.39
140431.38
二者结果一致