Reputation: 37
I want to implement this loss function. This is my code so far:
def loss(output, target, from_logits=False):
    L1 = -tf.reduce_sum(target * tf.log(output), reduction_indices=len(output.get_shape()) - 1)
    L2 = tf.reduce_sum(tf.square(tf.subtract(tf.reduce_sum(tf.multiply(j, output), reduction_indices=len(output.get_shape()) - 1), y)))
    L3 = tf.reduce_sum(tf.reduce_sum(tf.multiply(output, tf.square(tf.subtract(j, tf.reduce_prod(tf.multiply(k, p_ik)))))))
    loss = L1 + L2 + L3
    return loss
Please help me. Thank you very much.
Upvotes: 2
Views: 232
Reputation: 86600
Assuming:
- y_true with shape (samples, 11), one-hot encoded
- softmax activation (classes sum to 1) in the last layer

A loss in Keras has the form def func(y_true, y_pred):
import keras.backend as K
from keras.losses import categorical_crossentropy
def loss(y_true, y_pred):
    #p
    p = y_pred                                    #(samples, 11)
    #j or k: class values 10..20 as a float tensor
    j = K.cast(K.arange(10, 21), K.floatx())      #(11,)
    j = K.reshape(j, (1, -1))                     #(1, 11)
    #y_true as class number
    y = K.sum(y_true * j, axis=-1, keepdims=True) #(samples, 1)
    #first term:
    L1 = categorical_crossentropy(y_true, y_pred) #(samples,)
    #second term, with y outside the sum in j
    kpk = j * p                                   #(samples, 11)
    kpkSum = K.sum(kpk, axis=-1, keepdims=True)   #(samples, 1)
    L2 = kpkSum - y                               #(samples, 1)
    L2 = K.sum(K.square(L2), axis=-1) / 2.        #(samples,)
    #third term:
    L3 = K.square(j - kpkSum)                     #(samples, 11)
    L3 = p * L3                                   #(samples, 11)
    L3 = K.sum(L3, axis=-1)                       #(samples,)
    return L1 + L2 + L3 #the mean over the samples is taken automatically by Keras
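
You can then pass the function object directly when compiling a model. A minimal sketch, assuming any architecture whose last layer is an 11-unit softmax (the hidden layer size and input shape below are placeholders, not taken from the question):

from keras.models import Sequential
from keras.layers import Dense

#hypothetical model: anything works as long as the output is a softmax
#over 11 classes, matching the (samples, 11) shape assumed above
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(20,)))
model.add(Dense(11, activation='softmax'))

#pass the custom function (not a string) as the loss
model.compile(optimizer='adam', loss=loss)

Keras calls loss(y_true, y_pred) on each batch and averages the per-sample values, so the function only needs to return a tensor of shape (samples,).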
Upvotes: 1