Reputation: 863
I used keras.Model to build the model, but with a custom loss function and a custom training process: I wrote the iteration loop and run it with sess.run myself. Now I want to get the weight L2 loss during training. How can I do that?
Suppose the model is as follows:
def model():
    x = Input(shape=(None, None, 3))
    y = Conv2D(10, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(), kernel_regularizer=regularizers.l2(0.0005))(x)
    y = Conv2D(16, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(), kernel_regularizer=regularizers.l2(0.0005))(y)
    y = Conv2D(32, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(), kernel_regularizer=regularizers.l2(0.0005))(y)
    y = Conv2D(16, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(), kernel_regularizer=regularizers.l2(0.0005))(y)
    y = Conv2D(1, 3, strides=1, kernel_initializer=tf.glorot_uniform_initializer(), kernel_regularizer=regularizers.l2(0.0005))(y)
    return Model(inputs=[x], outputs=[y])

def loss(y_true, y_pred):
    return tf.softmax_loss(.....)
The training code:
def train():
    dataset = tf.TFRecordDataset(tfrecords).make_one_shot_iterator().get_next()
    input_image = tf.placeholder(...)
    label = tf.placeholder(...)
    net = model()
    pred = net(input_image)  # call the built model instance, not model() again
    loss_op = loss(label, pred)
    while True:
        imgs, loss_val = sess.run([dataset, loss_op])
With the above code, I don't think the weight (regularization) loss is included in loss_op. How can I get it? I tried l2_loss_op = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), but its value is 0.
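For reference, Keras typically keeps one tensor per regularizer on the model's losses attribute rather than in that graph collection, which would explain the 0. A minimal sketch of evaluating them in the session loop (assuming the standalone Keras package on a TF1 backend, as in the code above):

net = model()
# net.losses holds one scalar tensor per kernel_regularizer in the model
reg_loss_op = tf.add_n(net.losses)
total_loss_op = loss_op + reg_loss_op  # regularized training objective
reg_val, total_val = sess.run([reg_loss_op, total_loss_op])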
Upvotes: 3
Views: 3524
Reputation: 19836
Update: cleaner and more complete implementation here.
I wrote a custom function that returns the l1, l2, and l1_l2 losses from all layers, including recurrent layers, summed up; it does not include activity_regularizer losses, which aren't weight losses:
def l1l2_weight_loss(model):
    l1l2_loss = 0
    for layer in model.layers:
        if 'layer' in layer.__dict__ or 'cell' in layer.__dict__:
            l1l2_loss += _l1l2_rnn_loss(layer)
            continue
        if 'kernel_regularizer' in layer.__dict__ or \
           'bias_regularizer' in layer.__dict__:
            l1l2_lambda_k, l1l2_lambda_b = [0, 0], [0, 0]  # defaults
            if layer.__dict__['kernel_regularizer'] is not None:
                l1l2_lambda_k = list(layer.kernel_regularizer.__dict__.values())
            if layer.__dict__['bias_regularizer'] is not None:
                l1l2_lambda_b = list(layer.bias_regularizer.__dict__.values())
            if any([(_lambda != 0) for _lambda in (l1l2_lambda_k + l1l2_lambda_b)]):
                W = layer.get_weights()
                for idx, _lambda in enumerate(l1l2_lambda_k + l1l2_lambda_b):
                    if _lambda != 0:
                        _pow = 2**(idx % 2)  # 1 if idx is even (l1), 2 if odd (l2)
                        l1l2_loss += _lambda * np.sum(np.abs(W[idx // 2])**_pow)
    return l1l2_loss
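The idx // 2 and 2**(idx % 2) indexing relies on the regularizer object's __dict__ listing its l1 factor before its l2 factor, so list(reg.__dict__.values()) comes out as [l1, l2]. A quick check (an illustrative snippet, assuming the keras.regularizers.L1L2 class stores exactly these two attributes in this order, as in the Keras version used here):

from keras.regularizers import l1_l2

reg = l1_l2(l1=1e-4, l2=2e-4)
print(reg.__dict__)                  # expected: {'l1': 0.0001, 'l2': 0.0002}
print(list(reg.__dict__.values()))   # expected: [0.0001, 0.0002] -> [l1, l2]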
def _l1l2_rnn_loss(layer):
    l1l2_loss = 0
    if 'backward_layer' in layer.__dict__:
        bidirectional = True
        _layer = layer.layer
    else:
        _layer = layer
        bidirectional = False
    ldict = _layer.cell.__dict__
    if 'kernel_regularizer' in ldict or \
       'recurrent_regularizer' in ldict or \
       'bias_regularizer' in ldict:
        l1l2_lambda_k, l1l2_lambda_r, l1l2_lambda_b = [0, 0], [0, 0], [0, 0]
        if ldict['kernel_regularizer'] is not None:
            l1l2_lambda_k = list(_layer.kernel_regularizer.__dict__.values())
        if ldict['recurrent_regularizer'] is not None:
            l1l2_lambda_r = list(_layer.recurrent_regularizer.__dict__.values())
        if ldict['bias_regularizer'] is not None:
            l1l2_lambda_b = list(_layer.bias_regularizer.__dict__.values())
        all_lambda = l1l2_lambda_k + l1l2_lambda_r + l1l2_lambda_b
        if any([(_lambda != 0) for _lambda in all_lambda]):
            W = layer.get_weights()
            idx_incr = len(W) // 2  # accounts for 'use_bias'
            for idx, _lambda in enumerate(all_lambda):
                if _lambda != 0:
                    _pow = 2**(idx % 2)  # 1 if idx is even (l1), 2 if odd (l2)
                    l1l2_loss += _lambda * np.sum(np.abs(W[idx // 2])**_pow)
                    if bidirectional:
                        l1l2_loss += _lambda * np.sum(
                            np.abs(W[idx // 2 + idx_incr])**_pow)
    return l1l2_loss
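For context on the W indexing inside _l1l2_rnn_loss: Keras typically orders get_weights() for recurrent layers as [kernel, recurrent_kernel, bias], and a Bidirectional wrapper returns the forward set followed by the backward set, which is what idx_incr = len(W) // 2 offsets into. A small inspection sketch (the layer sizes here are arbitrary examples):

from keras.layers import Input, GRU, Bidirectional
from keras.models import Model

ipt = Input(shape=(100, 8))
out = Bidirectional(GRU(16, use_bias=True))(ipt)
m = Model(ipt, out)

W = m.layers[1].get_weights()  # layers[1] is the Bidirectional wrapper
print(len(W))                  # expected: 6 -> kernel, recurrent_kernel, bias per direction
print([w.shape for w in W])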
TESTING IMPLEMENTATION:
from keras.layers import Input, Dense, LSTM, GRU, Bidirectional
from keras.models import Model
from keras.regularizers import l1, l2, l1_l2
import numpy as np
ipt = Input(shape=(1200, 16))
x = LSTM(60, activation='relu', return_sequences=True,
         recurrent_regularizer=l2(1e-3))(ipt)
x = Bidirectional(GRU(60, activation='relu', bias_regularizer=l1(1e-4)))(x)
out = Dense(1, activation='sigmoid', kernel_regularizer=l1_l2(2e-4))(x)
model = Model(ipt,out)
model.compile(loss='binary_crossentropy', optimizer='adam')
X = np.random.rand(10,1200,16) # (batch_size, timesteps, input_dim)
Y = np.random.randint(0,2,(10,1))
keras_loss = model.evaluate(X,Y)
custom_loss = binary_crossentropy(Y, model.predict(X))
custom_loss += l1l2_weight_loss(model)
print('%.6f'%keras_loss + ' -- keras_loss')
print('%.6f'%custom_loss + ' -- custom_loss')
0.763822 -- keras_loss
(See my answer for binary_crossentropy implementation)
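For completeness, a minimal NumPy sketch of binary_crossentropy consistent with Keras' definition (mean cross-entropy with the predictions clipped by an epsilon; 1e-7 is an assumed value matching Keras' default):

import numpy as np

def binary_crossentropy(y_true, y_pred, eps=1e-7):
    # clip predictions away from exact 0/1, then average the per-sample cross-entropy
    y_pred = np.clip(y_pred, eps, 1 - eps)
    return np.mean(-(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)))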
Upvotes: 2