Reputation: 681
I'm using Keras and want to implement a custom learning rate schedule via keras.callbacks.LearningRateScheduler.
How can I pass the learning rate to keras.callbacks.TensorBoard so that I can monitor it in TensorBoard?
Currently I have:
lrate = LearningRateScheduler(lambda epoch: initial_lr * 0.95 ** epoch)

tensorboard = TensorBoard(log_dir=LOGDIR, histogram_freq=1,
                          batch_size=batch_size, embeddings_freq=1,
                          embeddings_layer_names=embedding_layer_names)

model.fit_generator(train_generator, steps_per_epoch=n_steps,
                    epochs=n_epochs,
                    validation_data=(val_x, val_y),
                    callbacks=[lrate, tensorboard])
Upvotes: 7
Views: 2230
Reputation: 907
Creating LearningRateScheduler logs for TensorBoard can be done with the following:
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler, TensorBoard

# Define your scheduling function
def scheduler(epoch):
    return 0.001 * 0.95 ** epoch

# Define the scheduler callback
lr_scheduler = LearningRateScheduler(scheduler)

# Alternatively, use an anonymous function
# lr_scheduler = LearningRateScheduler(lambda epoch: initial_lr * 0.95 ** epoch)

# Define a TensorBoard callback subclass that also logs the learning rate
class LRTensorBoard(TensorBoard):
    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir, **kwargs)
        # Separate writer so the learning rate shows up under its own run
        self.lr_writer = tf.summary.create_file_writer(self.log_dir + '/learning')

    def on_epoch_end(self, epoch, logs=None):
        lr = getattr(self.model.optimizer, 'lr', None)
        with self.lr_writer.as_default():
            tf.summary.scalar('learning_rate', lr, step=epoch)
        super().on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super().on_train_end(logs)
        self.lr_writer.close()

# Create the callback object
tensorboard_callback = LRTensorBoard(log_dir='./logs/', histogram_freq=1)

# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
r = model.fit(X_train, y_train,
              validation_data=(X_val, y_val),
              epochs=25, batch_size=200,
              callbacks=[tensorboard_callback, lr_scheduler])
The learning rate can then be viewed in TensorBoard via:

# Load the TensorBoard notebook extension
%load_ext tensorboard
# Start TensorBoard
%tensorboard --logdir ./logs
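Outside a notebook, the same logs can be viewed by launching TensorBoard from a shell and opening the URL it prints (usually http://localhost:6006):

tensorboard --logdir ./logs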
Upvotes: 1
Reputation: 1624
I'm not sure how to pass it to TensorBoard, but you can monitor it from Python.
from keras.callbacks import Callback

class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
        self.lr = []

    def on_epoch_end(self, epoch, logs={}):
        self.losses.append(logs.get('loss'))
        # Recompute the scheduled rate for the epoch that just finished
        self.lr.append(initial_lr * 0.95 ** len(self.losses))

loss_hist = LossHistory()
Then just add loss_hist to your callbacks, as in the sketch below.
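For example, a minimal usage sketch (assuming a compiled model, training arrays X_train/y_train, and the lrate scheduler and n_epochs from the question):

model.fit(X_train, y_train, epochs=n_epochs,
          callbacks=[lrate, loss_hist])

# Plot the recorded schedule after training
import matplotlib.pyplot as plt
plt.plot(loss_hist.lr)
plt.xlabel('epoch')
plt.ylabel('learning rate')
plt.show()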
Update:
Based on this answer:
import tensorflow as tf
from keras.callbacks import TensorBoard

class LRTensorBoard(TensorBoard):
    def __init__(self, log_dir='./logs', **kwargs):
        super(LRTensorBoard, self).__init__(log_dir, **kwargs)
        self.lr_log_dir = log_dir

    def set_model(self, model):
        # TF1-style summary writer for the learning rate
        self.lr_writer = tf.summary.FileWriter(self.lr_log_dir)
        super(LRTensorBoard, self).set_model(model)

    def on_epoch_end(self, epoch, logs=None):
        # Recompute the scheduled rate and write it as a scalar summary
        lr = initial_lr * 0.95 ** epoch
        summary = tf.Summary(value=[tf.Summary.Value(tag='lr',
                                                     simple_value=lr)])
        self.lr_writer.add_summary(summary, epoch)
        self.lr_writer.flush()
        super(LRTensorBoard, self).on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super(LRTensorBoard, self).on_train_end(logs)
        self.lr_writer.close()
Just use it like the normal TensorBoard callback.
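For example, a minimal sketch reusing the setup from the question (LOGDIR, train_generator, n_steps, n_epochs, val_x/val_y, and lrate are the question's own names):

lr_tb = LRTensorBoard(log_dir=LOGDIR)
model.fit_generator(train_generator, steps_per_epoch=n_steps,
                    epochs=n_epochs,
                    validation_data=(val_x, val_y),
                    callbacks=[lrate, lr_tb])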
Upvotes: 2