Anna Railton

Reputation: 41

How to plot different summary metrics on the same plot with Tensorboard?

I would like to be able to plot the training loss per batch and the average validation loss for the validation set on the same plot in Tensorboard. I ran into this issue when my validation set was too large to fit into memory, so evaluating it required batching and the use of tf.metrics update ops.
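
For reference, tf.metrics.mean returns two ops: a value tensor that reads the running average, and an update op that folds in one more batch. The metric state lives in local variables, so it can be reset with tf.local_variables_initializer. A minimal sketch of that pattern (the random batch_loss is just a stand-in for a real per-batch loss):

import tensorflow as tf

batch_loss = tf.random_uniform([])  # stand-in for a real per-batch loss
mean_loss, update_op = tf.metrics.mean(batch_loss)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # reset the accumulator
    for _ in range(3):
        sess.run(update_op)         # accumulate one batch
    print(sess.run(mean_loss))      # the average over all 3 batches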

This question could apply to any Tensorflow metrics you want to appear on the same graph in Tensorboard.

I am able to log both losses, but they end up on separate plots in Tensorboard.

In the example code below, my issue stems from the fact that my validation summary (a tf.summary.scalar with name=loss) gets renamed to loss_1, and is therefore moved to a separate graph in Tensorboard. From what I can work out, Tensorboard plots summaries with the same name on the same graph, regardless of which folder they are written to. This is frustrating, as train_summ (name=loss) is only ever written to the train folder and valid_summ (name=loss) is only ever written to the valid folder - yet the latter is still renamed to loss_1.
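
The renaming itself is easy to reproduce in isolation. Under the TF 1.x graph API, a second tf.summary.scalar with a duplicate name gets a uniquified node name, and that node name is the tag Tensorboard groups charts by - a minimal sketch:

import tensorflow as tf

a = tf.summary.scalar('loss', tf.constant(1.0))
b = tf.summary.scalar('loss', tf.constant(2.0))
print(a.op.name)  # loss
print(b.op.name)  # loss_1 <- a different tag, hence a separate chart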

The example code:

# View graphs with (Linux): $ tensorboard --logdir=/tmp/my_tf_model

import tensorflow as tf
import numpy as np
import os
import tempfile

def train_data_gen():
    yield np.random.normal(size=[3]), np.array([0.5, 0.5, 0.5])

def valid_data_gen():
    yield np.random.normal(size=[3]), np.array([0.8, 0.8, 0.8])

batch_size = 25
n_training_batches = 4
n_valid_batches = 2
n_epochs = 5
summary_loc = os.path.join(tempfile.gettempdir(), 'my_tf_model')
print("Summaries written to" + summary_loc)

# Dummy data
train_data = tf.data.Dataset.from_generator(train_data_gen, (tf.float32, tf.float32)).repeat().batch(batch_size)
valid_data = tf.data.Dataset.from_generator(valid_data_gen, (tf.float32, tf.float32)).repeat().batch(batch_size)
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_data.output_types,
                                               train_data.output_shapes)
batch_x, batch_y = iterator.get_next()
train_iter = train_data.make_initializable_iterator()
valid_iter = valid_data.make_initializable_iterator()

# Some ops on the data
loss = tf.losses.mean_squared_error(batch_x, batch_y)
valid_loss, valid_loss_update = tf.metrics.mean(loss)

# Write to summaries
train_summ = tf.summary.scalar('loss', loss)
valid_summ = tf.summary.scalar('loss', valid_loss)  # <- will be renamed to "loss_1"

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_handle, valid_handle = sess.run([train_iter.string_handle(), valid_iter.string_handle()])
    sess.run([train_iter.initializer, valid_iter.initializer])

    # Summary writers
    writer_train = tf.summary.FileWriter(os.path.join(summary_loc, 'train'), sess.graph)
    writer_valid = tf.summary.FileWriter(os.path.join(summary_loc, 'valid'), sess.graph)

    global_step = 0  # implicit as no actual training
    for i in range(n_epochs):
        # "Training"
        for j in range(n_training_batches):
            global_step += 1
            summ = sess.run(train_summ, feed_dict={handle: train_handle})
            writer_train.add_summary(summary=summ, global_step=global_step)
        # "Validation"
        sess.run(tf.local_variables_initializer())  # reset the tf.metrics running average
        for j in range(n_valid_batches):
            _, batch_summ = sess.run([valid_loss_update, train_summ], feed_dict={handle: valid_handle})
            # Uncommenting the following would plot the per-batch validation loss on the training loss plot:
            # writer_valid.add_summary(summary=batch_summ, global_step=global_step + j + 1)
        summ = sess.run(valid_summ)
        writer_valid.add_summary(summary=summ, global_step=global_step)  # <- I want this on the training loss graph

What I have tried

Context

Upvotes: 2

Views: 3655

Answers (1)

Anna Railton

Reputation: 41

The Tensorboard custom_scalar plugin is the way to solve this problem: it lets you define a layout that combines arbitrary scalar tags on a single chart.

Here's the same example again with a custom_scalar to plot the two losses (per training batch + averaged over all validation batches) on the same plot:

# View graphs with (Linux): $ tensorboard --logdir=/tmp/my_tf_model

import os
import tempfile
import tensorflow as tf
import numpy as np
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2

def train_data_gen():
    yield np.random.normal(size=[3]), np.array([0.5, 0.5, 0.5])

def valid_data_gen():
    yield np.random.normal(size=[3]), np.array([0.8, 0.8, 0.8])

batch_size = 25
n_training_batches = 4
n_valid_batches = 2
n_epochs = 5
summary_loc = os.path.join(tempfile.gettempdir(), 'my_tf_model')
print("Summaries written to " + summary_loc)

# Dummy data
train_data = tf.data.Dataset.from_generator(
    train_data_gen, (tf.float32, tf.float32)).repeat().batch(batch_size)
valid_data = tf.data.Dataset.from_generator(
    valid_data_gen, (tf.float32, tf.float32)).repeat().batch(batch_size)
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_data.output_types,
                                               train_data.output_shapes)
batch_x, batch_y = iterator.get_next()
train_iter = train_data.make_initializable_iterator()
valid_iter = valid_data.make_initializable_iterator()

# Some ops on the data
loss = tf.losses.mean_squared_error(batch_x, batch_y)
valid_loss, valid_loss_update = tf.metrics.mean(loss)

# The name scope gives the summaries the tags 'loss/training' and 'loss/valid'
with tf.name_scope('loss'):
    train_summ = summary_lib.scalar('training', loss)
    valid_summ = summary_lib.scalar('valid', valid_loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_handle, valid_handle = sess.run([train_iter.string_handle(), valid_iter.string_handle()])
    sess.run([train_iter.initializer, valid_iter.initializer])

    writer_train = tf.summary.FileWriter(os.path.join(summary_loc, 'train'), sess.graph)
    writer_valid = tf.summary.FileWriter(os.path.join(summary_loc, 'valid'), sess.graph)

    # The layout only needs to be written once, to a single writer
    layout_summary = summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(
                title='losses',
                chart=[
                    layout_pb2.Chart(
                        title='losses',
                        multiline=layout_pb2.MultilineChartContent(tag=[
                            'loss/training', 'loss/valid'
                        ]))
                ])
        ]))
    writer_train.add_summary(layout_summary)

    global_step = 0
    for i in range(n_epochs):
        for j in range(n_training_batches): # "Training"
            global_step += 1
            summ = sess.run(train_summ, feed_dict={handle: train_handle})
            writer_train.add_summary(summary=summ, global_step=global_step)

        sess.run(tf.local_variables_initializer())  # reset the tf.metrics running average
        for j in range(n_valid_batches):  # "Validation"
            _, batch_summ = sess.run([valid_loss_update, train_summ], feed_dict={handle: valid_handle})
        summ = sess.run(valid_summ)
        writer_valid.add_summary(summary=summ, global_step=global_step)

Here's the resulting output in Tensorboard: both losses plotted on a single chart, under the Custom Scalars tab.

Upvotes: 2
