Li Ai

Reputation: 211

"the operation's graph is different from the session's graph" Error

I am new to tensorflow and got this error message when I tried to create a graph and then execute some operations:

ValueError                                Traceback (most recent call last)
<ipython-input-136-9e5ed7cede4c> in <module>()
----> 1 get_ipython().run_cell_magic('time', '', '\nn_epochs = 20\nbatch_size = 5\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session(graph=graph) as sess:\n    print(0)\n    init.run()\n    print(1)\n    # init = tf.global_variables_initializer()\n    # saver = tf.train.Saver()\n    print(2)\n    for epoch in range(n_epochs):\n        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):\n            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})\n        mse_batch = loss.eval(feed_dict={X: X_batch, y: y_batch})\n        mse_valid = loss.eval(feed_dict={X: X_valid, y: y_valid})\n        print(epoch, "Batch mse:", mse_batch, "Validation mse:", mse_valid)\n\n    # save_path = saver.save(sess, "./my_model_final.ckpt")')

4 frames
</usr/local/lib/python3.6/dist-packages/decorator.py:decorator-gen-60> in time(self, line, cell, local_ns)

<timed exec> in <module>()

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in _run_using_default_session(operation, feed_dict, graph, session)
   5603                        "`run(session=sess)`")
   5604     if session.graph is not graph:
-> 5605       raise ValueError("Cannot use the default session to execute operation: "
   5606                        "the operation's graph is different from the "
   5607                        "session's graph. Pass an explicit session to "

ValueError: Cannot use the default session to execute operation: the operation's graph is different from the session's graph. Pass an explicit session to run(session=sess)

I'm trying to write a method that builds multiple graphs for hyperparameter tuning (number of neurons per layer, number of hidden layers, etc.). The code is mainly adapted from https://github.com/ageron/handson-ml. I don't think I'm constructing and using the graph correctly.

import tensorflow as tf
import numpy as np
import pandas as pd


def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)


def create_graph(n_inputs, n_outputs, n_hidden_layers=2, n_neurons_per_layer=100, activation_function=tf.nn.relu, learning_rate=0.01, 
                 optimize_method='nesterov'):

  g = tf.Graph()

  with g.as_default():
    X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
    y = tf.placeholder(tf.int32, shape=(None), name="y")

    # create layers
    with tf.name_scope("dnn"):
      hidden_layers = {}
      for i in range(n_hidden_layers):
        if i == 0:
          hidden_layers['hidden_1'] = tf.layers.dense(X, n_neurons_per_layer, activation=activation_function, name="hidden_1")
        else:
          name_last_layer = 'hidden_' + str(i)
          name_this_layer = 'hidden_' + str(i+1)
          hidden_layers[name_this_layer] = tf.layers.dense(hidden_layers[name_last_layer], n_neurons_per_layer, 
                                                           activation=activation_function, name="hidden_{}".format(i+1))
      name_last_hidden = 'hidden_' + str(n_hidden_layers)
      logits = tf.layers.dense(hidden_layers[name_last_hidden], n_outputs, name="logits")

    with tf.name_scope("loss"):
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")
        y_proba = tf.nn.softmax(logits)

    with tf.name_scope("eval"):
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    with tf.name_scope("train"):
      if optimize_method == 'nesterov': # nesterov optimizer
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9, use_nesterov=True)
      elif optimize_method == 'adam': # adam optimizer
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
      else: # momentum optimizer
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
      training_op = optimizer.minimize(loss)

  return g, loss, training_op, X, y


def shuffle_batch(X, y, batch_size):
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch

graph, loss, training_op, X, y = create_graph(100, 10)

n_epochs = 20
batch_size = 5

init = tf.global_variables_initializer()
# saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
    init.run()
    print(2)
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        mse_batch = loss.eval(feed_dict={X: X_batch, y: y_batch})
        mse_valid = loss.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Batch mse:", mse_batch, "Validation mse:", mse_valid)

    # save_path = saver.save(sess, "./my_model_final.ckpt")

Upvotes: 0

Views: 400

Answers (1)

FinleyGibson

Reputation: 921

The problem here is that you are building the bulk of your operations on a graph object other than the default graph (defined as g inside the function and as graph outside it), but the operation tf.global_variables_initializer() is added to the default graph, not to graph.

Operations are added to a graph when they are declared, not when they are run by a session. So although you have specified

with tf.Session(graph=graph) as sess:

the init operation has been defined on a different graph from the one you specified, and so can't be run in this session.
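You can check this yourself: every operation and tensor records the graph it belongs to in its .graph attribute. A minimal standalone sketch (not from the question's code):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    v = tf.Variable(0.0, name="v")        # declared while g is the default, so it lives on g

init = tf.global_variables_initializer()  # declared outside the block, so it lives on the default graph

print(v.graph is g)     # True
print(init.graph is g)  # False: init belongs to tf.get_default_graph(), not g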

Changing the declaration of init to

with graph.as_default():
    init = tf.global_variables_initializer()

will solve the problem.
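Applied to the question's script, the fix looks like this (same change as above, shown in context):

graph, loss, training_op, X, y = create_graph(100, 10)

with graph.as_default():
    init = tf.global_variables_initializer()  # now added to graph

with tf.Session(graph=graph) as sess:
    init.run()  # works: init.graph is sess.graph
    # ... training loop as before ...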

Alternatively, you can remove the declaration of init altogether and just run tf.global_variables_initializer() directly within the session, like so:

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    print(2)
    for epoch in range(n_epochs):
        ...

which will automatically initialize the variables on the graph associated with sess: inside a with tf.Session(...) block, the session's graph temporarily becomes the default graph, so the initializer is built on the right graph.
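If you are going to build many graphs for hyperparameter tuning, a tidier option (my suggestion, not part of the question's code) is to create the init op inside create_graph and return it along with everything else, so the caller never has to think about which graph is the default. A minimal sketch, with the network reduced to a single dense layer for brevity:

import tensorflow as tf

def create_graph(n_inputs, n_outputs, learning_rate=0.01):
    g = tf.Graph()
    with g.as_default():
        X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
        y = tf.placeholder(tf.int32, shape=(None,), name="y")
        logits = tf.layers.dense(X, n_outputs, name="logits")
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")
        training_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        init = tf.global_variables_initializer()  # built while g is the default graph
    return g, loss, training_op, X, y, init

graph, loss, training_op, X, y, init = create_graph(100, 10)

with tf.Session(graph=graph) as sess:
    init.run()  # init belongs to graph, so the default session can run it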

Upvotes: 1
