Konrad S

Reputation: 71

Actually printing values from a tensor object

I'm currently trying to implement a basic autoencoder using Keras, and I have come to the stage where I want the output from the second hidden layer. I think I'm able to get the right object; the problem is that I get it as a tensor object. The code I've been trying to run is the following:

from keras.layers import Input, Dense, initializers
import numpy as np
from Dataset import Dataset
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
import time

#global variables
d = Dataset()
num_features = d.X_train.shape[1]
#input = [784, 400, 100, 10, 100, 400]
#output = [400, 100, 10, 100, 400, 784]
names = ['hidden1', 'hidden2', 'hidden3', 'hidden4', 'hidden5', 'hidden6']

list_of_nodes = [784, 400, 144, 10]

def generate_hidden_nodes(list_of_nodes):
    # Mirror the encoder sizes to get the decoder half, e.g.
    # [784, 400, 144, 10] -> input [784, 400, 144, 10, 144, 400]
    # and output [400, 144, 10, 144, 400, 784].
    input = []
    for j in range(len(list_of_nodes)):
        input.append(list_of_nodes[j])
    for i in range(len(list_of_nodes)-2):
        input.append(list_of_nodes[-2-i])
    output = input[::-1]
    return input, output

input, output = generate_hidden_nodes(list_of_nodes)

def autoencoder(epochs):
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    model = Sequential()
    input, output = generate_hidden_nodes(list_of_nodes)
    for j in range(len(input)):
        if j == (len(input)-1):
            model.add(Dense(output[j], activation='sigmoid', kernel_initializer=w, input_dim=input[j], name=names[j]))
            #model.add(Dropout(0.45))
        else:
            model.add(Dense(output[j], activation='relu', kernel_initializer=w, input_dim=input[j],
                            name = names[j]))
            #model.add(Dropout(0.45))
    model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=['acc'])
    history = model.fit(d.X_train, d.X_train,
                        epochs=epochs,
                        batch_size=50,
                        shuffle=True,
                        validation_split = 0.2)
                        #validation_data=(d.X_test, d.X_test))
    #print(history.history.keys())
    #plt.plot(history.history['val_acc'])
    #print(history.history['val_acc'])
    plt.show()
    return model

def cv():
    accuracy = 0
    size = 5
    epochs = 20
    variance = 0
    storage = np.zeros((size, epochs))
    for j in range(size):
        ae = autoencoder(epochs)
        #print(ae.history.history['val_acc'])
        storage[j] = ae.history.history['val_acc']
    for i in range(size):
        accuracy += storage[i][-1]
    mean = accuracy/size
    for k in range(size):
        variance += ((storage[k][-1] - mean)**2)
    variance = variance/size
    return mean, variance

#mean, variance = cv()
#print(mean)
#print(variance)
#time.sleep(10)

def finding_index():
    elements, index = np.unique(d.Y_test, return_index=True)
    return elements, index

def plotting():
    ae = autoencoder(20)
    elements, index = finding_index()
    y_proba = ae.predict(d.X_test)
    plt.figure(figsize=(20, 4))
    # size = 20
    for i in range(len(index)):
        ax = plt.subplot(2, len(index), i + 1)
        plt.imshow(d.X_test[index[i]].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        ax = plt.subplot(2, len(index), i + 1 + len(index))
        plt.imshow(y_proba[index[i]].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()

def plotting_weights(epochs):
    ae = autoencoder(epochs)
    output_layer = ae.get_layer('hidden2')
    weights = output_layer.get_weights()[0]
    print(weights.shape)
    size = 20
    plt.figure(figsize=(20, 4))
    for j in range(3):
        plt.gray()
        plt.imshow(weights[j].reshape(12, 12))
        plt.show()

def get_output():
    w = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
    new_model = Sequential()
    new_model.add(Dense(400, activation='relu', kernel_initializer=w, input_dim = 784))
    new_model.add(Dense(144, activation='sigmoid', kernel_initializer=w, input_dim = 400))
    #new_model.add(Dense(784, activation='sigmoid', kernel_initializer=w, input_dim = 144))
    new_model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=['acc'])
    history = new_model.fit(d.X_train, d.X_train,
                        epochs=20,
                        batch_size=50,
                        shuffle=True,
                        validation_split=0.2)
    y = new_model.predict(d.X_test)
    elements, index = finding_index()

    #return y.shape

def get_output2():
    ae = autoencoder(5)
    a = ae.layers[1].output  # symbolic tensor, not concrete values
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)  # execute init_op
        # print the random values that we sample
        print(a)

get_output2()

I've tried just print(a) as well, but as I said, that returns a tensor object. Can someone explain how I can actually print those values? Thanks in advance!

Upvotes: 0

Views: 37

Answers (1)

Pedro Marques

Reputation: 2682

Simplest:

import keras.backend as K
print(K.eval(ae.layers[1].output))

This is equivalent to:

with tf.Session() as sess:
  print(sess.run(a))

I find it more readable to simply use the keras.backend interface.
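
Note that ae.layers[1].output is a symbolic tensor that depends on the model's input placeholder, so it only yields concrete numbers once real data is fed through it. A minimal sketch using keras.backend.function (assuming ae and d.X_test from the question; the batch slice and variable names are illustrative):

import keras.backend as K

# Build a callable that maps the model's input placeholder
# to the second layer's output tensor.
get_hidden2 = K.function([ae.input], [ae.layers[1].output])

# Feed a small batch of real data; the result is a plain NumPy array.
hidden2_values = get_hidden2([d.X_test[:5]])[0]
print(hidden2_values)        # actual activation values
print(hidden2_values.shape)  # (5, 144) given the layer sizes above

The same result can be had by wrapping the layer in a new Model(inputs=ae.input, outputs=ae.layers[1].output) and calling predict on it.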

Upvotes: 1
