Wayne Ng

Reputation: 13

logits and labels must be same size: logits_size=[1200,7] labels_size=[600,7]

This is the first time I am asking a question on Stack Overflow, so I might not be able to give all the details of my problem in a single post.

I am trying to apply a CNN to an activity recognition dataset, but currently I am facing the error logits and labels must be same size: logits_size=[1200,7] labels_size=[600,7].

Link to the dataset
Link to my GitHub file

import tensorflow as tf
import numpy as np

batch_size = 600       # Batch size
seq_len = 200          # Number of steps
learning_rate = 0.0005
epochs = 200
n_classes = 7
n_channels = 3

inputs_ = tf.placeholder(tf.float32, [None, seq_len, n_channels], name = 'inputs')
labels_ = tf.placeholder(tf.float32, [None, n_classes], name = 'labels')
keep_prob_ = tf.placeholder(tf.float32, name = 'keep')
learning_rate_ = tf.placeholder(tf.float32, name = 'learning_rate')


cconv1 = tf.layers.conv1d(inputs=inputs_, filters=18, kernel_size=2, strides=1, 
                         padding='same', activation = tf.nn.relu)
pool_1 = tf.layers.max_pooling1d(inputs=cconv1, pool_size=4, strides=4, padding='same')

cconv2 = tf.layers.conv1d(inputs=pool_1, filters=36, kernel_size=2, strides=1, 
                         padding='same', activation = tf.nn.relu)
pool_2 = tf.layers.max_pooling1d(inputs=cconv2, pool_size=4, strides=4, padding='same')

cconv3 = tf.layers.conv1d(inputs=pool_2, filters=72, kernel_size=2, strides=1, 
                         padding='same', activation = tf.nn.relu)
pool_3 = tf.layers.max_pooling1d(inputs=cconv3, pool_size=4, strides=4, padding='same')



flat = tf.reshape(pool_3, (-1, 2*72))
print(flat.get_shape())
flat = tf.nn.dropout(flat, keep_prob=keep_prob_)
print(flat.get_shape())
logits = tf.layers.dense(flat, n_classes)
print(logits.get_shape())
# Cost function and optimizer

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_))
#     print(cost.get_shape())
optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost)
tf.summary.scalar("cost",cost)

# Accuracy

correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tf.summary.scalar("accuracy",accuracy)

def get_batches(X, y, batch_size = 100):
    """ Return a generator for batches """
    n_batches = len(X) // batch_size
    X, y = X[:n_batches*batch_size], y[:n_batches*batch_size]

    # Loop over batches and yield
    for b in range(0, len(X), batch_size):
        yield X[b:b+batch_size], y[b:b+batch_size]
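
Note that get_batches always drops the remainder, so every batch has exactly batch_size samples (labels_ really is [600, 7] on every step). A quick sanity check with dummy arrays (X_demo and y_demo are hypothetical, just to show the shapes):

import numpy as np

X_demo = np.zeros((1250, 200, 3))   # 1250 samples, 200 steps, 3 channels
y_demo = np.zeros((1250, 7))

sizes = [x.shape[0] for x, _ in get_batches(X_demo, y_demo, batch_size=600)]
print(sizes)   # [600, 600] -- the 50 leftover samples are dropped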

Below is the code where I run the model:

test_acc = []
test_loss = []

train_acc = []
train_loss = []

# with graph.as_default():
saver = tf.train.Saver()

# with tf.Session(graph=graph) as sess:
# with tf.Session() as sess:

sess = tf.Session()
sess.run(tf.global_variables_initializer())
# writer = tf.summary.FileWriter("logs/", sess.graph)

iteration = 1

for e in range(epochs):
#     tf.set_random_seed(123)
    # Loop over batches
    for x,y in get_batches(X_train, y_train, batch_size):

        # Feed dictionary
        feed = {inputs_ : x, labels_ : y, keep_prob_ : 0.5, learning_rate_ : learning_rate}

        # Loss
        loss, _ , acc = sess.run([cost, optimizer, accuracy], feed_dict = feed)
        train_acc.append(acc)
        train_loss.append(loss)

        # Print at each 5 iters
        if (iteration % 5 == 0):
            print("Epoch: {}/{}".format(e, epochs),
                  "Iteration: {:d}".format(iteration),
                  "Train loss: {:6f}".format(loss),
                  "Train acc: {:.6f}".format(acc))

        # Compute validation loss at every 10 iterations
        if (iteration%10 == 0):                
            val_acc_ = []
            val_loss_ = []

            for x_t, y_t in get_batches(X_test, y_test, batch_size):
                # Feed
                feed = {inputs_ : x_t, labels_ : y_t, keep_prob_ : 1.0}  

                # Loss
                loss_v, acc_v = sess.run([cost, accuracy], feed_dict = feed)                    
                val_acc_.append(acc_v)
                val_loss_.append(loss_v)

            # Print info
            print("Epoch: {}/{}".format(e, epochs),
                  "Iteration: {:d}".format(iteration),
                  "Testing loss NOW: {:6f}".format(np.mean(val_loss_)),
                  "Testing acc NOW: {:.6f}".format(np.mean(val_acc_)))

            # Store
            test_acc.append(np.mean(val_acc_))
            test_loss.append(np.mean(val_loss_))

        # Iterate 
        iteration += 1

    print("Optimization Finished!")
print("Ended!")

All help is appreciated, thanks in advance.

Upvotes: 1

Views: 1111

Answers (1)

venkatesh-sg

Reputation: 91

I guess the problem is in the reshape. The output of pool_3 is likely of shape [batch, 4, 72] rather than [batch, 2, 72]: since you are using 'same' padding, zeros are padded at the end, so each pooling layer keeps ceil(length / stride) time steps (200 → 50 → 13 → 4).

You need to change the reshape to flat = tf.reshape(pool_3, (-1, 4*72)).
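
To see where the 1200 comes from, here is a minimal NumPy sketch of the shape arithmetic (assuming batch_size=600, seq_len=200 and the three pooling layers from the question):

import numpy as np

# 'same' pooling with pool_size=4, strides=4 keeps ceil(length / 4) steps:
# 200 -> 50 -> 13 -> 4, so pool_3 comes out as [600, 4, 72].
pool_3_out = np.zeros((600, 4, 72))

wrong = pool_3_out.reshape(-1, 2 * 72)   # 600*4*72 / 144 = 1200 rows
right = pool_3_out.reshape(-1, 4 * 72)   # 600*4*72 / 288 = 600 rows
print(wrong.shape)   # (1200, 144) -> logits_size=[1200, 7]
print(right.shape)   # (600, 288)  -> logits_size=[600, 7]

If you would rather not hard-code the flattened size, flat = tf.layers.flatten(pool_3) infers it from the tensor's static shape and avoids this kind of mismatch.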

Upvotes: 2
