Engine

Reputation: 5432

Weird accuracy result in ConvNN Tensorflow

I've built a ConvNN to classify a bunch of images. At first the accuracy was really bad (below 6%), no matter what I changed in the network, so I thought the problem was the way I read the images. After changing that part of the code, I get a result that I can't explain (see the attached screenshot).

I guess the way I use the batches is the problem, so here is the code I'm using for that:

def getImage(filename):
    with tf.device('/cpu:0'):
        # convert filenames to a queue for an input pipeline.
        filenameQ = tf.train.string_input_producer([filename], num_epochs=None)
    # object to read records
    recordReader = tf.TFRecordReader()

    # read the full set of features for a single example
    key, fullExample = recordReader.read(filenameQ)

    # parse the full example into its component features.
    features = tf.parse_single_example(
        fullExample,
        features={
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'image/colorspace': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/channels': tf.FixedLenFeature([], tf.int64),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
            'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/format': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
            'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value='')
        })

    # now we are going to manipulate the label and image features
    label = features['image/class/label']
    image_buffer = features['image/encoded']
    # Decode the PNG-encoded image
    with tf.name_scope('decode_img', None, [image_buffer]):
        # decode as single-channel (greyscale)
        image = tf.image.decode_png(image_buffer, channels=1)

        # and convert to single precision data type
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # flatten the image into a single vector, where each element corresponds
    # to the greyscale value of a single pixel
    image = tf.reshape(image, [img_height * img_width])
    # re-define label as a "one-hot" vector
    # it will be [0,1] or [1,0] here.
    # This approach can easily be extended to more classes.
    label = tf.stack(tf.one_hot(label - 1, numberOFclasses))
    return image, label

with tf.device('/cpu:0'):
    train_img, train_label = getImage(TF_Records + "/TrainRecords")
    validation_img, validation_label = getImage(TF_Records + "/TestRecords")
    # associate the "label_batch" and "image_batch" objects with a randomly selected
    # batch of labels and images respectively
    train_imageBatch, train_labelBatch = tf.train.shuffle_batch(
        [train_img, train_label], batch_size=batchSize, capacity=50, min_after_dequeue=10)

    # and similarly for the validation data
    validation_imageBatch, validation_labelBatch = tf.train.shuffle_batch(
        [validation_img, validation_label],
        batch_size=batchSize, capacity=50, min_after_dequeue=10)

    # feeding function
    def feed_dict(train):
        if True:
            #img_batch, labels_batch = tf.train.shuffle_batch([train_label, train_img], batch_size=batchSize, capacity=500, min_after_dequeue=200)
            img_batch, labels_batch = sess.run([train_labelBatch, train_imageBatch])
            dropoutValue = 0.7
        else:
            #img_batch, labels_batch = tf.train.shuffle_batch([validation_label, validation_img], batch_size=batchSize, capacity=500, min_after_dequeue=200)
            img_batch, labels_batch = sess.run([validation_labelBatch, validation_imageBatch])
            dropoutValue = 1
        return {x: img_batch, y_: labels_batch, keep_prob: dropoutValue}

    for i in range(max_numberofiteretion):
        if i % 10 == 0:  # run a test
            summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
            test_writer.add_summary(summary, i)  # save to TensorBoard
        else:  # training
            if i % 100 == 99:  # record execution stats
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, _ = sess.run([merged, train_step],
                                      feed_dict=feed_dict(True),
                                      options=run_options,
                                      run_metadata=run_metadata)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                train_writer.add_summary(summary, i)
                print('Adding run metadata for', i)
            else:  # record a summary
                summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
                train_writer.add_summary(summary, i)

    # finalise
    coord.request_stop()
    coord.join(threads)
    train_writer.close()
    test_writer.close()
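
(For completeness: coord and threads are not defined in the snippet; they come from the usual TF 1.x queue-runner setup earlier in the script, roughly like the sketch below.)

# Sketch of the queue-runner setup the loop above relies on; the variable
# names match the snippet, the rest is standard TF 1.x boilerplate.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)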

Any idea what the problem here is? Thanks in advance! PS: the images are definitely being loaded; I can see them in TensorBoard.
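
A minimal sketch of the kind of image summary that makes them visible there (the summary name and the reuse of x, img_height and img_width are assumptions, not the exact code):

# Hypothetical sketch: log a few input images so they appear in
# TensorBoard's image tab; assumes x holds the flattened greyscale
# images produced by getImage().
tf.summary.image('input_images',
                 tf.reshape(x, [-1, img_height, img_width, 1]),
                 max_outputs=4)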

Upvotes: 1

Views: 191

Answers (1)

pypypy

Reputation: 1105

One thing I picked up on is here:

return label, image

with tf.device('/cpu:0'):
    train_img, train_label = getImage(TF_Records + "/TrainRecords")
    validation_img, validation_label = getImage(TF_Records + "/TestRecords")

It looks like in the function getImage() you return label, image, but when you call the function you reverse the assignments.
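
In other words, the order of the fetched tensors in sess.run() should match the order of the assignment targets. A minimal sketch of the consistent version, reusing the names from the question:

# Fetch images first and labels second, to match the assignment targets.
img_batch, labels_batch = sess.run([train_imageBatch, train_labelBatch])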

Upvotes: 1
