Reputation: 1007
I made a feed-forward single-neuron network. The prediction prints 0.5 when it should print 0.0. I'm very new to TensorFlow. Please help me. This is my code:
"""
O---(w1)-\
\
O---(w2)-->Sum ---> Sigmoid ---> O 3 inputs and 1 output
/
O---(w3)-/
| Input | Output
Example 1 | 0 0 1 | 0
Example 2 | 1 1 1 | 1
Example 3 | 1 0 1 | 1
Exmaple 4 | 0 1 1 | 0
"""
import tensorflow as tf
features = tf.placeholder(tf.float32, [None, 3])
labels = tf.placeholder(tf.float32, [None])
#Random weights
W = tf.Variable([[-0.16595599], [0.44064899], [-0.99977125]], tf.float32)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
predict = tf.nn.sigmoid(tf.matmul(features, W))
error = labels - predict
# Training
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(error)
for i in range(10000):
    sess.run(train, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]], labels: [0, 1, 1, 0]})
training_cost = sess.run(error, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]], labels: [0, 1, 1, 0]})
print('Training cost = ', training_cost, 'W = ', sess.run(W))
print(sess.run(predict, feed_dict={features:[[0, 1, 1]]}))
I've also built this model manually using only NumPy, and it works well.
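For reference, a minimal NumPy sketch of that kind of single-neuron model (illustrative only, not my exact code; the data follow the table above):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

X = np.array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)
W = np.array([[-0.16595599], [0.44064899], [-0.99977125]])

for _ in range(10000):
    pred = sigmoid(X.dot(W))                # forward pass, shape (4, 1)
    delta = (y - pred) * pred * (1 - pred)  # error times sigmoid derivative
    W += X.T.dot(delta)                     # gradient step on the squared error

print(sigmoid(np.array([[0, 1, 1]], dtype=float).dot(W)))  # should end up close to 0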
Edit: I've tried several cost functions, including tf.reduce_mean((predict - labels)**2).
Upvotes: 2
Views: 770
Reputation: 4101
You had two mistakes:
(a) your original error function optimises the wrong objective: it minimises the raw difference labels - predict rather than a proper loss such as the mean squared error;
(b) your target vector was effectively transposed: labels has shape [None] while predict has shape [None, 1], so labels - predict broadcasts into a matrix.
The following lines make this visible:
print(sess.run(predict - labels,
               feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                          labels: [0, 1, 1, 0]}))
The result is a 4x4 matrix: the (4,) labels vector broadcasts against the (4, 1) predictions instead of being subtracted element-wise.
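A quick NumPy sketch shows the same broadcasting effect (shapes only, nothing TensorFlow-specific):

import numpy as np

pred = np.zeros((4, 1))        # predict has shape (4, 1), like tf.matmul(features, W)
lbls = np.array([0, 1, 1, 0])  # labels fed as a flat vector has shape (4,)
print((lbls - pred).shape)     # (4, 4): the flat vector broadcasts against the column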
You can achieve the desired result with the following code:
import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 3])
labels = tf.placeholder(tf.float32, [None, 1])  # one column per example, matching predict
# Starting weights (hand-picked, not random)
W = tf.Variable([[10.0], [0.0], [0.2]], tf.float32)
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    predict = tf.nn.sigmoid(tf.matmul(features, W))
    print(sess.run(predict, feed_dict={features: [[0, 1, 1]]}))
    lbls = [[0], [1], [1], [0]]  # labels as a column vector
    print(sess.run(predict,
                   feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                              labels: lbls}))
    # error = labels - predict
    error = tf.reduce_mean((labels - predict)**2)  # mean squared error
    # Training
    optimizer = tf.train.GradientDescentOptimizer(10)
    train = optimizer.minimize(error)
    for i in range(100):
        sess.run(train,
                 feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                            labels: lbls})
    training_cost = sess.run(error,
                             feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                                        labels: lbls})
    residuals = sess.run(labels - predict,
                         feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                                    labels: lbls})
    print('Training cost = ', training_cost, 'labels - predict = ', residuals)
    print(sess.run(predict,
                   feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]]}))
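If you want hard 0/1 class labels rather than probabilities, you can threshold the sigmoid output at 0.5 afterwards, e.g. (here preds stands for the array returned by the final sess.run(predict, ...) call; the numbers are illustrative, not actual output):

import numpy as np

preds = np.array([[0.02], [0.98], [0.97], [0.02]])  # illustrative values only
print((preds > 0.5).astype(int))                    # column of 0, 1, 1, 0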
Upvotes: 2