much_data = np.load('muchdata-50-50-20.npy',allow_pickle=True)

# If you are working with the basic sample data, use maybe 2 instead of 100 here... you don't have enough data to really do this
train_data = much_data[:-100]
validation_data = much_data[-100:]

def train_neural_network(x):
    prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )
    optimizer = tf.optimizers.Adam(learning_rate=1e-3).minimize(cost)

    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        successful_runs = 0
        total_runs = 0

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for data in train_data:
                total_runs += 1
                try:
                    X = data[0]
                    Y = data[1]
                    _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
                    epoch_loss += c
                    successful_runs += 1
                except Exception as e:
                    # I am passing for the sake of notebook space, but we are getting 1 shaping issue from one
                    # input tensor. Not sure why, will have to look into it. Guessing it's
                    # one of the depths that doesn't come to 20.
                    pass
                    #print(str(e))

            print('Epoch', epoch+1, 'completed out of', hm_epochs, 'loss:', epoch_loss)

            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))

        print('Done. Finishing accuracy:')
        print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))
        print('fitment percent:', successful_runs/total_runs)

# Run this locally:
train_neural_network(x)
Output:

TypeError                                 Traceback (most recent call last)
<ipython-input-22-a2ff083095aa> in <module>
     48
     49 # Run this locally:
---> 50 train_neural_network(x)

<ipython-input-22-a2ff083095aa> in train_neural_network(x)
      9     prediction = convolutional_neural_network(x)
     10     cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )
---> 11     optimizer = tf.optimizers.Adam(learning_rate=1e-3).minimize(cost)
     12
     13     hm_epochs = 10

TypeError: minimize() missing 1 required positional argument: 'var_list'
Upvotes: 0
Views: 1291
The syntax used for minimize() in your code is not correct, because this optimizer method needs at least two arguments: the loss to minimize and the var_list of variables it should update.

minimize(
    loss, var_list, grad_loss=None, name=None, tape=None
)
You can check the tf.keras.optimizers.Optimizer documentation for more details on minimize().
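For illustration, here is a minimal sketch of the TF 2.x calling convention (the variable w, the toy loss_fn, and the constants are made-up placeholders, not taken from the question): in eager mode, minimize() takes the loss as a zero-argument callable plus the list of variables it is allowed to update.

import tensorflow as tf

# Toy objective: minimize (w - 3)^2 with Adam.
w = tf.Variable(5.0)                          # placeholder; stands in for your model's trainable weights
opt = tf.optimizers.Adam(learning_rate=1e-3)

def loss_fn():
    # The loss is passed as a callable so the optimizer can re-evaluate it
    # under a gradient tape when it computes the update.
    return tf.square(w - 3.0)

opt.minimize(loss_fn, var_list=[w])           # one update step; var_list is required

If you want to keep the graph-style loop from the question (tf.Session, feed_dict, tf.global_variables_initializer), the TF 1.x-compatible optimizer tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3).minimize(cost) accepts just the loss tensor; mixing the 1.x-style session code with the 2.x Keras optimizer is what produces this TypeError.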
Upvotes: 1