Reputation: 221
I am trying to implement a multi-label sentence classification model using TensorFlow. There are around 1500 labels. The model works reasonably well; however, I am not sure about the metrics it generates.
This is the piece of code that generates metrics:
with tf.name_scope('loss'):
    losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores)  # only named arguments accepted
    self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

with tf.name_scope('accuracy'):
    correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
    self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name='accuracy')

with tf.name_scope('num_correct'):
    correct = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
    self.num_correct = tf.reduce_sum(tf.cast(correct, 'float'))

with tf.name_scope('fp'):
    fp = tf.metrics.false_positives(labels=tf.argmax(self.input_y, 1), predictions=self.predictions)
    self.fp = tf.reduce_sum(tf.cast(fp, 'float'), name='fp')

with tf.name_scope('fn'):
    fn = tf.metrics.false_negatives(labels=tf.argmax(self.input_y, 1), predictions=self.predictions)
    self.fn = tf.reduce_sum(tf.cast(fn, 'float'), name='fn')

with tf.name_scope('recall'):
    self.recall = self.num_correct / (self.num_correct + self.fn)

with tf.name_scope('precision'):
    self.precision = self.num_correct / (self.num_correct + self.fp)

with tf.name_scope('F1'):
    self.F1 = (2 * self.precision * self.recall) / (self.precision + self.recall)

with tf.name_scope('merged_summary'):
    tf.summary.scalar("loss", self.loss)
    tf.summary.scalar("accuracy", self.accuracy)
    tf.summary.scalar("recall", self.recall)
    tf.summary.scalar("precision", self.precision)
    tf.summary.scalar("f-measure", self.F1)
    self.merged_summary = tf.summary.merge_all()
Then, in the training part, I create the summary writer for TensorBoard:
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
Finally, the training logs and saves the metrics as follows:
for train_batch in train_batches:
    x_train_batch, y_train_batch = zip(*train_batch)
    train_step(x_train_batch, y_train_batch)
    current_step = tf.train.global_step(sess, global_step)

    # Evaluate the model with x_dev and y_dev
    if current_step % params['evaluate_every'] == 0:
        dev_batches = data_helper.batch_iter(list(zip(x_dev, y_dev)), params['batch_size'], 1)
        total_dev_correct = 0
        for dev_batch in dev_batches:
            x_dev_batch, y_dev_batch = zip(*dev_batch)
            acc, loss, num_dev_correct, predictions, recall, precision, f1, summary = dev_step(x_dev_batch, y_dev_batch)
            total_dev_correct += num_dev_correct

        accuracy = float(total_dev_correct) / len(y_dev)
        logging.info('Accuracy on dev set: {}'.format(accuracy))
        # added loss
        logging.info('Loss on dev set: {}'.format(loss))
        # adding more measures
        logging.info('Recall on dev set: {}'.format(recall))
        logging.info('Precision on dev set: {}'.format(precision))
        logging.info('F1 on dev set: {}'.format(f1))
        summary_writer.add_summary(summary, current_step)

        if accuracy >= best_accuracy:
            best_accuracy, best_loss, best_at_step, best_recall, best_precision, best_f1 = accuracy, loss, current_step, recall, precision, f1
            path = saver.save(sess, checkpoint_prefix, global_step=current_step)
            logging.critical('Saved model {} at step {}'.format(path, best_at_step))
            logging.critical('Best accuracy {} at step {}'.format(best_accuracy, best_at_step))
            logging.critical('Best loss {} at step {}'.format(best_loss, best_at_step))
            logging.critical('Best recall {} at step {}'.format(best_recall, best_at_step))
            logging.critical('Best precision {} at step {}'.format(best_precision, best_at_step))
            logging.critical('Best F1 {} at step {}'.format(best_f1, best_at_step))

logging.critical('Training is complete, testing the best model on x_test and y_test')
dev_step and train_step look as follows:
def train_step(x_batch, y_batch):
    feed_dict = {
        cnn_rnn.input_x: x_batch,
        cnn_rnn.input_y: y_batch,
        cnn_rnn.dropout_keep_prob: params['dropout_keep_prob'],
        cnn_rnn.batch_size: len(x_batch),
        cnn_rnn.pad: np.zeros([len(x_batch), 1, params['embedding_dim'], 1]),
        cnn_rnn.real_len: real_len(x_batch),
    }
    _, step, loss, accuracy = sess.run([train_op, global_step, cnn_rnn.loss, cnn_rnn.accuracy], feed_dict)

def dev_step(x_batch, y_batch):
    feed_dict = {
        cnn_rnn.input_x: x_batch,
        cnn_rnn.input_y: y_batch,
        cnn_rnn.dropout_keep_prob: 1.0,
        cnn_rnn.batch_size: len(x_batch),
        cnn_rnn.pad: np.zeros([len(x_batch), 1, params['embedding_dim'], 1]),
        cnn_rnn.real_len: real_len(x_batch),
    }
    step, loss, accuracy, num_correct, predictions, recall, precision, f1, summary = sess.run(
        [global_step, cnn_rnn.loss, cnn_rnn.accuracy, cnn_rnn.num_correct, cnn_rnn.predictions,
         cnn_rnn.recall, cnn_rnn.precision, cnn_rnn.F1, cnn_rnn.merged_summary], feed_dict)
    return accuracy, loss, num_correct, predictions, recall, precision, f1, summary
My question is: are the metrics generated properly for a multi-label classification problem, or should I go through a confusion matrix instead? If I should use a confusion matrix, should I add
tf.confusion_matrix(labels=, predictions=)
in the first part of the code, where I declare the metrics? If yes, what should I do next to get precision and recall?
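For what it's worth, my current understanding is that per-class precision and recall can be read off a confusion matrix (rows are real labels, columns are predictions, which is how tf.confusion_matrix lays it out), along the lines of this NumPy sketch with made-up counts:

import numpy as np

# cm[i, j] = number of samples whose real class is i and predicted class is j
cm = np.array([[5, 1, 0],
               [2, 3, 1],
               [0, 0, 4]])

true_positives = np.diag(cm).astype(float)
precision_per_class = true_positives / cm.sum(axis=0)  # column sums = predicted counts per class
recall_per_class = true_positives / cm.sum(axis=1)     # row sums = actual counts per class

print(precision_per_class, recall_per_class)

Is something like that the right next step here?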
Edit: I've added the following, but the resulting image in TensorBoard is just a black screen.
batch_confusion = tf.confusion_matrix(labels=tf.argmax(self.input_y, 1), predictions=self.predictions, name='batch_confusion', num_classes=num_classes)
confusion = tf.Variable(tf.zeros([num_classes, num_classes], dtype=tf.int32), name='confusion')
confusion_image = tf.reshape(tf.cast(confusion, tf.float32), [1, num_classes, num_classes, 1])
tf.summary.image('confusion', confusion_image)
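I suspect the image stays black because the confusion variable is initialised to zeros and never updated from batch_confusion, so the summary always shows an all-zero matrix. I'm considering replacing the last two lines above with something like this (untested sketch):

# Accumulate the per-batch counts into the running confusion matrix;
# confusion_update has to be fetched in sess.run for the update to happen.
confusion_update = tf.assign_add(confusion, batch_confusion, name='confusion_update')

# Normalise so the image stays visible as counts grow, and log the updated value
# rather than the raw (never-assigned) variable.
confusion_float = tf.cast(confusion_update, tf.float32)
confusion_image = tf.reshape(confusion_float / tf.reduce_max(confusion_float),
                             [1, num_classes, num_classes, 1])
tf.summary.image('confusion', confusion_image)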
Thanks for your help,
Upvotes: 2
Views: 5836
Reputation: 14704
The multi-label setting is quite different from the single-label setting in that you have to define what you mean by Positive: do you score every individual label decision equally, every class equally, or every class in proportion to how often it occurs?

Every individual label decision counts the same -> micro F1 score (axis=None in count_nonzero below, so true/false positives are pooled over all samples and labels)

Every class counts the same, however rare it is -> macro F1 score (axis=0, so the counts are taken per class and the per-class F1 scores are averaged)

Classes count in proportion to their support -> weighted F1 score (same per-class counts as the macro score, but averaged with the class supports as weights)
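As a toy illustration of the difference between these averaging modes (made-up arrays, using sklearn only to show the three averages; the last label is rare and mishandled on purpose):

import numpy as np
from sklearn.metrics import f1_score

y_true = np.array([[1, 0, 0],
                   [1, 0, 0],
                   [0, 1, 0],
                   [0, 1, 1]])
y_pred = np.array([[1, 0, 0],
                   [1, 0, 0],
                   [0, 1, 1],   # false positive on the rare label
                   [0, 1, 0]])  # false negative on the rare label

print(f1_score(y_true, y_pred, average='micro'))     # 0.80: pools every label decision
print(f1_score(y_true, y_pred, average='macro'))     # 0.67: the rare label drags the mean down
print(f1_score(y_true, y_pred, average='weighted'))  # 0.80: the rare label has little support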
From my answer to another SO question:
import numpy as np
import tensorflow as tf
from time import time
from sklearn.metrics import f1_score


def tf_f1_score(y_true, y_pred):
    """Computes 3 different f1 scores: micro, macro, weighted.

    micro: f1 score across the classes, as if all label decisions were pooled into one
    macro: mean of the f1 scores per class
    weighted: weighted average of the f1 scores per class,
        weighted by the support of each class

    Args:
        y_true (Tensor): labels, with shape (batch, num_classes)
        y_pred (Tensor): model's predictions, same shape as y_true

    Returns:
        tuple(Tensor): (micro, macro, weighted)
                       tuple of the computed f1 scores
    """
    f1s = [0, 0, 0]

    y_true = tf.cast(y_true, tf.float64)
    y_pred = tf.cast(y_pred, tf.float64)

    for i, axis in enumerate([None, 0]):
        TP = tf.count_nonzero(y_pred * y_true, axis=axis)
        FP = tf.count_nonzero(y_pred * (y_true - 1), axis=axis)
        FN = tf.count_nonzero((y_pred - 1) * y_true, axis=axis)

        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        f1 = 2 * precision * recall / (precision + recall)

        f1s[i] = tf.reduce_mean(f1)

    weights = tf.reduce_sum(y_true, axis=0)
    weights /= tf.reduce_sum(weights)

    f1s[2] = tf.reduce_sum(f1 * weights)

    micro, macro, weighted = f1s
    return micro, macro, weighted
def compare(nb, dims):
    labels = (np.random.randn(nb, dims) > 0.5).astype(int)
    predictions = (np.random.randn(nb, dims) > 0.5).astype(int)

    stime = time()
    mic = f1_score(labels, predictions, average='micro')
    mac = f1_score(labels, predictions, average='macro')
    wei = f1_score(labels, predictions, average='weighted')
    print('sklearn in {:.4f}:\n micro: {:.8f}\n macro: {:.8f}\n weighted: {:.8f}'.format(
        time() - stime, mic, mac, wei
    ))

    gtime = time()
    tf.reset_default_graph()
    y_true = tf.Variable(labels)
    y_pred = tf.Variable(predictions)
    micro, macro, weighted = tf_f1_score(y_true, y_pred)
    with tf.Session() as sess:
        tf.global_variables_initializer().run(session=sess)
        stime = time()
        mic, mac, wei = sess.run([micro, macro, weighted])
        print('tensorflow in {:.4f} ({:.4f} with graph time):\n micro: {:.8f}\n macro: {:.8f}\n weighted: {:.8f}'.format(
            time() - stime, time() - gtime, mic, mac, wei
        ))


compare(10 ** 6, 10)
outputs:
>> rows: 10^6 dimensions: 10
sklearn in 2.3939:
micro: 0.30890287
macro: 0.30890275
weighted: 0.30890279
tensorflow in 0.2465 (3.3246 with graph time):
micro: 0.30890287
macro: 0.30890275
weighted: 0.30890279
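To wire this into a graph like the one in the question, you would feed it the multi-hot labels and per-label binary predictions rather than argmax indices. A hypothetical sketch, assuming self.scores are per-label logits that you turn into multi-label predictions with a sigmoid and a 0.5 threshold (instead of the current argmax):

with tf.name_scope('f1'):
    # One independent 0/1 decision per label instead of a single argmax over ~1500 classes
    predicted_labels = tf.cast(tf.sigmoid(self.scores) > 0.5, tf.float64)
    micro, macro, weighted = tf_f1_score(self.input_y, predicted_labels)

    tf.summary.scalar('micro_f1', micro)
    tf.summary.scalar('macro_f1', macro)
    tf.summary.scalar('weighted_f1', weighted)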
Upvotes: 2