Reputation: 1579
I'm running some supervised experiments for a binary prediction problem. I'm using 10-fold cross-validation and evaluating performance in terms of mean average precision (the average precision scores of the individual folds summed and divided by the number of folds, 10 in my case). I would like to plot a PR curve representing this mean result over the 10 folds, but I'm not sure of the best way to do this.
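To be explicit about the metric, here is a tiny sketch with made-up per-fold scores (ap_per_fold is just a stand-in for the average precision values I collect during cross-validation):
ap_per_fold = [0.71, 0.68, 0.74, 0.70]   # made-up per-fold average precision scores
mean_average_precision = sum(ap_per_fold) / len(ap_per_fold)
print(mean_average_precision)            # mean of the per-fold AP scores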
A previous question in the Cross Validated Stack Exchange site raised this same problem. A comment recommended working through this example on plotting ROC curves across folds of cross validation from the Scikit-Learn site, and tailoring it to average precision. Here is the relevant section of code I've modified to try this idea:
from scipy import interp
# Other packages/functions are imported, but not crucial to the question

max_ent = LogisticRegression()

mean_precision = 0.0
mean_recall = np.linspace(0, 1, 100)
mean_average_precision = []

for i in set(folds):
    y_scores = max_ent.fit(X_train, y_train).decision_function(X_test)
    precision, recall, _ = precision_recall_curve(y_test, y_scores)
    average_precision = average_precision_score(y_test, y_scores)
    mean_average_precision.append(average_precision)
    mean_precision += interp(mean_recall, recall, precision)
    # After this line of code, inspecting the mean_precision array shows that
    # the majority of the elements equal 1. This is the part that is confusing me
    # and is contributing to the incorrect plot.

mean_precision /= len(set(folds))

# This is what the actual MAP score should be
mean_average_precision = sum(mean_average_precision) / len(mean_average_precision)

# Code for plotting the mean average precision curve across folds
plt.plot(mean_recall, mean_precision)
plt.title('Mean AP Over 10 folds (area=%0.2f)' % (mean_average_precision))
plt.show()
The code runs, however in my case the mean average precision curve is incorrect. For some reason, the array I have assigned to store the mean_precision scores (the mean_tpr variable in the ROC example) has its first element near zero and all other elements equal to 1 after dividing by the number of folds. Below is a visualization of the mean_precision scores plotted against the mean_recall scores: as you can see, the plot jumps to 1, which is inaccurate.
So my hunch is that something is going awry in the update of mean_precision (mean_precision += interp(mean_recall, recall, precision)) in each fold of cross-validation, but it's unclear how to fix this. Any guidance or help would be appreciated.
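For reference, here is a small standalone check outside of cross-validation, with made-up labels and scores, that seems to reproduce the odd interp output (interp here being numpy.interp, which is what scipy's interp points to as far as I can tell):
import numpy as np
from sklearn.metrics import precision_recall_curve

# made-up labels and scores, purely to poke at the interp behaviour
y_true   = np.array([0, 0, 1, 1, 0, 1, 1, 0])
y_scores = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7, 0.6, 0.55])

precision, recall, _ = precision_recall_curve(y_true, y_scores)
print(recall)      # recall comes back in decreasing order (1.0 ... 0.0)
print(precision)

# np.interp documents that its x-coordinates (second argument) must be
# increasing, so I suspect this call is where things go wrong for me
mean_recall = np.linspace(0, 1, 10)
print(np.interp(mean_recall, recall, precision))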
Upvotes: 15
Views: 17744
Reputation: 8856
Adding to @Dietmar's answer, I agree that it's mostly correct, except that instead of using sklearn.metrics.auc to compute the area under the precision-recall curve, I think we should be using sklearn.metrics.average_precision_score.
Supporting literature:

"For example, in PR space it is incorrect to linearly interpolate between points."

"We provide evidence in favor of computing AUCPR using the lower trapezoid, average precision, or interpolated median estimators."

From sklearn's documentation on average_precision_score:

"This implementation is not interpolated and is different from computing the area under the precision-recall curve with the trapezoidal rule, which uses linear interpolation and can be too optimistic."
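To make the difference concrete, here is a small toy comparison (random, made-up data) of the trapezoidal auc estimate against average_precision_score on the same scores:
import numpy as np
from sklearn.metrics import auc, average_precision_score, precision_recall_curve

# toy data, made up purely to illustrate the two estimators
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=200)
y_score = rng.rand(200) + 0.3 * y_true        # weakly informative scores

precision, recall, _ = precision_recall_curve(y_true, y_score)
trapezoid = auc(recall, precision)            # linear interpolation between PR points
ap = average_precision_score(y_true, y_score) # step-wise summation, no interpolation
print('trapezoidal AUC-PR: %.4f   average precision: %.4f' % (trapezoid, ap))
The exact numbers don't matter; the point is that the two estimators treat interpolation between PR points differently, which is what the references above are warning about.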
Here's a fully reproducible example which I hope can help others if they come across this thread:
import matplotlib.pyplot as plt
import numpy as np
from numpy import interp
import pandas as pd
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, auc, average_precision_score, confusion_matrix, roc_curve, precision_recall_curve
from sklearn.model_selection import KFold, train_test_split, RandomizedSearchCV, StratifiedKFold
from sklearn.svm import SVC
%matplotlib inline
def draw_cv_roc_curve(classifier, cv, X, y, title='ROC Curve'):
    """
    Draw a Cross Validated ROC Curve.
    Args:
        classifier: Classifier Object
        cv: StratifiedKFold Object: (https://stats.stackexchange.com/questions/49540/understanding-stratified-cross-validation)
        X: Feature Pandas DataFrame
        y: Response Pandas Series
    Example largely taken from http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html#sphx-glr-auto-examples-model-selection-plot-roc-crossval-py
    """
    # Creating ROC Curve with Cross Validation
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)

    i = 0
    for train, test in cv.split(X, y):
        probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
        # Compute ROC curve and area under the curve
        fpr, tpr, thresholds = roc_curve(y.iloc[test], probas_[:, 1])
        tprs.append(interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)
        plt.plot(fpr, tpr, lw=1, alpha=0.3,
                 label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
        i += 1

    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
             label='Luck', alpha=.8)

    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    plt.plot(mean_fpr, mean_tpr, color='b',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.8)

    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                     label=r'$\pm$ 1 std. dev.')

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title(title)
    plt.legend(loc="lower right")
    plt.show()
def draw_cv_pr_curve(classifier, cv, X, y, title='PR Curve'):
    """
    Draw a Cross Validated PR Curve.
    Args:
        classifier: Classifier Object
        cv: StratifiedKFold Object: (https://stats.stackexchange.com/questions/49540/understanding-stratified-cross-validation)
        X: Feature Pandas DataFrame
        y: Response Pandas Series
    Largely taken from: https://stackoverflow.com/questions/29656550/how-to-plot-pr-curve-over-10-folds-of-cross-validation-in-scikit-learn
    """
    y_real = []
    y_proba = []

    i = 0
    for train, test in cv.split(X, y):
        probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
        # Compute PR curve and average precision for this fold
        precision, recall, _ = precision_recall_curve(y.iloc[test], probas_[:, 1])
        # Plotting each individual PR Curve
        plt.plot(recall, precision, lw=1, alpha=0.3,
                 label='PR fold %d (AUC = %0.2f)' % (i, average_precision_score(y.iloc[test], probas_[:, 1])))
        y_real.append(y.iloc[test])
        y_proba.append(probas_[:, 1])
        i += 1

    y_real = np.concatenate(y_real)
    y_proba = np.concatenate(y_proba)

    precision, recall, _ = precision_recall_curve(y_real, y_proba)
    plt.plot(recall, precision, color='b',
             label=r'Precision-Recall (AUC = %0.2f)' % (average_precision_score(y_real, y_proba)),
             lw=2, alpha=.8)

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title(title)
    plt.legend(loc="lower right")
    plt.show()
# Create a fake example where X is a 1000 x 2 matrix
# and y is a 1000 x 1 vector
# Binary Classification Problem
FOLDS = 5

X, y = make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=10.0,
                  random_state=12345)
X = pd.DataFrame(X)
y = pd.DataFrame(y)

f, axes = plt.subplots(1, 2, figsize=(10, 5))

axes[0].scatter(X.loc[y.iloc[:, 0] == 0, 0], X.loc[y.iloc[:, 0] == 0, 1], color='blue', s=2, label='y=0')
axes[0].scatter(X.loc[y.iloc[:, 0] != 0, 0], X.loc[y.iloc[:, 0] != 0, 1], color='red', s=2, label='y=1')
axes[0].set_xlabel('X[:,0]')
axes[0].set_ylabel('X[:,1]')
axes[0].legend(loc='lower left', fontsize='small')

# Setting up simple RF Classifier
clf = RandomForestClassifier()

# Set up Stratified K Fold
cv = StratifiedKFold(n_splits=FOLDS)

draw_cv_roc_curve(clf, cv, X, y, title='Cross Validated ROC')
draw_cv_pr_curve(clf, cv, X, y, title='Cross Validated PR Curve')
Upvotes: 12
Reputation: 568
I had the same problem. Here is my solution: instead of averaging across the folds, I compute the precision_recall_curve on the results from all folds combined, after the loop. According to the discussion in https://stats.stackexchange.com/questions/34611/meanscores-vs-scoreconcatenation-in-cross-validation this is a generally preferable approach.
import matplotlib.pyplot as plt
import numpy
from sklearn.datasets import make_blobs
from sklearn.metrics import precision_recall_curve, auc
from sklearn.model_selection import KFold
from sklearn.svm import SVC

FOLDS = 5

X, y = make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=10.0,
                  random_state=12345)

f, axes = plt.subplots(1, 2, figsize=(10, 5))

axes[0].scatter(X[y == 0, 0], X[y == 0, 1], color='blue', s=2, label='y=0')
axes[0].scatter(X[y != 0, 0], X[y != 0, 1], color='red', s=2, label='y=1')
axes[0].set_xlabel('X[:,0]')
axes[0].set_ylabel('X[:,1]')
axes[0].legend(loc='lower left', fontsize='small')

k_fold = KFold(n_splits=FOLDS, shuffle=True, random_state=12345)
predictor = SVC(kernel='linear', C=1.0, probability=True, random_state=12345)

y_real = []
y_proba = []
for i, (train_index, test_index) in enumerate(k_fold.split(X)):
    Xtrain, Xtest = X[train_index], X[test_index]
    ytrain, ytest = y[train_index], y[test_index]
    predictor.fit(Xtrain, ytrain)
    pred_proba = predictor.predict_proba(Xtest)
    precision, recall, _ = precision_recall_curve(ytest, pred_proba[:, 1])
    lab = 'Fold %d AUC=%.4f' % (i+1, auc(recall, precision))
    axes[1].step(recall, precision, label=lab)
    y_real.append(ytest)
    y_proba.append(pred_proba[:, 1])

y_real = numpy.concatenate(y_real)
y_proba = numpy.concatenate(y_proba)
precision, recall, _ = precision_recall_curve(y_real, y_proba)
lab = 'Overall AUC=%.4f' % (auc(recall, precision))
axes[1].step(recall, precision, label=lab, lw=2, color='black')
axes[1].set_xlabel('Recall')
axes[1].set_ylabel('Precision')
axes[1].legend(loc='lower left', fontsize='small')

f.tight_layout()
f.savefig('result.png')
Upvotes: 25
Reputation: 1
I couldn't find an answer posted in other discussions, so hopefully this can help. The main thing was to reverse recall and precision before using interp:
reversed_recall = np.fliplr([recall])[0]
reversed_precision = np.fliplr([precision])[0]
reversed_mean_precision += interp(mean_recall, reversed_recall, reversed_precision)
reversed_mean_precision[0] = 0.0
And making sure to reverse back when plotting:
reversed_mean_precision /= FOLDS
reversed_mean_precision[0] = 1
mean_auc_pr = auc(mean_recall, reversed_mean_precision)
plt.plot(mean_recall, np.fliplr([reversed_mean_precision])[0], 'k--',
label='Mean precision (area = %0.2f)' % mean_auc_pr, lw=2)
The full code here:
# NOTE: this uses the older sklearn (cross_validation) and pandas (as_matrix) APIs.
# pred_features is a feature DataFrame and pred_true is the label Series (defined elsewhere).
import numpy as np
import matplotlib.pyplot as plt
from numpy import interp
from sklearn import cross_validation, metrics, svm
from sklearn.metrics import auc

FOLDS = 10

AUCs = []
AUCs_proba = []
precision_combined = []
recall_combined = []
thresholds_combined = []

X_ = pred_features.as_matrix()
Y_ = pred_true.as_matrix()

k_fold = cross_validation.KFold(n=len(pred_features), n_folds=FOLDS, shuffle=True, random_state=None)
clf = svm.SVC(kernel='linear', C=1.0)

mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []

reversed_mean_precision = 0.0
mean_recall = np.linspace(0, 1, 100)
all_precision = []

for train_index, test_index in k_fold:
    xtrain, xtest = pred_features.iloc[train_index], pred_features.iloc[test_index]
    ytrain, ytest = pred_true[train_index], pred_true[test_index]
    test_prob = clf.fit(xtrain, ytrain).predict(xtest)
    precision, recall, thresholds = metrics.precision_recall_curve(ytest, test_prob, pos_label=2)
    reversed_recall = np.fliplr([recall])[0]
    reversed_precision = np.fliplr([precision])[0]
    reversed_mean_precision += interp(mean_recall, reversed_recall, reversed_precision)
    reversed_mean_precision[0] = 0.0
    AUCs.append(metrics.auc(recall, precision))

plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')

reversed_mean_precision /= FOLDS
reversed_mean_precision[0] = 1
mean_auc_pr = auc(mean_recall, reversed_mean_precision)
plt.plot(mean_recall, np.fliplr([reversed_mean_precision])[0], 'k--',
         label='Mean precision (area = %0.2f)' % mean_auc_pr, lw=2)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall')
plt.legend(loc="lower right")
plt.show()

print("AUCs: ")
print(sum(AUCs) / float(len(AUCs)))
Upvotes: 0