
Commit 157479b
1 parent: ef4153b

Pushing the docs to dev/ for branch: main, commit 59dd128d4d26fff2ff197b8c1e801647a22e0158

1,554 files changed: +6138 / -6090 lines
61 binary files changed (contents not shown)

dev/_downloads/40f4aad91af595a370d7582e3a23bed7/plot_roc.ipynb

4 additions & 4 deletions

@@ -116,7 +116,7 @@
 },
 "outputs": [],
 "source": [
-"import matplotlib.pyplot as plt\n\nfrom sklearn.metrics import RocCurveDisplay\n\ndisplay = RocCurveDisplay.from_predictions(\n y_onehot_test[:, class_id],\n y_score[:, class_id],\n name=f\"{class_of_interest} vs the rest\",\n color=\"darkorange\",\n plot_chance_level=True,\n)\n_ = display.ax_.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=\"One-vs-Rest ROC curves:\\nVirginica vs (Setosa & Versicolor)\",\n)"
+"import matplotlib.pyplot as plt\n\nfrom sklearn.metrics import RocCurveDisplay\n\ndisplay = RocCurveDisplay.from_predictions(\n y_onehot_test[:, class_id],\n y_score[:, class_id],\n name=f\"{class_of_interest} vs the rest\",\n color=\"darkorange\",\n plot_chance_level=True,\n despine=True,\n)\n_ = display.ax_.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=\"One-vs-Rest ROC curves:\\nVirginica vs (Setosa & Versicolor)\",\n)"
 ]
 },
 {
@@ -152,7 +152,7 @@
 },
 "outputs": [],
 "source": [
-"display = RocCurveDisplay.from_predictions(\n y_onehot_test.ravel(),\n y_score.ravel(),\n name=\"micro-average OvR\",\n color=\"darkorange\",\n plot_chance_level=True,\n)\n_ = display.ax_.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=\"Micro-averaged One-vs-Rest\\nReceiver Operating Characteristic\",\n)"
+"display = RocCurveDisplay.from_predictions(\n y_onehot_test.ravel(),\n y_score.ravel(),\n name=\"micro-average OvR\",\n color=\"darkorange\",\n plot_chance_level=True,\n despine=True,\n)\n_ = display.ax_.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=\"Micro-averaged One-vs-Rest\\nReceiver Operating Characteristic\",\n)"
 ]
 },
 {
@@ -242,7 +242,7 @@
 },
 "outputs": [],
 "source": [
-"from itertools import cycle\n\nfig, ax = plt.subplots(figsize=(6, 6))\n\nplt.plot(\n fpr[\"micro\"],\n tpr[\"micro\"],\n label=f\"micro-average ROC curve (AUC = {roc_auc['micro']:.2f})\",\n color=\"deeppink\",\n linestyle=\":\",\n linewidth=4,\n)\n\nplt.plot(\n fpr[\"macro\"],\n tpr[\"macro\"],\n label=f\"macro-average ROC curve (AUC = {roc_auc['macro']:.2f})\",\n color=\"navy\",\n linestyle=\":\",\n linewidth=4,\n)\n\ncolors = cycle([\"aqua\", \"darkorange\", \"cornflowerblue\"])\nfor class_id, color in zip(range(n_classes), colors):\n RocCurveDisplay.from_predictions(\n y_onehot_test[:, class_id],\n y_score[:, class_id],\n name=f\"ROC curve for {target_names[class_id]}\",\n color=color,\n ax=ax,\n plot_chance_level=(class_id == 2),\n )\n\n_ = ax.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=\"Extension of Receiver Operating Characteristic\\nto One-vs-Rest multiclass\",\n)"
+"from itertools import cycle\n\nfig, ax = plt.subplots(figsize=(6, 6))\n\nplt.plot(\n fpr[\"micro\"],\n tpr[\"micro\"],\n label=f\"micro-average ROC curve (AUC = {roc_auc['micro']:.2f})\",\n color=\"deeppink\",\n linestyle=\":\",\n linewidth=4,\n)\n\nplt.plot(\n fpr[\"macro\"],\n tpr[\"macro\"],\n label=f\"macro-average ROC curve (AUC = {roc_auc['macro']:.2f})\",\n color=\"navy\",\n linestyle=\":\",\n linewidth=4,\n)\n\ncolors = cycle([\"aqua\", \"darkorange\", \"cornflowerblue\"])\nfor class_id, color in zip(range(n_classes), colors):\n RocCurveDisplay.from_predictions(\n y_onehot_test[:, class_id],\n y_score[:, class_id],\n name=f\"ROC curve for {target_names[class_id]}\",\n color=color,\n ax=ax,\n plot_chance_level=(class_id == 2),\n despine=True,\n )\n\n_ = ax.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=\"Extension of Receiver Operating Characteristic\\nto One-vs-Rest multiclass\",\n)"
 ]
 },
 {
@@ -271,7 +271,7 @@
 },
 "outputs": [],
 "source": [
-"pair_scores = []\nmean_tpr = dict()\n\nfor ix, (label_a, label_b) in enumerate(pair_list):\n a_mask = y_test == label_a\n b_mask = y_test == label_b\n ab_mask = np.logical_or(a_mask, b_mask)\n\n a_true = a_mask[ab_mask]\n b_true = b_mask[ab_mask]\n\n idx_a = np.flatnonzero(label_binarizer.classes_ == label_a)[0]\n idx_b = np.flatnonzero(label_binarizer.classes_ == label_b)[0]\n\n fpr_a, tpr_a, _ = roc_curve(a_true, y_score[ab_mask, idx_a])\n fpr_b, tpr_b, _ = roc_curve(b_true, y_score[ab_mask, idx_b])\n\n mean_tpr[ix] = np.zeros_like(fpr_grid)\n mean_tpr[ix] += np.interp(fpr_grid, fpr_a, tpr_a)\n mean_tpr[ix] += np.interp(fpr_grid, fpr_b, tpr_b)\n mean_tpr[ix] /= 2\n mean_score = auc(fpr_grid, mean_tpr[ix])\n pair_scores.append(mean_score)\n\n fig, ax = plt.subplots(figsize=(6, 6))\n plt.plot(\n fpr_grid,\n mean_tpr[ix],\n label=f\"Mean {label_a} vs {label_b} (AUC = {mean_score :.2f})\",\n linestyle=\":\",\n linewidth=4,\n )\n RocCurveDisplay.from_predictions(\n a_true,\n y_score[ab_mask, idx_a],\n ax=ax,\n name=f\"{label_a} as positive class\",\n )\n RocCurveDisplay.from_predictions(\n b_true,\n y_score[ab_mask, idx_b],\n ax=ax,\n name=f\"{label_b} as positive class\",\n plot_chance_level=True,\n )\n ax.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=f\"{target_names[idx_a]} vs {label_b} ROC curves\",\n )\n\nprint(f\"Macro-averaged One-vs-One ROC AUC score:\\n{np.average(pair_scores):.2f}\")"
+"pair_scores = []\nmean_tpr = dict()\n\nfor ix, (label_a, label_b) in enumerate(pair_list):\n a_mask = y_test == label_a\n b_mask = y_test == label_b\n ab_mask = np.logical_or(a_mask, b_mask)\n\n a_true = a_mask[ab_mask]\n b_true = b_mask[ab_mask]\n\n idx_a = np.flatnonzero(label_binarizer.classes_ == label_a)[0]\n idx_b = np.flatnonzero(label_binarizer.classes_ == label_b)[0]\n\n fpr_a, tpr_a, _ = roc_curve(a_true, y_score[ab_mask, idx_a])\n fpr_b, tpr_b, _ = roc_curve(b_true, y_score[ab_mask, idx_b])\n\n mean_tpr[ix] = np.zeros_like(fpr_grid)\n mean_tpr[ix] += np.interp(fpr_grid, fpr_a, tpr_a)\n mean_tpr[ix] += np.interp(fpr_grid, fpr_b, tpr_b)\n mean_tpr[ix] /= 2\n mean_score = auc(fpr_grid, mean_tpr[ix])\n pair_scores.append(mean_score)\n\n fig, ax = plt.subplots(figsize=(6, 6))\n plt.plot(\n fpr_grid,\n mean_tpr[ix],\n label=f\"Mean {label_a} vs {label_b} (AUC = {mean_score :.2f})\",\n linestyle=\":\",\n linewidth=4,\n )\n RocCurveDisplay.from_predictions(\n a_true,\n y_score[ab_mask, idx_a],\n ax=ax,\n name=f\"{label_a} as positive class\",\n )\n RocCurveDisplay.from_predictions(\n b_true,\n y_score[ab_mask, idx_b],\n ax=ax,\n name=f\"{label_b} as positive class\",\n plot_chance_level=True,\n despine=True,\n )\n ax.set(\n xlabel=\"False Positive Rate\",\n ylabel=\"True Positive Rate\",\n title=f\"{target_names[idx_a]} vs {label_b} ROC curves\",\n )\n\nprint(f\"Macro-averaged One-vs-One ROC AUC score:\\n{np.average(pair_scores):.2f}\")"
 ]
 },
 {
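
The only change in each of the four cells above is the new despine=True argument, which removes the top and right spines from the Matplotlib axes. A minimal, self-contained sketch of the same call follows; the toy dataset and names (clf, y_score) are illustrative stand-ins for the iris/OvR setup in plot_roc.ipynb, and despine=True assumes a scikit-learn version that supports it (the dev build these docs track):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay
from sklearn.model_selection import train_test_split

# Toy binary problem standing in for the example's iris data.
X, y = make_classification(n_samples=500, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)
y_score = clf.decision_function(X_test)

# despine=True trims the top and right spines, matching the updated cells.
RocCurveDisplay.from_predictions(
    y_test,
    y_score,
    name="LogisticRegression",
    plot_chance_level=True,
    despine=True,
)
plt.show()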
49 binary files changed (contents not shown)

dev/_downloads/764d061a261a2e06ad21ec9133361b2d/plot_precision_recall.ipynb

4 additions & 4 deletions

@@ -69,7 +69,7 @@
 },
 "outputs": [],
 "source": [
-"from sklearn.metrics import PrecisionRecallDisplay\n\ndisplay = PrecisionRecallDisplay.from_estimator(\n classifier, X_test, y_test, name=\"LinearSVC\", plot_chance_level=True\n)\n_ = display.ax_.set_title(\"2-class Precision-Recall curve\")"
+"from sklearn.metrics import PrecisionRecallDisplay\n\ndisplay = PrecisionRecallDisplay.from_estimator(\n classifier, X_test, y_test, name=\"LinearSVC\", plot_chance_level=True, despine=True\n)\n_ = display.ax_.set_title(\"2-class Precision-Recall curve\")"
 ]
 },
 {
@@ -87,7 +87,7 @@
 },
 "outputs": [],
 "source": [
-"y_score = classifier.decision_function(X_test)\n\ndisplay = PrecisionRecallDisplay.from_predictions(\n y_test, y_score, name=\"LinearSVC\", plot_chance_level=True\n)\n_ = display.ax_.set_title(\"2-class Precision-Recall curve\")"
+"y_score = classifier.decision_function(X_test)\n\ndisplay = PrecisionRecallDisplay.from_predictions(\n y_test, y_score, name=\"LinearSVC\", plot_chance_level=True, despine=True\n)\n_ = display.ax_.set_title(\"2-class Precision-Recall curve\")"
 ]
 },
 {
@@ -159,7 +159,7 @@
 },
 "outputs": [],
 "source": [
-"from collections import Counter\n\ndisplay = PrecisionRecallDisplay(\n recall=recall[\"micro\"],\n precision=precision[\"micro\"],\n average_precision=average_precision[\"micro\"],\n prevalence_pos_label=Counter(Y_test.ravel())[1] / Y_test.size,\n)\ndisplay.plot(plot_chance_level=True)\n_ = display.ax_.set_title(\"Micro-averaged over all classes\")"
+"from collections import Counter\n\ndisplay = PrecisionRecallDisplay(\n recall=recall[\"micro\"],\n precision=precision[\"micro\"],\n average_precision=average_precision[\"micro\"],\n prevalence_pos_label=Counter(Y_test.ravel())[1] / Y_test.size,\n)\ndisplay.plot(plot_chance_level=True, despine=True)\n_ = display.ax_.set_title(\"Micro-averaged over all classes\")"
 ]
 },
 {
@@ -177,7 +177,7 @@
 },
 "outputs": [],
 "source": [
-"from itertools import cycle\n\nimport matplotlib.pyplot as plt\n\n# setup plot details\ncolors = cycle([\"navy\", \"turquoise\", \"darkorange\", \"cornflowerblue\", \"teal\"])\n\n_, ax = plt.subplots(figsize=(7, 8))\n\nf_scores = np.linspace(0.2, 0.8, num=4)\nlines, labels = [], []\nfor f_score in f_scores:\n x = np.linspace(0.01, 1)\n y = f_score * x / (2 * x - f_score)\n (l,) = plt.plot(x[y >= 0], y[y >= 0], color=\"gray\", alpha=0.2)\n plt.annotate(\"f1={0:0.1f}\".format(f_score), xy=(0.9, y[45] + 0.02))\n\ndisplay = PrecisionRecallDisplay(\n recall=recall[\"micro\"],\n precision=precision[\"micro\"],\n average_precision=average_precision[\"micro\"],\n)\ndisplay.plot(ax=ax, name=\"Micro-average precision-recall\", color=\"gold\")\n\nfor i, color in zip(range(n_classes), colors):\n display = PrecisionRecallDisplay(\n recall=recall[i],\n precision=precision[i],\n average_precision=average_precision[i],\n )\n display.plot(ax=ax, name=f\"Precision-recall for class {i}\", color=color)\n\n# add the legend for the iso-f1 curves\nhandles, labels = display.ax_.get_legend_handles_labels()\nhandles.extend([l])\nlabels.extend([\"iso-f1 curves\"])\n# set the legend and the axes\nax.legend(handles=handles, labels=labels, loc=\"best\")\nax.set_title(\"Extension of Precision-Recall curve to multi-class\")\n\nplt.show()"
+"from itertools import cycle\n\nimport matplotlib.pyplot as plt\n\n# setup plot details\ncolors = cycle([\"navy\", \"turquoise\", \"darkorange\", \"cornflowerblue\", \"teal\"])\n\n_, ax = plt.subplots(figsize=(7, 8))\n\nf_scores = np.linspace(0.2, 0.8, num=4)\nlines, labels = [], []\nfor f_score in f_scores:\n x = np.linspace(0.01, 1)\n y = f_score * x / (2 * x - f_score)\n (l,) = plt.plot(x[y >= 0], y[y >= 0], color=\"gray\", alpha=0.2)\n plt.annotate(\"f1={0:0.1f}\".format(f_score), xy=(0.9, y[45] + 0.02))\n\ndisplay = PrecisionRecallDisplay(\n recall=recall[\"micro\"],\n precision=precision[\"micro\"],\n average_precision=average_precision[\"micro\"],\n)\ndisplay.plot(ax=ax, name=\"Micro-average precision-recall\", color=\"gold\")\n\nfor i, color in zip(range(n_classes), colors):\n display = PrecisionRecallDisplay(\n recall=recall[i],\n precision=precision[i],\n average_precision=average_precision[i],\n )\n display.plot(\n ax=ax, name=f\"Precision-recall for class {i}\", color=color, despine=True\n )\n\n# add the legend for the iso-f1 curves\nhandles, labels = display.ax_.get_legend_handles_labels()\nhandles.extend([l])\nlabels.extend([\"iso-f1 curves\"])\n# set the legend and the axes\nax.legend(handles=handles, labels=labels, loc=\"best\")\nax.set_title(\"Extension of Precision-Recall curve to multi-class\")\n\nplt.show()"
 ]
 }
 ],
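
The same flag is threaded through the PrecisionRecallDisplay calls, including the plot() method used when the curve is built from pre-computed arrays. A sketch of that constructor-plus-plot pattern on toy binary data (illustrative names only; the notebook itself works with a multi-label Y_test):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
    PrecisionRecallDisplay,
    average_precision_score,
    precision_recall_curve,
)
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, weights=[0.7], random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
y_score = LogisticRegression().fit(X_train, y_train).decision_function(X_test)

# Build the display from pre-computed precision/recall arrays,
# then plot with the new despine=True option.
precision, recall, _ = precision_recall_curve(y_test, y_score)
display = PrecisionRecallDisplay(
    precision=precision,
    recall=recall,
    average_precision=average_precision_score(y_test, y_score),
    prevalence_pos_label=y_test.mean(),  # required for the chance-level line
)
display.plot(plot_chance_level=True, despine=True)
plt.show()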
7 binary files changed (contents not shown)

dev/_downloads/80fef09514fd851560e999a5b7daa303/plot_roc.py

4 additions & 0 deletions

@@ -131,6 +131,7 @@
     name=f"{class_of_interest} vs the rest",
     color="darkorange",
     plot_chance_level=True,
+    despine=True,
 )
 _ = display.ax_.set(
     xlabel="False Positive Rate",
@@ -166,6 +167,7 @@
     name="micro-average OvR",
     color="darkorange",
     plot_chance_level=True,
+    despine=True,
 )
 _ = display.ax_.set(
     xlabel="False Positive Rate",
@@ -285,6 +287,7 @@
         color=color,
         ax=ax,
         plot_chance_level=(class_id == 2),
+        despine=True,
     )

 _ = ax.set(
@@ -366,6 +369,7 @@
         ax=ax,
         name=f"{label_b} as positive class",
         plot_chance_level=True,
+        despine=True,
     )
     ax.set(
         xlabel="False Positive Rate",
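
In the third hunk the flag is applied inside a loop that draws one one-vs-rest curve per class on a shared axes, with plot_chance_level enabled only on the final iteration so the diagonal appears once. A sketch of that shared-axes pattern on a hypothetical three-class problem (stand-in data and estimator, not the iris setup from plot_roc.py):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

n_classes = 3
X, y = make_classification(
    n_samples=600, n_classes=n_classes, n_informative=4, random_state=0
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
y_score = LogisticRegression().fit(X_train, y_train).predict_proba(X_test)
y_onehot_test = LabelBinarizer().fit(y_train).transform(y_test)

fig, ax = plt.subplots(figsize=(6, 6))
for class_id in range(n_classes):
    RocCurveDisplay.from_predictions(
        y_onehot_test[:, class_id],
        y_score[:, class_id],
        name=f"ROC curve for class {class_id}",
        ax=ax,
        # draw the chance-level diagonal only once, on the last curve
        plot_chance_level=(class_id == n_classes - 1),
        despine=True,
    )
plt.show()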
16 binary files changed (contents not shown)

dev/_downloads/98161c8b335acb98de356229c1005819/plot_precision_recall.py

6 additions & 4 deletions

@@ -147,7 +147,7 @@
 from sklearn.metrics import PrecisionRecallDisplay

 display = PrecisionRecallDisplay.from_estimator(
-    classifier, X_test, y_test, name="LinearSVC", plot_chance_level=True
+    classifier, X_test, y_test, name="LinearSVC", plot_chance_level=True, despine=True
 )
 _ = display.ax_.set_title("2-class Precision-Recall curve")

@@ -158,7 +158,7 @@
 y_score = classifier.decision_function(X_test)

 display = PrecisionRecallDisplay.from_predictions(
-    y_test, y_score, name="LinearSVC", plot_chance_level=True
+    y_test, y_score, name="LinearSVC", plot_chance_level=True, despine=True
 )
 _ = display.ax_.set_title("2-class Precision-Recall curve")

@@ -228,7 +228,7 @@
     average_precision=average_precision["micro"],
     prevalence_pos_label=Counter(Y_test.ravel())[1] / Y_test.size,
 )
-display.plot(plot_chance_level=True)
+display.plot(plot_chance_level=True, despine=True)
 _ = display.ax_.set_title("Micro-averaged over all classes")

 # %%
@@ -264,7 +264,9 @@
         precision=precision[i],
         average_precision=average_precision[i],
     )
-    display.plot(ax=ax, name=f"Precision-recall for class {i}", color=color)
+    display.plot(
+        ax=ax, name=f"Precision-recall for class {i}", color=color, despine=True
+    )

 # add the legend for the iso-f1 curves
 handles, labels = display.ax_.get_legend_handles_labels()