
Commit edc5656

committed
Pushing the docs to dev/ for branch: main, commit 84ac5d882d80a1edf4ea39f3f9266ad28758f1da
1 parent a4ca8b5 commit edc5656

File tree

1,559 files changed (+5987, -5991 lines)

[47 binary files changed; contents not shown]

dev/_downloads/2da0534ab0e0c8241033bcc2d912e419/plot_classifier_comparison.py

Lines changed: 1 addition & 1 deletion

@@ -64,7 +64,7 @@
         max_depth=5, n_estimators=10, max_features=1, random_state=42
     ),
     MLPClassifier(alpha=1, max_iter=1000, random_state=42),
-    AdaBoostClassifier(algorithm="SAMME", random_state=42),
+    AdaBoostClassifier(random_state=42),
     GaussianNB(),
     QuadraticDiscriminantAnalysis(),
 ]
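
Every hunk in this commit makes the same edit: the explicit algorithm="SAMME" argument is dropped from AdaBoostClassifier calls in the example gallery. This appears to track recent scikit-learn releases deprecating and then removing the algorithm parameter after SAMME.R was retired, leaving SAMME as the only (and therefore implicit) boosting algorithm. A minimal sketch of the updated call, assuming a scikit-learn version where the parameter is gone; the toy dataset is illustrative, not taken from the example:

    # AdaBoostClassifier without the retired `algorithm` argument; SAMME is implied.
    from sklearn.datasets import make_classification
    from sklearn.ensemble import AdaBoostClassifier

    X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=1)
    clf = AdaBoostClassifier(random_state=42).fit(X, y)
    print(clf.score(X, y))  # training accuracy of the boosted ensemble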
[7 binary files changed; contents not shown]

dev/_downloads/3438aba177365cb595921cf18806dfa7/plot_classifier_comparison.ipynb

Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@
     },
     "outputs": [],
     "source": [
-        "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n[... cell source unchanged; identical to plot_classifier_comparison.py ...]\n    AdaBoostClassifier(algorithm=\"SAMME\", random_state=42),\n[... remainder unchanged ...]"
+        "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n[... cell source unchanged; identical to plot_classifier_comparison.py ...]\n    AdaBoostClassifier(random_state=42),\n[... remainder unchanged ...]"
     ]
 }
],
[23 binary files changed; contents not shown]

dev/_downloads/4e46f015ab8300f262e6e8775bcdcf8a/plot_adaboost_multiclass.py

Lines changed: 0 additions & 1 deletion

@@ -80,7 +80,6 @@
 adaboost_clf = AdaBoostClassifier(
     estimator=weak_learner,
     n_estimators=n_estimators,
-    algorithm="SAMME",
     random_state=42,
 ).fit(X_train, y_train)
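
For the multiclass example the change is the same: only the algorithm line is dropped, while the explicit weak learner stays. A runnable sketch of the updated construction, with hypothetical toy data standing in for the example's X_train/y_train:

    # Updated AdaBoost call with an explicit weak learner and no `algorithm` argument.
    from sklearn.datasets import make_classification
    from sklearn.ensemble import AdaBoostClassifier
    from sklearn.tree import DecisionTreeClassifier

    # Toy stand-in for the example's training split (hypothetical data).
    X_train, y_train = make_classification(n_classes=3, n_informative=4, random_state=0)

    weak_learner = DecisionTreeClassifier(max_leaf_nodes=8)
    adaboost_clf = AdaBoostClassifier(
        estimator=weak_learner,
        n_estimators=300,
        random_state=42,
    ).fit(X_train, y_train)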

[19 binary files changed; contents not shown]

dev/_downloads/607c99671400a5055ef516d1aabd00c1/plot_adaboost_multiclass.ipynb

Lines changed: 1 addition & 1 deletion

@@ -69,7 +69,7 @@
     },
     "outputs": [],
     "source": [
-        "from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nweak_learner = DecisionTreeClassifier(max_leaf_nodes=8)\nn_estimators = 300\n\nadaboost_clf = AdaBoostClassifier(\n    estimator=weak_learner,\n    n_estimators=n_estimators,\n    algorithm=\"SAMME\",\n    random_state=42,\n).fit(X_train, y_train)"
+        "from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nweak_learner = DecisionTreeClassifier(max_leaf_nodes=8)\nn_estimators = 300\n\nadaboost_clf = AdaBoostClassifier(\n    estimator=weak_learner,\n    n_estimators=n_estimators,\n    random_state=42,\n).fit(X_train, y_train)"
     ]
 },
 {
[19 binary files changed; contents not shown]

dev/_downloads/7b2d436e94933f77577bba1962762f33/plot_forest_iris.py

Lines changed: 1 addition & 5 deletions

@@ -74,11 +74,7 @@
     DecisionTreeClassifier(max_depth=None),
     RandomForestClassifier(n_estimators=n_estimators),
     ExtraTreesClassifier(n_estimators=n_estimators),
-    AdaBoostClassifier(
-        DecisionTreeClassifier(max_depth=3),
-        n_estimators=n_estimators,
-        algorithm="SAMME",
-    ),
+    AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators),
 ]

 for pair in ([0, 1], [0, 2], [2, 3]):
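
If older example code is run against a scikit-learn version affected by this change, passing algorithm may either be merely deprecated or rejected outright, depending on the installed version. A hedged compatibility probe, not part of the example itself:

    # Probe whether the installed scikit-learn still accepts the `algorithm` argument.
    from sklearn.ensemble import AdaBoostClassifier

    try:
        AdaBoostClassifier(algorithm="SAMME")
        print("`algorithm` still accepted (a FutureWarning may appear at fit time).")
    except TypeError as exc:
        print("`algorithm` has been removed:", exc)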
[21 binary files changed; contents not shown]

dev/_downloads/985f759e90739932d90206b8295630d6/plot_forest_iris.ipynb

Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@
     },
     "outputs": [],
     "source": [
-        "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n[... cell source unchanged; identical to plot_forest_iris.py ...]\n    AdaBoostClassifier(\n        DecisionTreeClassifier(max_depth=3),\n        n_estimators=n_estimators,\n        algorithm=\"SAMME\",\n    ),\n[... remainder unchanged ...]"
+        "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n[... cell source unchanged; identical to plot_forest_iris.py ...]\n    AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators),\n[... remainder unchanged ...]"
     ]
 }
],
[34 binary files changed; contents not shown]

0 commit comments