Skip to content

Commit 8a4d0ba

Browse files
OSSEVALML-80 Python 3.10 audit (#3609)
* OSSEVALML-80_python-3-10 * OSSEVALML-80 Added Dockerfile, .dockerignore * OSSEVALML-80 small update to Dockerfile * OSSEVALML-80 release notes * OSSEVALML-80 updated release notes with PR * OSSEVALML-80 format fix * OSSEVALML-80 updated release notes * OSSEVALML-80 update install.md - Dockerfile.arm reference for M1 * Lint. Co-authored-by: Karsten Chu <[email protected]> Co-authored-by: chukarsten <[email protected]>
1 parent 4ebe97a commit 8a4d0ba

File tree

9 files changed

+95
-16
lines changed

9 files changed

+95
-16
lines changed

.dockerignore

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
__pycache__
2+
*.pyc
3+
*.pyo
4+
*.pyd
5+
.Python
6+
env
7+
pip-log.txt
8+
pip-delete-this-directory.txt
9+
.tox
10+
.coverage
11+
.coverage.*
12+
.cache
13+
nosetests.xml
14+
coverage.xml
15+
*.cover
16+
*.log
17+
.mypy_cache
18+
.pytest_cache
19+
.hypothesis

Dockerfile.arm

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
# Dockerfile for creating an image for testing python versions
2+
#
3+
# Notes:
4+
# - this is not an optimized Dockerfile
5+
# - it was built to provide a clean environment for running
6+
# tests on a mac M1, but could probably work on an older mac as well
7+
# - when running the tests, the makefile never exits at the end. For now,
8+
# just kill the container
9+
# Build: docker build --rm -t evalml_test . -f Dockerfile.arm
10+
# Test: docker run --rm -it evalml_test make test
11+
ARG VERSION=3.10-buster
12+
FROM --platform=linux/x86_64 python:$VERSION
13+
14+
ARG DOCKER_ARM=1
15+
ARG TIMEOUT=1200
16+
17+
RUN apt-get update && apt-get install -y git graphviz && rm -rf /var/lib/apt/lists/*
18+
19+
# Referenced in test_gen_utils.py to create a fixture, limiting chromium to a
20+
# single process. This is needed for kaleido in a container on an M1
21+
ENV DOCKER_ARM=$DOCKER_ARM
22+
23+
# Set the test timeout
24+
ENV TIMEOUT=$TIMEOUT
25+
26+
WORKDIR /workspace
27+
28+
COPY . .
29+
30+
RUN make installdeps-dev && make installdeps-prophet
31+
32+
CMD ["/bin/bash"]

Makefile

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
TIMEOUT ?= 300
2+
13
.PHONY: clean
24
clean:
35
find . -name '*.pyo' -delete
@@ -24,44 +26,44 @@ lint-fix:
2426

2527
.PHONY: test
2628
test:
27-
pytest evalml/ --doctest-modules --doctest-continue-on-failure --timeout 300
29+
pytest evalml/ --doctest-modules --doctest-continue-on-failure --timeout $(TIMEOUT)
2830

2931
.PHONY: test-no-parallel
3032
test-no-parallel:
31-
pytest evalml/ --doctest-modules --doctest-continue-on-failure --ignore=evalml/tests/automl_tests/parallel_tests --timeout 300
33+
pytest evalml/ --doctest-modules --doctest-continue-on-failure --ignore=evalml/tests/automl_tests/parallel_tests --timeout $(TIMEOUT)
3234

3335
.PHONY: test-parallel
3436
test-parallel:
35-
pytest evalml/tests/automl_tests/parallel_tests/ --timeout 300 --durations 0
37+
pytest evalml/tests/automl_tests/parallel_tests/ --timeout $(TIMEOUT) --durations 0
3638

3739
.PHONY: doctests
3840
doctests:
3941
pytest evalml --ignore evalml/tests -n 2 --durations 0 --doctest-modules --doctest-continue-on-failure
4042

4143
.PHONY: git-test-parallel
4244
git-test-parallel:
43-
pytest evalml/tests/automl_tests/parallel_tests/ -n 1 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-parallel-junit.xml --timeout 300 --durations 0
45+
pytest evalml/tests/automl_tests/parallel_tests/ -n 1 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-parallel-junit.xml --timeout $(TIMEOUT) --durations 0
4446

4547
.PHONY: git-test-automl
4648
git-test-automl:
47-
pytest evalml/tests/automl_tests evalml/tests/tuner_tests -n 2 --ignore=evalml/tests/automl_tests/parallel_tests --durations 0 --timeout 300 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-automl-junit.xml
49+
pytest evalml/tests/automl_tests evalml/tests/tuner_tests -n 2 --ignore=evalml/tests/automl_tests/parallel_tests --durations 0 --timeout $(TIMEOUT) --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-automl-junit.xml
4850

4951
.PHONY: git-test-modelunderstanding
5052
git-test-modelunderstanding:
51-
pytest evalml/tests/model_understanding_tests -n 2 --durations 0 --timeout 300 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-modelunderstanding-junit.xml
53+
pytest evalml/tests/model_understanding_tests -n 2 --durations 0 --timeout $(TIMEOUT) --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-modelunderstanding-junit.xml
5254

5355
.PHONY: git-test-other
5456
git-test-other:
55-
pytest evalml/tests --ignore evalml/tests/automl_tests/ --ignore evalml/tests/tuner_tests/ --ignore evalml/tests/model_understanding_tests/ --ignore evalml/tests/pipeline_tests/ --ignore evalml/tests/utils_tests/ --ignore evalml/tests/component_tests/test_prophet_regressor.py --ignore evalml/tests/component_tests/test_components.py --ignore evalml/tests/component_tests/test_utils.py --ignore evalml/tests/integration_tests/ -n 2 --durations 0 --timeout 300 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-other-junit.xml
57+
pytest evalml/tests --ignore evalml/tests/automl_tests/ --ignore evalml/tests/tuner_tests/ --ignore evalml/tests/model_understanding_tests/ --ignore evalml/tests/pipeline_tests/ --ignore evalml/tests/utils_tests/ --ignore evalml/tests/component_tests/test_prophet_regressor.py --ignore evalml/tests/component_tests/test_components.py --ignore evalml/tests/component_tests/test_utils.py --ignore evalml/tests/integration_tests/ -n 2 --durations 0 --timeout $(TIMEOUT) --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-other-junit.xml
5658
make doctests
5759

5860
.PHONY: git-test-prophet
5961
git-test-prophet:
60-
pytest evalml/tests/component_tests/test_prophet_regressor.py evalml/tests/component_tests/test_components.py evalml/tests/component_tests/test_utils.py evalml/tests/pipeline_tests/ evalml/tests/utils_tests/ -n 2 --durations 0 --timeout 300 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-prophet-junit.xml
62+
pytest evalml/tests/component_tests/test_prophet_regressor.py evalml/tests/component_tests/test_components.py evalml/tests/component_tests/test_utils.py evalml/tests/pipeline_tests/ evalml/tests/utils_tests/ -n 2 --durations 0 --timeout $(TIMEOUT) --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-prophet-junit.xml
6163

6264
.PHONY: git-test-integration
6365
git-test-integration:
64-
pytest evalml/tests/integration_tests -n 2 --durations 0 --timeout 300 --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-integration-junit.xml
66+
pytest evalml/tests/integration_tests -n 2 --durations 0 --timeout $(TIMEOUT) --cov=evalml --cov-config=pyproject.toml --junitxml=test-reports/git-test-integration-junit.xml
6567

6668

6769
.PHONY: installdeps

docs/source/install.md

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ $ conda install -c conda-forge alteryx-open-src-update-checker
9393
```
9494
````
9595

96-
## Time Series support with Facebook's Prophet
96+
## Time Series support with Facebook's Prophet
9797

9898
To support the `Prophet` time series estimator, be sure to install it as an extra requirement. Please note that this may take a few minutes.
9999
Prophet is currently only supported via pip installation in EvalML for Mac with CmdStan as a backend.
@@ -152,8 +152,10 @@ brew install graphviz
152152

153153
Not all of EvalML's dependencies support Apple's new M1 chip. For this reason, `pip` or `conda` installing EvalML will
154154
fail. The core set of EvalML dependencies can be installed in the M1 chip, so we recommend you install EvalML with core
155-
dependencies.
155+
dependencies.
156156

157157
Alternatively, there is experimental support for M1 chips with the Rosetta terminal. After setting up a Rosetta terminal, you should be able to `pip` or `conda` install EvalML.
158158

159+
For Docker fans, an included `Dockerfile.arm` can be built and run to provide an environment for testing. Details are included within.
160+
159161
+++

docs/source/release_notes.rst

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,12 +29,15 @@ Release Notes
2929
* Added the option to set the ``sp`` parameter for ARIMA models :pr:`3597`
3030
* Updated the CV split size of time series problems to match forecast horizon for improved performance :pr:`3616`
3131
* Added holdout set evaluation as part of AutoML search and pipeline ranking :pr:`3499`
32+
* Added Dockerfile.arm and .dockerignore for python version and M1 testing :pr:`3609`
33+
* Added ``test_gen_utils::in_container_arm64()`` fixture :pr:`3609`
3234
* Fixes
3335
* Fixed iterative graphs not appearing in documentation :pr:`3592`
3436
* Updated the ``load_diabetes()`` method to account for scikit-learn 1.1.1 changes to the dataset :pr:`3591`
3537
* Capped woodwork version at < 0.17.0 :pr:`3612`
3638
* Bump minimum scikit-optimize version to 0.9.0 :pr:`3614`
3739
* Invalid target data checks involving regression and unsupported data types now produce a different ``DataCheckMessageCode`` :pr:`3630`
40+
* Updated ``test_data_checks.py::test_data_checks_raises_value_errors_on_init`` - more lenient text check :pr:`3609`
3841
* Changes
3942
* Add pre-commit hooks for linting :pr:`3608`
4043
* Implemented a lower threshold and window size for the ``TimeSeriesRegularizer`` and ``DatetimeFormatDataCheck`` :pr:`3627`
@@ -43,6 +46,13 @@ Release Notes
4346
* Testing Changes
4447
* Pinned GraphViz version for Windows CI Test :pr:`3596`
4548
* Removed ``pytest.mark.skip_if_39`` pytest marker :pr:`3602` :pr:`3607`
49+
* Updated pytest==7.1.2 :pr:`3609`
50+
* Added Dockerfile.arm and .dockerignore for python version and M1 testing :pr:`3609`
51+
* Added ``test_gen_utils::in_container_arm64()`` fixture :pr:`3609`
52+
53+
.. warning::
54+
55+
**Breaking Changes**
4656
* Refactored test cases that iterate over all components to use ``pytest.mark.parametrise`` and changed the corresponding ``if...continue`` blocks to ``pytest.mark.xfail`` :pr:`3622`
4757

4858

evalml/tests/data_checks_tests/test_data_checks.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -638,19 +638,19 @@ def validate(self, X, y=None):
638638
[MockCheck],
639639
{"mock_check": {"foo": 1}},
640640
DataCheckInitError,
641-
r"Encountered the following error while initializing mock_check: __init__\(\) missing 1 required positional argument: 'bar'",
641+
r"Encountered the following error while initializing mock_check: .*__init__\(\) missing 1 required positional argument: 'bar'",
642642
),
643643
(
644644
[MockCheck],
645645
{"mock_check": {"Bar": 2}},
646646
DataCheckInitError,
647-
r"Encountered the following error while initializing mock_check: __init__\(\) got an unexpected keyword argument 'Bar'",
647+
r"Encountered the following error while initializing mock_check: .*__init__\(\) got an unexpected keyword argument 'Bar'",
648648
),
649649
(
650650
[MockCheck],
651651
{"mock_check": {"fo": 3, "ba": 4}},
652652
DataCheckInitError,
653-
r"Encountered the following error while initializing mock_check: __init__\(\) got an unexpected keyword argument 'fo'",
653+
r"Encountered the following error while initializing mock_check: .*__init__\(\) got an unexpected keyword argument 'fo'",
654654
),
655655
(
656656
[MockCheck],

evalml/tests/dependency_update_check/minimum_test_requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ pmdarima==1.8.1
2626
pytest-cov==2.10.1
2727
pytest-timeout==1.4.2
2828
pytest-xdist==2.1.0
29-
pytest==6.0.1
29+
pytest==7.1.2
3030
pyzmq==20.0.0
3131
requirements-parser==0.2.0
3232
scikit-learn==1.1.2

evalml/tests/utils_tests/test_gen_utils.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,19 @@
3030
)
3131

3232

33+
@pytest.fixture(scope="module")
34+
def in_container_arm64():
35+
"""Helper fixture to run chromium as a single process for kaleido.
36+
37+
The env var is set in the Dockerfile.arm for the purposes of local
38+
testing in a container on a mac M1, otherwise it's a noop.
39+
"""
40+
if os.getenv("DOCKER_ARM", None):
41+
import plotly.io as pio
42+
43+
pio.kaleido.scope.chromium_args += ("--single-process",)
44+
45+
3346
@patch("importlib.import_module")
3447
def test_import_or_raise_errors(dummy_importlib):
3548
def _mock_import_function(library_str):
@@ -345,6 +358,7 @@ def test_rename_column_names_to_numeric():
345358
],
346359
)
347360
def test_save_plotly_static_default_format(
361+
in_container_arm64,
348362
file_name,
349363
format,
350364
interactive,

setup.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ dev =
8787
%(docs)s
8888

8989
test =
90-
pytest == 6.0.1
90+
pytest == 7.1.2
9191
pytest-xdist == 2.1.0
9292
pytest-timeout == 1.4.2
9393
pytest-cov == 2.10.1

0 commit comments

Comments
 (0)