diff --git a/.github/workflows/build-wheels-aarch64-linux.yml b/.github/workflows/build-wheels-aarch64-linux.yml index abc378f9061..b8729058ec8 100644 --- a/.github/workflows/build-wheels-aarch64-linux.yml +++ b/.github/workflows/build-wheels-aarch64-linux.yml @@ -32,7 +32,7 @@ jobs: test-infra-ref: main with-cuda: disabled with-rocm: disabled - python-versions: '["3.10", "3.11", "3.12"]' + python-versions: '["3.10", "3.11", "3.12", "3.13"]' build: needs: generate-matrix diff --git a/.github/workflows/build-wheels-linux.yml b/.github/workflows/build-wheels-linux.yml index 8509ba52cb9..a149c4f5df0 100644 --- a/.github/workflows/build-wheels-linux.yml +++ b/.github/workflows/build-wheels-linux.yml @@ -32,7 +32,7 @@ jobs: test-infra-ref: main with-cuda: disabled with-rocm: disabled - python-versions: '["3.10", "3.11", "3.12"]' + python-versions: '["3.10", "3.11", "3.12", "3.13"]' build: needs: generate-matrix diff --git a/.github/workflows/build-wheels-macos.yml b/.github/workflows/build-wheels-macos.yml index 8db10c0335b..16da31ddd6d 100644 --- a/.github/workflows/build-wheels-macos.yml +++ b/.github/workflows/build-wheels-macos.yml @@ -32,7 +32,7 @@ jobs: test-infra-ref: main with-cuda: disabled with-rocm: disabled - python-versions: '["3.10", "3.11", "3.12"]' + python-versions: '["3.10", "3.11", "3.12", "3.13"]' build: needs: generate-matrix diff --git a/.github/workflows/build-wheels-windows.yml b/.github/workflows/build-wheels-windows.yml index 276edfb08d1..9bf9b71f693 100644 --- a/.github/workflows/build-wheels-windows.yml +++ b/.github/workflows/build-wheels-windows.yml @@ -27,7 +27,7 @@ jobs: test-infra-ref: main with-cuda: disabled with-rocm: disabled - python-versions: '["3.10", "3.11", "3.12"]' + python-versions: '["3.10", "3.11", "3.12", "3.13"]' build: needs: generate-matrix diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index c9dd6a0b734..d3803aae6e9 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -22,7 
+22,7 @@ jobs: # strategy: # fail-fast: false # matrix: - # python-version: [ "3.10", "3.11", "3.12" ] + # python-version: [ "3.10", "3.11", "3.12", "3.13" ] # with: # runner: linux.2xlarge # docker-image: ci-image:executorch-ubuntu-22.04-qnn-sdk diff --git a/README-wheel.md b/README-wheel.md index 719f753039f..a1e70a2daef 100644 --- a/README-wheel.md +++ b/README-wheel.md @@ -5,7 +5,7 @@ ExecuTorch is to enable wider customization and deployment capabilities of the PyTorch programs. The `executorch` pip package is in beta. -* Supported python versions: 3.10, 3.11, 3.12 +* Supported python versions: 3.10, 3.11, 3.12, 3.13 * Compatible systems: Linux x86_64, macOS aarch64 The prebuilt `executorch.runtime` module included in this package provides a way diff --git a/docs/source/backends/coreml/coreml-troubleshooting.md b/docs/source/backends/coreml/coreml-troubleshooting.md index d2b2a614836..0c764b9d51b 100644 --- a/docs/source/backends/coreml/coreml-troubleshooting.md +++ b/docs/source/backends/coreml/coreml-troubleshooting.md @@ -7,11 +7,6 @@ This page describes common issues that you may encounter when using the Core ML This happens because the model is in FP16, but Core ML interprets some of the arguments as FP32, which leads to a type mismatch. The solution is to keep the PyTorch model in FP32. Note that the model will be still be converted to FP16 during lowering to Core ML unless specified otherwise in the compute_precision [Core ML `CompileSpec`](coreml-partitioner.md#coreml-compilespec). Also see the [related issue in coremltools](https://github.com/apple/coremltools/issues/2480). -2. coremltools/converters/mil/backend/mil/load.py", line 499, in export - raise RuntimeError("BlobWriter not loaded") - -If you're using Python 3.13, try reducing your python version to Python 3.12. coremltools does not support Python 3.13 per [coremltools issue #2487](https://github.com/apple/coremltools/issues/2487). - ### Issues during runtime 1. 
[ETCoreMLModelCompiler.mm:55] [Core ML] Failed to compile model, error = Error Domain=com.apple.mlassetio Code=1 "Failed to parse the model specification. Error: Unable to parse ML Program: at unknown location: Unknown opset 'CoreML7'." UserInfo={NSLocalizedDescription=Failed to par$ diff --git a/docs/source/getting-started.md b/docs/source/getting-started.md index c421c0a7a46..7e5d658a559 100644 --- a/docs/source/getting-started.md +++ b/docs/source/getting-started.md @@ -8,7 +8,7 @@ This section is intended to describe the necessary steps to take a PyTorch model ## System Requirements The following are required to install the ExecuTorch host libraries, needed to export models and run from Python. Requirements for target end-user devices are backend dependent. See the appropriate backend documentation for more information. -- Python 3.10 - 3.12 +- Python 3.10 - 3.13 - g++ version 7 or higher, clang++ version 5 or higher, or another C++17-compatible toolchain. - Linux (x86_64 or ARM64), macOS (ARM64), or Windows (x86_64). - Intel-based macOS systems require building PyTorch from source (see [Building From Source](using-executorch-building-from-source.md) for instructions). 
diff --git a/docs/source/quick-start-section.md b/docs/source/quick-start-section.md index b35bed8d22c..b6940d2acef 100644 --- a/docs/source/quick-start-section.md +++ b/docs/source/quick-start-section.md @@ -17,7 +17,7 @@ Follow these guides in order to get started with ExecuTorch: ## Prerequisites -- Python 3.10-3.12 +- Python 3.10-3.13 - PyTorch 2.9+ - Basic familiarity with PyTorch model development diff --git a/docs/source/raspberry_pi_llama_tutorial.md b/docs/source/raspberry_pi_llama_tutorial.md index e37bbb61c06..1e886db694a 100644 --- a/docs/source/raspberry_pi_llama_tutorial.md +++ b/docs/source/raspberry_pi_llama_tutorial.md @@ -4,7 +4,7 @@ This tutorial demonstrates how to deploy **Llama models on Raspberry Pi 4/5 devices** using ExecuTorch: -- **Prerequisites**: Linux host machine, Python 3.10-3.12, conda environment, Raspberry Pi 4/5 +- **Prerequisites**: Linux host machine, Python 3.10-3.13, conda environment, Raspberry Pi 4/5 - **Setup**: Automated cross-compilation using `setup.sh` script for ARM toolchain installation - **Export**: Convert Llama models to optimized `.pte` format with quantization options - **Deploy**: Transfer binaries to Raspberry Pi and configure runtime libraries @@ -19,7 +19,7 @@ This tutorial demonstrates how to deploy **Llama models on Raspberry Pi 4/5 devi **Software Dependencies**: -- **Python 3.10-3.12** (ExecuTorch requirement) +- **Python 3.10-3.13** (ExecuTorch requirement) - **conda** or **venv** for environment management - **CMake 3.29.6+** - **Git** for repository cloning @@ -42,7 +42,7 @@ uname -s # Should output: Linux uname -m # Should output: x86_64 # Check Python version -python3 --version # Should be 3.10-3.12 +python3 --version # Should be 3.10-3.13 # Check required tools hash cmake git md5sum 2>/dev/null || echo "Missing required tools" diff --git a/docs/source/using-executorch-building-from-source.md b/docs/source/using-executorch-building-from-source.md index 09980a94cdc..8e1772086de 100644 --- 
a/docs/source/using-executorch-building-from-source.md +++ b/docs/source/using-executorch-building-from-source.md @@ -28,7 +28,7 @@ ExecuTorch is tested on the following systems, although it should also work in s - Otherwise, Python's built-in virtual environment manager `python venv` is a good alternative. * `g++` version 7 or higher, `clang++` version 5 or higher, or another C++17-compatible toolchain. -* `python` version 3.10-3.12 +* `python` version 3.10-3.13 * `ccache` (optional) - A compiler cache that speeds up recompilation * **macOS** - `Xcode Command Line Tools` diff --git a/pyproject.toml b/pyproject.toml index 79b442aa37b..3d0fa75f80b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,11 +47,11 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ] # Python dependencies required for use. -# coremltools has issue with python 3.13, see https://github.com/apple/coremltools/issues/2487 -requires-python = ">=3.10,<3.13" +requires-python = ">=3.10,<3.14" dependencies=[ "expecttest", "flatbuffers", @@ -132,4 +133,4 @@ first_party_detection = false # Emit syntax compatible with older versions of python instead of only the range # specified by `requires-python`. TODO: Remove this once we support these older # versions of python and can expand the `requires-python` range. -target-version = ["py38", "py39", "py310", "py311", "py312"] +target-version = ["py38", "py39", "py310", "py311", "py312", "py313"]