Skip to content

Commit 9d5b9ad

Browse files
authored
Add back AOPerModuleConfig for BC (#2282)
Summary: As titled — this is a temporary re-export so that downstream integrations keep working. Test Plan: tests in other libraries. Reviewers: — Subscribers: — Tasks: — Tags: —
1 parent e51ffd9 commit 9d5b9ad

File tree

2 files changed

+7
-3
lines changed

2 files changed

+7
-3
lines changed

test/integration/test_vllm.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,10 @@
1717
import torch
1818

1919
from packaging import version
20-
from torchao.utils import TORCH_VERSION_AT_LEAST_2_7
20+
from torchao.utils import TORCH_VERSION_AT_LEAST_2_8
2121

22-
if not TORCH_VERSION_AT_LEAST_2_7:
23-
pytest.skip("Requires PyTorch 2.7 or higher", allow_module_level=True)
22+
if not TORCH_VERSION_AT_LEAST_2_8:
23+
pytest.skip("Requires PyTorch 2.8 or higher", allow_module_level=True)
2424

2525

2626
VLLM_AVAILABLE = importlib.util.find_spec("vllm") is not None

torchao/quantization/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,9 @@
108108
)
109109
from .weight_only import WeightOnlyInt8QuantLinear
110110

111+
# TODO: remove after migration of APIs are done
112+
AOPerModuleConfig = ModuleFqnToConfig
113+
111114
__all__ = [
112115
# top level API - auto
113116
"autoquant",
@@ -148,6 +151,7 @@
148151
"IntxWeightOnlyConfig",
149152
"FPXWeightOnlyConfig",
150153
"GemliteUIntXWeightOnlyConfig",
154+
"AOPerModuleConfig",
151155
"ModuleFqnToConfig",
152156
"FbgemmConfig",
153157
# smooth quant - subject to change

0 commit comments

Comments (0)