Skip to content

Commit 010b705

Browse files
[pre-commit.ci] pre-commit autoupdate (#2166)
* [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pycqa/isort: 5.13.2 → 6.0.1](PyCQA/isort@5.13.2...6.0.1) - [github.com/psf/black.git: 24.10.0 → 25.1.0](https://github.com/psf/black.git/compare/24.10.0...25.1.0) - [github.com/codespell-project/codespell: v2.3.0 → v2.4.1](codespell-project/codespell@v2.3.0...v2.4.1) - [github.com/astral-sh/ruff-pre-commit: v0.8.6 → v0.11.4](astral-sh/ruff-pre-commit@v0.8.6...v0.11.4) Signed-off-by: Sun, Xuehao <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent e9bd2e7 commit 010b705

File tree

8 files changed

+21
-21
lines changed

8 files changed

+21
-21
lines changed

.pre-commit-config.yaml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ repos:
7272
name: Unused noqa
7373

7474
- repo: https://github.com/pycqa/isort
75-
rev: 5.13.2
75+
rev: 6.0.1
7676
hooks:
7777
- id: isort
7878
exclude: |
@@ -101,7 +101,7 @@ repos:
101101
)$
102102
103103
- repo: https://github.com/psf/black.git
104-
rev: 24.10.0
104+
rev: 25.1.0
105105
hooks:
106106
- id: black
107107
files: (.*\.py)$
@@ -130,7 +130,7 @@ repos:
130130
)$
131131
132132
- repo: https://github.com/codespell-project/codespell
133-
rev: v2.3.0
133+
rev: v2.4.1
134134
hooks:
135135
- id: codespell
136136
args: [-w]
@@ -149,7 +149,7 @@ repos:
149149
)$
150150
151151
- repo: https://github.com/astral-sh/ruff-pre-commit
152-
rev: v0.8.6
152+
rev: v0.11.4
153153
hooks:
154154
- id: ruff
155155
args: [--fix, --exit-non-zero-on-fix, --no-cache]

examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ We also upstreamed several INT8 models into HuggingFace [model hub](https://hugg
105105
2. User specifies fp32 'model', calibration dataset 'q_dataloader' and a custom "eval_func" which encapsulates the evaluation dataset and metrics by itself.
106106

107107
## 2. Code Prepare
108-
We update `run_glue.py` like belows:
108+
We update `run_glue.py` like below:
109109

110110
```python
111111
trainer = QuestionAnsweringTrainer(

neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -483,7 +483,7 @@ def clear_memo(self):
483483
The memo is the data structure that remembers which objects the
484484
pickler has already seen, so that shared or recursive objects
485485
are pickled by reference and not by value. This method is
486-
useful when re-using picklers.
486+
useful when reusing picklers.
487487
"""
488488
self.memo.clear()
489489

neural_compressor/common/utils/utility.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -108,13 +108,13 @@ def __init__(self):
108108
max_extension_support = cpuid.get_max_extension_support()
109109
if max_extension_support >= 7:
110110
ecx = cpuid._run_asm(
111-
b"\x31\xC9", # xor ecx, ecx
112-
b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret
111+
b"\x31\xc9", # xor ecx, ecx
112+
b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret
113113
)
114114
self._vnni = bool(ecx & (1 << 11))
115115
eax = cpuid._run_asm(
116-
b"\xB9\x01\x00\x00\x00", # mov ecx, 1
117-
b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret
116+
b"\xb9\x01\x00\x00\x00", # mov ecx, 1
117+
b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret
118118
)
119119
self._bf16 = bool(eax & (1 << 5))
120120
self._info = info

neural_compressor/tensorflow/utils/utility.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -220,13 +220,13 @@ def __init__(self):
220220
max_extension_support = cpuid.get_max_extension_support()
221221
if max_extension_support >= 7:
222222
ecx = cpuid._run_asm(
223-
b"\x31\xC9", # xor ecx, ecx
224-
b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret
223+
b"\x31\xc9", # xor ecx, ecx
224+
b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret
225225
)
226226
self._vnni = bool(ecx & (1 << 11))
227227
eax = cpuid._run_asm(
228-
b"\xB9\x01\x00\x00\x00", # mov ecx, 1
229-
b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret
228+
b"\xb9\x01\x00\x00\x00", # mov ecx, 1
229+
b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret
230230
)
231231
self._bf16 = bool(eax & (1 << 5))
232232
if "arch" in info and "ARM" in info["arch"]: # pragma: no cover

neural_compressor/torch/algorithms/layer_wise/modified_pickle.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -482,7 +482,7 @@ def clear_memo(self):
482482
The memo is the data structure that remembers which objects the
483483
pickler has already seen, so that shared or recursive objects
484484
are pickled by reference and not by value. This method is
485-
useful when re-using picklers.
485+
useful when reusing picklers.
486486
"""
487487
self.memo.clear()
488488

neural_compressor/utils/utility.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -239,13 +239,13 @@ def __init__(self):
239239
max_extension_support = cpuid.get_max_extension_support()
240240
if max_extension_support >= 7:
241241
ecx = cpuid._run_asm(
242-
b"\x31\xC9", # xor ecx, ecx
243-
b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret
242+
b"\x31\xc9", # xor ecx, ecx
243+
b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret
244244
)
245245
self._vnni = bool(ecx & (1 << 11))
246246
eax = cpuid._run_asm(
247-
b"\xB9\x01\x00\x00\x00", # mov ecx, 1
248-
b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret
247+
b"\xb9\x01\x00\x00\x00", # mov ecx, 1
248+
b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret
249249
)
250250
self._bf16 = bool(eax & (1 << 5))
251251
self._info = info

test/adaptor/onnxrt_adaptor/test_onnxrt_operators.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1965,7 +1965,7 @@ def test_fp16(self):
19651965
weights = [["input2", TensorProto.FLOAT, [2], np.random.random((2))]]
19661966
node_infos = [["test", ["input1", "input2"], ["output"], optype, "com.microsoft"]]
19671967
model = self.build_model(inps, outs, weights, node_infos)
1968-
input_data = self.build_test_data(["input1"], [(2)], ["float32"])
1968+
input_data = self.build_test_data(["input1"], [2], ["float32"])
19691969
convert_model = self.get_fp16_mixed_precision_model(model)
19701970
self.assertTrue("Cast" in set([i.op_type for i in convert_model.nodes()]))
19711971
self.assertTrue(10 in set([i.attribute[0].i for i in convert_model.nodes() if i.op_type == "Cast"]))
@@ -2190,7 +2190,7 @@ def test_bf16(self):
21902190
weights = [["input2", TensorProto.FLOAT, [2], np.random.random((2))]]
21912191
node_infos = [["test", ["input1", "input2"], ["output"], optype, "com.microsoft"]]
21922192
model = self.build_model(inps, outs, weights, node_infos)
2193-
input_data = self.build_test_data(["input1"], [(2)], ["float32"])
2193+
input_data = self.build_test_data(["input1"], [2], ["float32"])
21942194
convert_model = self.get_bf16_mixed_precision_model(model)
21952195
self.assertTrue("Cast" in set([i.op_type for i in convert_model.nodes()]))
21962196
self.assertTrue(16 in set([i.attribute[0].i for i in convert_model.nodes() if i.op_type == "Cast"]))

0 commit comments

Comments (0)