Skip to content
This repository was archived by the owner on Jul 1, 2025. It is now read-only.

Commit 9ecc93a

Browse files
amyreese authored and facebook-github-bot committed
apply Black 2024 style in fbcode (8/16)
Reviewed By: aleivag
Differential Revision: D54447737
fbshipit-source-id: 6c05d7941c6b4f1787b8da6cf810693f48a96c4e
1 parent f3cdb7a commit 9ecc93a

File tree

7 files changed

+33
-17
lines changed

7 files changed

+33
-17
lines changed

torch_glow/tests/functionality/to_glow_num_devices_to_use_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ def devices_to_use_test_helper(self, input, num_replications):
4242

4343
self.assertEqual(type(g), type(t))
4444
self.assertEqual(len(g), len(t))
45-
for (gi, ti) in zip(g, t):
45+
for gi, ti in zip(g, t):
4646
self.assertTrue(torch.allclose(gi, ti))
4747

4848
def devices_to_use_test(self):

torch_glow/tests/functionality/to_glow_tuple_output_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def tuple_test_helper(self, ModType):
5555
self.assertEqual(type(g), type(t))
5656
self.assertEqual(len(g), len(t))
5757

58-
for (gi, ti) in zip(g, t):
58+
for gi, ti in zip(g, t):
5959
self.assertTrue(torch.allclose(gi, ti))
6060

6161
# test module ser/de with tuple output

torch_glow/tests/functionality/to_glow_write_to_onnx_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ def lower_and_write_to_onnx_helper(self, ModType, onnx_prefix):
8585
self.assertEqual(type(g), type(t))
8686
self.assertEqual(len(g), len(t))
8787

88-
for (gi, ti) in zip(g, t):
88+
for gi, ti in zip(g, t):
8989
self.assertTrue(torch.allclose(gi, ti))
9090

9191
assert os.path.exists(onnx_prefix + ".onnxtxt")

torch_glow/tests/nodes/embedding_bag_test.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -63,9 +63,11 @@ class TestQuantizedEmbeddingBag(utils.TorchGlowTestCase):
6363
bits="_4bit" if is4bit else "_byte",
6464
weighted="_weighted" if is_weighted else "",
6565
fp16="_fp16" if use_fp16 else "",
66-
sample_weights="_sample_weights_fp16"
67-
if per_sample_weights_fp16 and is_weighted
68-
else "",
66+
sample_weights=(
67+
"_sample_weights_fp16"
68+
if per_sample_weights_fp16 and is_weighted
69+
else ""
70+
),
6971
backend="_" + DEFAULT_BACKEND,
7072
),
7173
num_lengths,
@@ -157,9 +159,11 @@ def forward(self, indices, offsets):
157159
indices,
158160
offsets,
159161
fusible_ops={
160-
"quantized::embedding_bag_4bit_rowwise_offsets"
161-
if is4bit
162-
else "quantized::embedding_bag_byte_rowwise_offsets"
162+
(
163+
"quantized::embedding_bag_4bit_rowwise_offsets"
164+
if is4bit
165+
else "quantized::embedding_bag_byte_rowwise_offsets"
166+
)
163167
},
164168
fp16=use_fp16,
165169
# FP16 version is known to yeild different results, so our

torch_glow/tests/utils.py

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -62,10 +62,22 @@ def ephemeral_torchglow_settings(
6262
torch_glow.setGlowBackend(backend)
6363
yield
6464
finally:
65-
torch_glow.enable_convert_to_fp16() if old_fp16 else torch_glow.disable_convert_to_fp16()
65+
(
66+
torch_glow.enable_convert_to_fp16()
67+
if old_fp16
68+
else torch_glow.disable_convert_to_fp16()
69+
)
6670
torch_glow.enable_clip_fp16() if old_clip else torch_glow.disable_clip_fp16()
67-
torch_glow.enable_convert_fused_to_fp16() if old_convert_fused else torch_glow.disable_convert_fused_to_fp16()
68-
torch_glow.enableFusionPass_DO_NOT_USE_THIS() if old_fusion else torch_glow.disableFusionPass()
71+
(
72+
torch_glow.enable_convert_fused_to_fp16()
73+
if old_convert_fused
74+
else torch_glow.disable_convert_fused_to_fp16()
75+
)
76+
(
77+
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
78+
if old_fusion
79+
else torch_glow.disableFusionPass()
80+
)
6981
torch_glow.setGlowBackend(old_backend)
7082
torch_glow.setFusionBlocklist(old_blocklist)
7183

utils/caffe2_pb_runner.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,9 @@ def mode_128to127(x):
132132
for h in range(0, model_props[MODEL].image_size):
133133
for c in range(0, model_props[MODEL].num_color_channels):
134134
# WHC -> CWH, RGB -> BGR
135-
transposed_image[0][model_props[MODEL].num_color_channels - c - 1][w][
136-
h
137-
] = model_props[MODEL].image_mode_op(img[w][h][c])
135+
transposed_image[0][model_props[MODEL].num_color_channels - c - 1][w][h] = (
136+
model_props[MODEL].image_mode_op(img[w][h][c])
137+
)
138138

139139
final_image = transposed_image
140140

utils/trace_parser.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -166,13 +166,13 @@ def dumpAccumulate(events, keyfunc, traceTime):
166166
nameMap[name].append(ev.selfTime())
167167

168168
layers = []
169-
for (name, times) in nameMap.items():
169+
for name, times in nameMap.items():
170170
layers.append(
171171
(name, len(times), numpy.mean(times), numpy.std(times), numpy.sum(times))
172172
)
173173

174174
# Iterate sorted by total time.
175-
for (name, num, mean, stddev, total) in sorted(
175+
for name, num, mean, stddev, total in sorted(
176176
layers, key=itemgetter(4), reverse=True
177177
):
178178
mean = formatUs(mean)

0 commit comments

Comments
 (0)