Skip to content

Commit 1969528

Browse files
committed
Fix dtype log when default (None) is used w/o AMP
1 parent 92f610c commit 1969528

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

train.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -597,7 +597,7 @@ def main():
597597
_logger.info('Using native Torch AMP. Training in mixed precision.')
598598
else:
599599
if utils.is_primary(args):
600-
_logger.info(f'AMP not enabled. Training in {model_dtype}.')
600+
_logger.info(f'AMP not enabled. Training in {model_dtype or torch.float32}.')
601601

602602
# optionally resume from a checkpoint
603603
resume_epoch = None

validate.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ def validate(args):
192192
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
193193
_logger.info('Validating in mixed precision with native PyTorch AMP.')
194194
else:
195-
_logger.info(f'Validating in {model_dtype}. AMP not enabled.')
195+
_logger.info(f'Validating in {model_dtype or torch.float32}. AMP not enabled.')
196196

197197
if args.fuser:
198198
set_jit_fuser(args.fuser)

0 commit comments

Comments (0)