
Commit 1e88899

Borda, pre-commit-ci[bot], and lantiga authored
bump python 3.9+ (Lightning-AI#20413)
* bump python 3.9+
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci
* --unsafe-fixes
* contextlib.AbstractContextManager
* type: ignore[misc]
* update CI
* apply fixes
* apply fixes

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Luca Antiga <[email protected]>
1 parent 045f1bf commit 1e88899
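
The bullet list above compresses several mechanical changes. One of them, contextlib.AbstractContextManager, is the Python 3.9+ replacement for the deprecated typing.ContextManager alias; the sketch below illustrates that pattern with hypothetical names (it is not code from this commit).

from collections.abc import Iterator
from contextlib import AbstractContextManager, contextmanager

# PEP 585 (Python 3.9) makes AbstractContextManager subscriptable at runtime,
# so it can replace the deprecated typing.ContextManager alias in annotations.
@contextmanager
def open_scope(name: str) -> Iterator[str]:
    yield name

def make_scope(name: str) -> AbstractContextManager[str]:
    return open_scope(name)

with make_scope("demo") as tag:
    assert tag == "demo"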

File tree

234 files changed (+1479 / -1368 lines)


.actions/assistant.py

Lines changed: 6 additions & 5 deletions
@@ -18,10 +18,11 @@
 import shutil
 import tempfile
 import urllib.request
+from collections.abc import Iterable, Iterator, Sequence
 from itertools import chain
 from os.path import dirname, isfile
 from pathlib import Path
-from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Tuple
+from typing import Any, Optional

 from packaging.requirements import Requirement
 from packaging.version import Version
@@ -127,7 +128,7 @@ def _parse_requirements(lines: Iterable[str]) -> Iterator[_RequirementWithCommen
 pip_argument = None


-def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str = "all") -> List[str]:
+def load_requirements(path_dir: str, file_name: str = "base.txt", unfreeze: str = "all") -> list[str]:
 """Loading requirements from a file.

 >>> path_req = os.path.join(_PROJECT_ROOT, "requirements")
@@ -222,7 +223,7 @@ def _load_aggregate_requirements(req_dir: str = "requirements", freeze_requireme
 fp.writelines([ln + os.linesep for ln in requires] + [os.linesep])


-def _retrieve_files(directory: str, *ext: str) -> List[str]:
+def _retrieve_files(directory: str, *ext: str) -> list[str]:
 all_files = []
 for root, _, files in os.walk(directory):
 for fname in files:
@@ -232,7 +233,7 @@ def _retrieve_files(directory: str, *ext: str) -> List[str]:
 return all_files


-def _replace_imports(lines: List[str], mapping: List[Tuple[str, str]], lightning_by: str = "") -> List[str]:
+def _replace_imports(lines: list[str], mapping: list[tuple[str, str]], lightning_by: str = "") -> list[str]:
 """Replace imports of standalone package to lightning.

 >>> lns = [
@@ -320,7 +321,7 @@ def copy_replace_imports(
 fo.writelines(lines)


-def create_mirror_package(source_dir: str, package_mapping: Dict[str, str]) -> None:
+def create_mirror_package(source_dir: str, package_mapping: dict[str, str]) -> None:
 """Create a mirror package with adjusted imports."""
 # replace imports and copy the code
 mapping = package_mapping.copy()
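
The hunks above show the recipe applied across the repository: abstract collection types now come from collections.abc (their typing aliases are deprecated since Python 3.9), and concrete containers use the built-in generics from PEP 585. A minimal self-contained sketch of the same import style follows (the helper is hypothetical, not part of assistant.py):

from collections.abc import Iterable, Iterator

def non_blank_lines(lines: Iterable[str]) -> Iterator[str]:
    # Yield stripped, non-empty, non-comment lines; stands in for the kind of
    # line filtering a requirements parser performs.
    for line in lines:
        line = line.strip()
        if line and not line.startswith("#"):
            yield line

assert list(non_blank_lines(["torch>=2.0", "", "# pinned"])) == ["torch>=2.0"]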

.github/workflows/_legacy-checkpoints.yml

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ jobs:
 - uses: actions/setup-python@v5
   with:
     # Python version here needs to be supported by all PL versions listed in back-compatible-versions.txt.
-    python-version: 3.8
+    python-version: "3.9"

 - name: Install PL from source
   env:
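
Quoting the new value ("3.9" rather than a bare 3.9) is likely deliberate: YAML parses bare dotted numbers as floats, which becomes a real problem once the matrix reaches 3.10. A quick illustration with PyYAML (assumes PyYAML is installed; not part of this commit):

import yaml

print(yaml.safe_load("python-version: 3.10"))    # {'python-version': 3.1}  float, version mangled
print(yaml.safe_load('python-version: "3.10"'))  # {'python-version': '3.10'}
print(yaml.safe_load('python-version: "3.9"'))   # {'python-version': '3.9'}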

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ repos:
 hooks:
   # try to fix what is possible
   - id: ruff
-    args: ["--fix"]
+    args: ["--fix", "--unsafe-fixes"]
   # perform formatting updates
   - id: ruff-format
   # validate if all is fine with preview mode
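
The added --unsafe-fixes flag tells ruff to also apply fixes it does not classify as strictly behavior-preserving, which presumably covers some of the bulk annotation rewrites in this commit (e.g. typing.List -> list). Those rewrites only work because the new 3.9 floor makes built-in containers subscriptable at runtime (PEP 585). A quick sanity check with a hypothetical helper:

def tail(values: list[int], n: int = 2) -> tuple[int, ...]:
    # list[int] / tuple[int, ...] need Python 3.9+ when evaluated at runtime;
    # on 3.8 these annotations raise "TypeError: 'type' object is not subscriptable".
    return tuple(values[-n:])

assert tail([1, 2, 3]) == (2, 3)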

docs/source-pytorch/accelerators/tpu_faq.rst

Lines changed: 2 additions & 2 deletions
@@ -40,9 +40,9 @@ Unsupported datatype transfer to TPUs?

 .. code-block::

-File "/usr/local/lib/python3.8/dist-packages/torch_xla/utils/utils.py", line 205, in _for_each_instance_rewrite
+File "/usr/local/lib/python3.9/dist-packages/torch_xla/utils/utils.py", line 205, in _for_each_instance_rewrite
 v = _for_each_instance_rewrite(result.__dict__[k], select_fn, fn, rwmap)
-File "/usr/local/lib/python3.8/dist-packages/torch_xla/utils/utils.py", line 206, in _for_each_instance_rewrite
+File "/usr/local/lib/python3.9/dist-packages/torch_xla/utils/utils.py", line 206, in _for_each_instance_rewrite
 result.__dict__[k] = v
 TypeError: 'mappingproxy' object does not support item assignment

docs/source-pytorch/advanced/post_training_quantization.rst

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ Installation
 Prerequisites
 =============

-Python version: 3.8, 3.9, 3.10
+Python version: 3.9, 3.10

 Install Intel® Neural Compressor
 ================================

examples/fabric/build_your_own_trainer/trainer.py

Lines changed: 6 additions & 6 deletions
@@ -1,7 +1,7 @@
 import os
-from collections.abc import Mapping
+from collections.abc import Iterable, Mapping
 from functools import partial
-from typing import Any, Iterable, List, Literal, Optional, Tuple, Union, cast
+from typing import Any, Literal, Optional, Union, cast

 import lightning as L
 import torch
@@ -19,11 +19,11 @@ def __init__(
 self,
 accelerator: Union[str, Accelerator] = "auto",
 strategy: Union[str, Strategy] = "auto",
-devices: Union[List[int], str, int] = "auto",
+devices: Union[list[int], str, int] = "auto",
 precision: Union[str, int] = "32-true",
 plugins: Optional[Union[str, Any]] = None,
-callbacks: Optional[Union[List[Any], Any]] = None,
-loggers: Optional[Union[Logger, List[Logger]]] = None,
+callbacks: Optional[Union[list[Any], Any]] = None,
+loggers: Optional[Union[Logger, list[Logger]]] = None,
 max_epochs: Optional[int] = 1000,
 max_steps: Optional[int] = None,
 grad_accum_steps: int = 1,
@@ -465,7 +465,7 @@ def get_latest_checkpoint(checkpoint_dir: str) -> Optional[str]:

 def _parse_optimizers_schedulers(
 self, configure_optim_output
-) -> Tuple[
+) -> tuple[
 Optional[L.fabric.utilities.types.Optimizable],
 Optional[Mapping[str, Union[L.fabric.utilities.types.LRScheduler, bool, str, int]]],
 ]:
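
Note what stays: Optional and Union are still imported from typing while List and Tuple are dropped. With the floor at 3.9 rather than 3.10, the PEP 604 spelling (e.g. list[int] | None) cannot be evaluated at runtime unless a module adds from __future__ import annotations, so these aliases remain. A sketch mirroring the signature style above (hypothetical helper, not from the repo):

from typing import Optional, Union

def normalize_devices(devices: Union[list[int], str, int] = "auto") -> Optional[list[int]]:
    # Return an explicit device list when one is given, otherwise None ("auto").
    return devices if isinstance(devices, list) else None

assert normalize_devices([0, 1]) == [0, 1]
assert normalize_devices("auto") is None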

examples/fabric/reinforcement_learning/rl/agent.py

Lines changed: 9 additions & 10 deletions
@@ -1,5 +1,4 @@
 import math
-from typing import Dict, Tuple

 import gymnasium as gym
 import torch
@@ -43,7 +42,7 @@ def __init__(self, envs: gym.vector.SyncVectorEnv, act_fun: str = "relu", ortho_
 layer_init(torch.nn.Linear(64, envs.single_action_space.n), std=0.01, ortho_init=ortho_init),
 )

-def get_action(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor]:
+def get_action(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor]:
 logits = self.actor(x)
 distribution = Categorical(logits=logits)
 if action is None:
@@ -58,12 +57,12 @@ def get_greedy_action(self, x: Tensor) -> Tensor:
 def get_value(self, x: Tensor) -> Tensor:
 return self.critic(x)

-def get_action_and_value(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def get_action_and_value(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 action, log_prob, entropy = self.get_action(x, action)
 value = self.get_value(x)
 return action, log_prob, entropy, value

-def forward(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def forward(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 return self.get_action_and_value(x, action)

 @torch.no_grad()
@@ -77,7 +76,7 @@ def estimate_returns_and_advantages(
 num_steps: int,
 gamma: float,
 gae_lambda: float,
-) -> Tuple[Tensor, Tensor]:
+) -> tuple[Tensor, Tensor]:
 next_value = self.get_value(next_obs).reshape(1, -1)
 advantages = torch.zeros_like(rewards)
 lastgaelam = 0
@@ -143,7 +142,7 @@ def __init__(
 self.avg_value_loss = MeanMetric(**torchmetrics_kwargs)
 self.avg_ent_loss = MeanMetric(**torchmetrics_kwargs)

-def get_action(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor]:
+def get_action(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor]:
 logits = self.actor(x)
 distribution = Categorical(logits=logits)
 if action is None:
@@ -158,12 +157,12 @@ def get_greedy_action(self, x: Tensor) -> Tensor:
 def get_value(self, x: Tensor) -> Tensor:
 return self.critic(x)

-def get_action_and_value(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def get_action_and_value(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 action, log_prob, entropy = self.get_action(x, action)
 value = self.get_value(x)
 return action, log_prob, entropy, value

-def forward(self, x: Tensor, action: Tensor = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
+def forward(self, x: Tensor, action: Tensor = None) -> tuple[Tensor, Tensor, Tensor, Tensor]:
 return self.get_action_and_value(x, action)

 @torch.no_grad()
@@ -177,7 +176,7 @@ def estimate_returns_and_advantages(
 num_steps: int,
 gamma: float,
 gae_lambda: float,
-) -> Tuple[Tensor, Tensor]:
+) -> tuple[Tensor, Tensor]:
 next_value = self.get_value(next_obs).reshape(1, -1)
 advantages = torch.zeros_like(rewards)
 lastgaelam = 0
@@ -193,7 +192,7 @@ def estimate_returns_and_advantages(
 returns = advantages + values
 return returns, advantages

-def training_step(self, batch: Dict[str, Tensor]):
+def training_step(self, batch: dict[str, Tensor]):
 # Get actions and values given the current observations
 _, newlogprob, entropy, newvalue = self(batch["obs"], batch["actions"].long())
 logratio = newlogprob - batch["logprobs"]

examples/fabric/reinforcement_learning/train_fabric.py

Lines changed: 1 addition & 2 deletions
@@ -21,7 +21,6 @@
 import os
 import time
 from datetime import datetime
-from typing import Dict

 import gymnasium as gym
 import torch
@@ -38,7 +37,7 @@ def train(
 fabric: Fabric,
 agent: PPOLightningAgent,
 optimizer: torch.optim.Optimizer,
-data: Dict[str, Tensor],
+data: dict[str, Tensor],
 global_step: int,
 args: argparse.Namespace,
 ):

examples/fabric/reinforcement_learning/train_torch.py

Lines changed: 1 addition & 2 deletions
@@ -22,7 +22,6 @@
 import random
 import time
 from datetime import datetime
-from typing import Dict

 import gymnasium as gym
 import torch
@@ -41,7 +40,7 @@
 def train(
 agent: PPOAgent,
 optimizer: torch.optim.Optimizer,
-data: Dict[str, Tensor],
+data: dict[str, Tensor],
 logger: SummaryWriter,
 global_step: int,
 args: argparse.Namespace,

examples/fabric/tensor_parallel/model.py

Lines changed: 2 additions & 2 deletions
@@ -9,7 +9,7 @@


 from dataclasses import dataclass
-from typing import Optional, Tuple
+from typing import Optional

 import torch
 import torch.nn.functional as F
@@ -87,7 +87,7 @@ def apply_rotary_emb(
 xq: torch.Tensor,
 xk: torch.Tensor,
 freqs_cis: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor]:
 """Apply rotary embeddings to input tensors using the given frequency tensor.

 This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided

examples/pytorch/basics/autoencoder.py

Lines changed: 2 additions & 2 deletions
@@ -18,7 +18,7 @@
 """

 from os import path
-from typing import Optional, Tuple
+from typing import Optional

 import torch
 import torch.nn.functional as F
@@ -45,7 +45,7 @@ def __init__(
 nrow: int = 8,
 padding: int = 2,
 normalize: bool = True,
-value_range: Optional[Tuple[int, int]] = None,
+value_range: Optional[tuple[int, int]] = None,
 scale_each: bool = False,
 pad_value: int = 0,
 ) -> None:

examples/pytorch/domain_templates/reinforce_learn_Qnet.py

Lines changed: 6 additions & 6 deletions
@@ -35,7 +35,7 @@
 import argparse
 import random
 from collections import OrderedDict, deque, namedtuple
-from typing import Iterator, List, Tuple
+from collections.abc import Iterator

 import gym
 import torch
@@ -102,7 +102,7 @@ def append(self, experience: Experience) -> None:
 """
 self.buffer.append(experience)

-def sample(self, batch_size: int) -> Tuple:
+def sample(self, batch_size: int) -> tuple:
 indices = random.sample(range(len(self.buffer)), batch_size)
 states, actions, rewards, dones, next_states = zip(*(self.buffer[idx] for idx in indices))

@@ -190,7 +190,7 @@ def get_action(self, net: nn.Module, epsilon: float, device: str) -> int:
 return action

 @torch.no_grad()
-def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = "cpu") -> Tuple[float, bool]:
+def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = "cpu") -> tuple[float, bool]:
 """Carries out a single interaction step between the agent and the environment.

 Args:
@@ -295,7 +295,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 """
 return self.net(x)

-def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
+def dqn_mse_loss(self, batch: tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
 """Calculates the mse loss using a mini batch from the replay buffer.

 Args:
@@ -318,7 +318,7 @@ def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor

 return nn.MSELoss()(state_action_values, expected_state_action_values)

-def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], nb_batch) -> OrderedDict:
+def training_step(self, batch: tuple[torch.Tensor, torch.Tensor], nb_batch) -> OrderedDict:
 """Carries out a single step through the environment to update the replay buffer. Then calculates loss based on
 the minibatch received.

@@ -356,7 +356,7 @@ def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], nb_batch) -> O

 return OrderedDict({"loss": loss, "log": log, "progress_bar": log})

-def configure_optimizers(self) -> List[Optimizer]:
+def configure_optimizers(self) -> list[Optimizer]:
 """Initialize Adam optimizer."""
 optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
 return [optimizer]

examples/pytorch/domain_templates/reinforce_learn_ppo.py

Lines changed: 9 additions & 8 deletions
@@ -30,7 +30,8 @@
 """

 import argparse
-from typing import Callable, Iterator, List, Tuple
+from collections.abc import Iterator
+from typing import Callable

 import gym
 import torch
@@ -41,7 +42,7 @@
 from torch.utils.data import DataLoader, IterableDataset


-def create_mlp(input_shape: Tuple[int], n_actions: int, hidden_size: int = 128):
+def create_mlp(input_shape: tuple[int], n_actions: int, hidden_size: int = 128):
 """Simple Multi-Layer Perceptron network."""
 return nn.Sequential(
 nn.Linear(input_shape[0], hidden_size),
@@ -227,7 +228,7 @@ def __init__(

 self.state = torch.FloatTensor(self.env.reset())

-def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
 """Passes in a state x through the network and returns the policy and a sampled action.

 Args:
@@ -242,7 +243,7 @@ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Te

 return pi, action, value

-def discount_rewards(self, rewards: List[float], discount: float) -> List[float]:
+def discount_rewards(self, rewards: list[float], discount: float) -> list[float]:
 """Calculate the discounted rewards of all rewards in list.

 Args:
@@ -263,7 +264,7 @@ def discount_rewards(self, rewards: List[float], discount: float) -> List[float]

 return list(reversed(cumul_reward))

-def calc_advantage(self, rewards: List[float], values: List[float], last_value: float) -> List[float]:
+def calc_advantage(self, rewards: list[float], values: list[float], last_value: float) -> list[float]:
 """Calculate the advantage given rewards, state values, and the last value of episode.

 Args:
@@ -281,7 +282,7 @@ def calc_advantage(self, rewards: List[float], values: List[float], last_value:
 delta = [rews[i] + self.gamma * vals[i + 1] - vals[i] for i in range(len(rews) - 1)]
 return self.discount_rewards(delta, self.gamma * self.lam)

-def generate_trajectory_samples(self) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:
+def generate_trajectory_samples(self) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]:
 """
 Contains the logic for generating trajectory data to train policy and value network
 Yield:
@@ -375,7 +376,7 @@ def critic_loss(self, state, action, logp_old, qval, adv) -> torch.Tensor:
 value = self.critic(state)
 return (qval - value).pow(2).mean()

-def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor]):
+def training_step(self, batch: tuple[torch.Tensor, torch.Tensor]):
 """Carries out a single update to actor and critic network from a batch of replay buffer.

 Args:
@@ -406,7 +407,7 @@ def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor]):
 self.log("loss_critic", loss_critic, on_step=False, on_epoch=True, prog_bar=False, logger=True)
 self.log("loss_actor", loss_actor, on_step=False, on_epoch=True, prog_bar=True, logger=True)

-def configure_optimizers(self) -> List[Optimizer]:
+def configure_optimizers(self) -> list[Optimizer]:
 """Initialize Adam optimizer."""
 optimizer_actor = torch.optim.Adam(self.actor.parameters(), lr=self.lr_actor)
 optimizer_critic = torch.optim.Adam(self.critic.parameters(), lr=self.lr_critic)
