Skip to content

Commit 31c9ba1

Browse files
TroyGarden authored and facebook-github-bot committed
convert stride_per_key_per_rank to tensor inside KJT (#2959)
Summary: # context * this diff is part of the "variable-batch KJT refactoring" project ([doc](https://fburl.com/gdoc/svfysfai)) * previously the `stride_per_key_per_rank` variable is `List[List[int]] | None` which can't be handled correctly in PT2 IR (torch.export) * this change makes the KJT class variable `_stride_per_key_per_rank` as `torch.IntTensor | None` so it would be compatible with PT2 IR. # equivalency * to check if `self._stride_per_key_per_rank` is `None` this logic is used to differentiate variable_batch case, and should have the same behavior after this diff * to use `self._stride_per_key_per_rank` as `List[List[int]]` most of the callsite use the function to get the list: `def stride_per_key_per_rank(self) -> List[List[int]]:`, and this function is modified to covert the `torch.IntTensor` to list as ` _stride_per_key_per_rank.tolist()`, the results should be the same NOTE: currently this `self._stride_per_key_per_rank` tensor is always on CPU since it's effective the meta data of a KJT. However, ideally it should be on GPU side since it's after input_dist and we'll should avoid move it to cpu unless really need it. Reviewed By: jd7-tr Differential Revision: D74366343
1 parent 55dd08c commit 31c9ba1

File tree

3 files changed

+82
-47
lines changed

3 files changed

+82
-47
lines changed

torchrec/pt2/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ def kjt_for_pt2_tracing(
5454
values=values,
5555
lengths=lengths,
5656
weights=kjt.weights_or_none(),
57-
stride_per_key_per_rank=[[stride]] * n,
57+
stride_per_key_per_rank=torch.IntTensor([[stride]] * n, device="cpu"),
5858
inverse_indices=(kjt.keys(), inverse_indices_tensor),
5959
)
6060

@@ -85,7 +85,7 @@ def kjt_for_pt2_tracing(
8585
lengths=lengths,
8686
weights=weights,
8787
stride=stride if not is_vb else None,
88-
stride_per_key_per_rank=kjt.stride_per_key_per_rank() if is_vb else None,
88+
stride_per_key_per_rank=kjt._stride_per_key_per_rank if is_vb else None,
8989
inverse_indices=inverse_indices,
9090
)
9191

torchrec/schema/api_tests/test_jagged_tensor_schema.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
import inspect
1111
import unittest
12-
from typing import Dict, List, Optional, Tuple
12+
from typing import Dict, List, Optional, Tuple, Union
1313

1414
import torch
1515
from torchrec.schema.utils import is_signature_compatible
@@ -112,7 +112,9 @@ def __init__(
112112
lengths: Optional[torch.Tensor] = None,
113113
offsets: Optional[torch.Tensor] = None,
114114
stride: Optional[int] = None,
115-
stride_per_key_per_rank: Optional[List[List[int]]] = None,
115+
stride_per_key_per_rank: Optional[
116+
Union[List[List[int]], torch.IntTensor]
117+
] = None,
116118
# Below exposed to ensure torch.script-able
117119
stride_per_key: Optional[List[int]] = None,
118120
length_per_key: Optional[List[int]] = None,

torchrec/sparse/jagged_tensor.py

Lines changed: 76 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -1096,13 +1096,15 @@ def _maybe_compute_stride_kjt(
10961096
stride: Optional[int],
10971097
lengths: Optional[torch.Tensor],
10981098
offsets: Optional[torch.Tensor],
1099-
stride_per_key_per_rank: Optional[List[List[int]]],
1099+
stride_per_key_per_rank: Optional[torch.IntTensor],
11001100
) -> int:
11011101
if stride is None:
11021102
if len(keys) == 0:
11031103
stride = 0
1104-
elif stride_per_key_per_rank is not None and len(stride_per_key_per_rank) > 0:
1105-
stride = max([sum(s) for s in stride_per_key_per_rank])
1104+
elif (
1105+
stride_per_key_per_rank is not None and stride_per_key_per_rank.numel() > 0
1106+
):
1107+
stride = int(stride_per_key_per_rank.sum(dim=1).max().item())
11061108
elif offsets is not None and offsets.numel() > 0:
11071109
stride = (offsets.numel() - 1) // len(keys)
11081110
elif lengths is not None:
@@ -1481,8 +1483,8 @@ def _strides_from_kjt(
14811483
def _kjt_empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
14821484
# empty like function fx wrapped, also avoids device hardcoding
14831485
stride, stride_per_key_per_rank = (
1484-
(None, kjt.stride_per_key_per_rank())
1485-
if kjt.variable_stride_per_key()
1486+
(None, kjt._stride_per_key_per_rank)
1487+
if kjt._stride_per_key_per_rank is not None and kjt.variable_stride_per_key()
14861488
else (kjt.stride(), None)
14871489
)
14881490

@@ -1668,14 +1670,20 @@ def _maybe_compute_lengths_offset_per_key(
16681670

16691671
def _maybe_compute_stride_per_key(
16701672
stride_per_key: Optional[List[int]],
1671-
stride_per_key_per_rank: Optional[List[List[int]]],
1673+
stride_per_key_per_rank: Optional[torch.IntTensor],
16721674
stride: Optional[int],
16731675
keys: List[str],
16741676
) -> Optional[List[int]]:
16751677
if stride_per_key is not None:
16761678
return stride_per_key
16771679
elif stride_per_key_per_rank is not None:
1678-
return [sum(s) for s in stride_per_key_per_rank]
1680+
if stride_per_key_per_rank.dim() != 2:
1681+
# after permute the kjt could be empty
1682+
return []
1683+
rt: List[int] = stride_per_key_per_rank.sum(dim=1).tolist()
1684+
if not torch.jit.is_scripting() and is_torchdynamo_compiling():
1685+
pt2_checks_all_is_size(rt)
1686+
return rt
16791687
elif stride is not None:
16801688
return [stride] * len(keys)
16811689
else:
@@ -1766,7 +1774,9 @@ def __init__(
17661774
lengths: Optional[torch.Tensor] = None,
17671775
offsets: Optional[torch.Tensor] = None,
17681776
stride: Optional[int] = None,
1769-
stride_per_key_per_rank: Optional[List[List[int]]] = None,
1777+
stride_per_key_per_rank: Optional[
1778+
Union[torch.IntTensor, List[List[int]]]
1779+
] = None,
17701780
# Below exposed to ensure torch.script-able
17711781
stride_per_key: Optional[List[int]] = None,
17721782
length_per_key: Optional[List[int]] = None,
@@ -1788,8 +1798,14 @@ def __init__(
17881798
self._lengths: Optional[torch.Tensor] = lengths
17891799
self._offsets: Optional[torch.Tensor] = offsets
17901800
self._stride: Optional[int] = stride
1791-
self._stride_per_key_per_rank: Optional[List[List[int]]] = (
1792-
stride_per_key_per_rank
1801+
if not torch.jit.is_scripting() and is_torchdynamo_compiling():
1802+
# in pt2.compile the stride_per_key_per_rank has to be torch.Tensor or None
1803+
# does not take List[List[int]]
1804+
assert not isinstance(stride_per_key_per_rank, list)
1805+
self._stride_per_key_per_rank: Optional[torch.IntTensor] = (
1806+
torch.IntTensor(stride_per_key_per_rank, device="cpu")
1807+
if isinstance(stride_per_key_per_rank, list)
1808+
else stride_per_key_per_rank
17931809
)
17941810
self._stride_per_key: Optional[List[int]] = stride_per_key
17951811
self._length_per_key: Optional[List[int]] = length_per_key
@@ -1815,10 +1831,8 @@ def _init_pt2_checks(self) -> None:
18151831
return
18161832
if self._stride_per_key is not None:
18171833
pt2_checks_all_is_size(self._stride_per_key)
1818-
if self._stride_per_key_per_rank is not None:
1819-
# pyre-ignore [16]
1820-
for s in self._stride_per_key_per_rank:
1821-
pt2_checks_all_is_size(s)
1834+
# this is only needed for torch.compile case
1835+
self._pt2_stride_per_key_per_rank: Optional[List[List[int]]] = None
18221836

18231837
@staticmethod
18241838
def from_offsets_sync(
@@ -2028,7 +2042,7 @@ def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
20282042
kjt_stride, kjt_stride_per_key_per_rank = (
20292043
(stride_per_key[0], None)
20302044
if all(s == stride_per_key[0] for s in stride_per_key)
2031-
else (None, [[stride] for stride in stride_per_key])
2045+
else (None, torch.IntTensor(stride_per_key, device="cpu").reshape(-1, 1))
20322046
)
20332047
kjt = KeyedJaggedTensor(
20342048
keys=kjt_keys,
@@ -2193,12 +2207,32 @@ def stride_per_key_per_rank(self) -> List[List[int]]:
21932207
Returns:
21942208
List[List[int]]: stride per key per rank of the KeyedJaggedTensor.
21952209
"""
2196-
stride_per_key_per_rank = self._stride_per_key_per_rank
2197-
return stride_per_key_per_rank if stride_per_key_per_rank is not None else []
2210+
# making a local reference to the class variable to make jit.script behave
2211+
_stride_per_key_per_rank = self._stride_per_key_per_rank
2212+
if (
2213+
not torch.jit.is_scripting()
2214+
and is_torchdynamo_compiling()
2215+
and _stride_per_key_per_rank is not None
2216+
):
2217+
if self._pt2_stride_per_key_per_rank is not None:
2218+
return self._pt2_stride_per_key_per_rank
2219+
stride_per_key_per_rank = _stride_per_key_per_rank.tolist()
2220+
for stride_per_rank in stride_per_key_per_rank:
2221+
pt2_checks_all_is_size(stride_per_rank)
2222+
self._pt2_stride_per_key_per_rank = stride_per_key_per_rank
2223+
return stride_per_key_per_rank
2224+
return (
2225+
[]
2226+
if _stride_per_key_per_rank is None
2227+
else _stride_per_key_per_rank.tolist()
2228+
)
21982229

21992230
def variable_stride_per_key(self) -> bool:
22002231
"""
22012232
Returns whether the KeyedJaggedTensor has variable stride per key.
2233+
NOTE: `self._variable_stride_per_key` could be `False` when `self._stride_per_key_per_rank`
2234+
is not `None`. It might be assigned to False externally/intentionally, usually the
2235+
`self._stride_per_key_per_rank` is trivial.
22022236
22032237
Returns:
22042238
bool: whether the KeyedJaggedTensor has variable stride per key.
@@ -2343,13 +2377,16 @@ def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
23432377
start_offset = 0
23442378
_length_per_key = self.length_per_key()
23452379
_offset_per_key = self.offset_per_key()
2380+
# use local copy/ref for self._stride_per_key_per_rank to satisfy jit.script
2381+
_stride_per_key_per_rank = self._stride_per_key_per_rank
23462382
for segment in segments:
23472383
end = start + segment
23482384
end_offset = _offset_per_key[end]
23492385
keys: List[str] = self._keys[start:end]
23502386
stride_per_key_per_rank = (
2351-
self.stride_per_key_per_rank()[start:end]
2387+
_stride_per_key_per_rank[start:end, :]
23522388
if self.variable_stride_per_key()
2389+
and _stride_per_key_per_rank is not None
23532390
else None
23542391
)
23552392
if segment == len(self._keys):
@@ -2496,18 +2533,21 @@ def permute(
24962533
)
24972534

24982535
length_per_key = self.length_per_key()
2536+
permuted_stride_per_key = None if self._stride_per_key is None else []
24992537
permuted_keys: List[str] = []
2500-
permuted_stride_per_key_per_rank: List[List[int]] = []
25012538
permuted_length_per_key: List[int] = []
25022539
permuted_length_per_key_sum = 0
25032540
for index in indices:
25042541
key = self.keys()[index]
25052542
permuted_keys.append(key)
25062543
permuted_length_per_key.append(length_per_key[index])
2507-
if self.variable_stride_per_key():
2508-
permuted_stride_per_key_per_rank.append(
2509-
self.stride_per_key_per_rank()[index]
2510-
)
2544+
if permuted_stride_per_key is not None:
2545+
permuted_stride_per_key.append(self._stride_per_key[index])
2546+
_stride_per_key_per_rank = self._stride_per_key_per_rank
2547+
if self.variable_stride_per_key() and _stride_per_key_per_rank is not None:
2548+
permuted_stride_per_key_per_rank = _stride_per_key_per_rank[indices, :]
2549+
else:
2550+
permuted_stride_per_key_per_rank = None
25112551

25122552
permuted_length_per_key_sum = sum(permuted_length_per_key)
25132553
if not torch.jit.is_scripting() and is_non_strict_exporting():
@@ -2559,18 +2599,16 @@ def permute(
25592599
self.weights_or_none(),
25602600
permuted_length_per_key_sum,
25612601
)
2562-
stride_per_key_per_rank = (
2563-
permuted_stride_per_key_per_rank if self.variable_stride_per_key() else None
2564-
)
2602+
25652603
kjt = KeyedJaggedTensor(
25662604
keys=permuted_keys,
25672605
values=permuted_values,
25682606
weights=permuted_weights,
25692607
lengths=permuted_lengths.view(-1),
25702608
offsets=None,
25712609
stride=self._stride,
2572-
stride_per_key_per_rank=stride_per_key_per_rank,
2573-
stride_per_key=None,
2610+
stride_per_key_per_rank=permuted_stride_per_key_per_rank,
2611+
stride_per_key=permuted_stride_per_key,
25742612
length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
25752613
lengths_offset_per_key=None,
25762614
offset_per_key=None,
@@ -2887,7 +2925,7 @@ def dist_init(
28872925

28882926
if variable_stride_per_key:
28892927
assert stride_per_rank_per_key is not None
2890-
stride_per_key_per_rank_tensor: torch.Tensor = stride_per_rank_per_key.view(
2928+
stride_per_key_per_rank: torch.Tensor = stride_per_rank_per_key.view(
28912929
num_workers, len(keys)
28922930
).T.cpu()
28932931

@@ -2924,23 +2962,18 @@ def dist_init(
29242962
weights,
29252963
)
29262964

2927-
stride_per_key_per_rank = torch.jit.annotate(
2928-
List[List[int]], stride_per_key_per_rank_tensor.tolist()
2929-
)
2965+
if stride_per_key_per_rank.numel() == 0:
2966+
stride_per_key_per_rank = torch.zeros(
2967+
(len(keys), 1), device="cpu", dtype=torch.int64
2968+
)
29302969

2931-
if not stride_per_key_per_rank:
2932-
stride_per_key_per_rank = [[0]] * len(keys)
29332970
if stagger > 1:
2934-
stride_per_key_per_rank_stagger: List[List[int]] = []
29352971
local_world_size = num_workers // stagger
2936-
for i in range(len(keys)):
2937-
stride_per_rank_stagger: List[int] = []
2938-
for j in range(local_world_size):
2939-
stride_per_rank_stagger.extend(
2940-
stride_per_key_per_rank[i][j::local_world_size]
2941-
)
2942-
stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
2943-
stride_per_key_per_rank = stride_per_key_per_rank_stagger
2972+
indices = [
2973+
list(range(i, num_workers, local_world_size))
2974+
for i in range(local_world_size)
2975+
]
2976+
stride_per_key_per_rank = stride_per_key_per_rank[:, indices]
29442977

29452978
kjt = KeyedJaggedTensor(
29462979
keys=keys,

0 commit comments

Comments
 (0)