From 8253305152d1776b234266f247ccc7e8dc224598 Mon Sep 17 00:00:00 2001
From: SCHOEPP Alexandre
Date: Thu, 12 Dec 2024 17:39:17 +0100
Subject: [PATCH 01/60] Added KeyPoints to the library

---
 torchvision/tv_tensors/__init__.py   |  3 ++
 torchvision/tv_tensors/_keypoints.py | 78 ++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 torchvision/tv_tensors/_keypoints.py

diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py
index 1ba47f60a36..984d8070ac3 100644
--- a/torchvision/tv_tensors/__init__.py
+++ b/torchvision/tv_tensors/__init__.py
@@ -6,6 +6,7 @@
 from ._torch_function_helpers import set_return_type
 from ._tv_tensor import TVTensor
 from ._video import Video
+from ._keypoints import KeyPoints


 # TODO: Fix this. We skip this method as it leads to
@@ -31,5 +32,7 @@ def wrap(wrappee, *, like, **kwargs):
             format=kwargs.get("format", like.format),
             canvas_size=kwargs.get("canvas_size", like.canvas_size),
         )
+    elif isinstance(like, KeyPoints):
+        return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size))
     else:
         return wrappee.as_subclass(type(like))

diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py
new file mode 100644
index 00000000000..410d4cf6859
--- /dev/null
+++ b/torchvision/tv_tensors/_keypoints.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+from typing import TYPE_CHECKING, Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union
+from torch.utils._pytree import tree_flatten
+import torch
+from ._tv_tensor import TVTensor
+
+
+class KeyPoints(TVTensor):
+    """:class:`torch.Tensor` subclass for tensors with shape ``[..., 2]`` that represent points in an image.
+
+    Each point is represented by its XY coordinates.
+
+    Args:
+        data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
+        canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
+        dtype (torch.dtype, optional): Desired data type of the key points. If omitted, will be inferred from
+            ``data``.
+        device (torch.device, optional): Desired device of the key points. If omitted and ``data`` is a
+            :class:`torch.Tensor`, the device is taken from it. Otherwise, the key points are constructed on the CPU.
+        requires_grad (bool, optional): Whether autograd should record operations on the key points. If omitted and
+            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
+    """
+
+    canvas_size: Tuple[int, int]
+
+    def __new__(
+        cls, data: Any, *, dtype: Optional[torch.dtype] = None,
+        device: Optional[Union[torch.device, str, int]] = None,
+        requires_grad: Optional[bool] = None, canvas_size: Tuple[int, int]
+    ):
+        tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
+        if tensor.ndim == 1:
+            tensor = tensor.unsqueeze(0)
+        elif tensor.shape[-1] != 2:
+            raise ValueError(f"Expected a tensor of shape (..., 2), not {tensor.shape}")
+        points = tensor.as_subclass(cls)
+        points.canvas_size = canvas_size
+        return points
+
+    if TYPE_CHECKING:
+        # EVIL: Just so that MYPY+PYLANCE+others stop shouting that everything is wrong when initializing the
+        # TVTensor. Not read or defined at runtime (only at linting time).
+        # TODO: Add this to all TVTensors
+        def __init__(
+            self, data: Any, *, dtype: Optional[torch.dtype] = None,
+            device: Optional[Union[torch.device, str, int]] = None,
+            requires_grad: Optional[bool] = None, canvas_size: Tuple[int, int]
+        ):
+            ...
+ + @classmethod + def _wrap_output( + cls, output: Any, args: Sequence[Any] = (), kwargs: Optional[Mapping[str, Any]] = None, + ) -> Any: + # Mostly copied over from the BoundingBoxes TVTensor, minor improvements. + # This copies over the metadata. + # For BoundingBoxes, that included format, but we only support one format here ! + flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator] + first_bbox_from_args = next(x for x in flat_params if isinstance(x, KeyPoints)) + canvas_size: Tuple[int, int] = first_bbox_from_args.canvas_size + + if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): + output = KeyPoints(output, canvas_size=canvas_size) + elif isinstance(output, tuple): + # NB: output is checked against sequence because it has already been checked against Tensor + # Since a Tensor is a sequence of Tensor, had it not been the case, we may have had silent + # or complex errors + output = tuple( + KeyPoints(part, canvas_size=canvas_size) + for part in output + ) + elif isinstance(output, MutableSequence): + for i, part in enumerate(output): + output[i] = KeyPoints(part, canvas_size=canvas_size) + return output + + def __repr__(self, *, tensor_contents: Any = None) -> str: + return self._make_repr(canvas_size=self.canvas_size) From 484561d493ae06ecb50a4094f31e7479504e3a23 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:50:47 +0100 Subject: [PATCH 02/60] Improved KeyPoints to be exported --- torchvision/tv_tensors/__init__.py | 16 +++++++++++++--- torchvision/tv_tensors/_keypoints.py | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index 984d8070ac3..3a56bf88330 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -1,3 +1,4 @@ +from typing import TypeVar import torch from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat @@ -9,11 +10,14 @@ from ._keypoints import KeyPoints +_WRAP_LIKE_T = TypeVar("_WRAP_LIKE_T", bound=TVTensor) + + # TODO: Fix this. We skip this method as it leads to # RecursionError: maximum recursion depth exceeded while calling a Python object # Until `disable` is removed, there will be graph breaks after all calls to functional transforms @torch.compiler.disable -def wrap(wrappee, *, like, **kwargs): +def wrap(wrappee: torch.Tensor, *, like: _WRAP_LIKE_T, **kwargs) -> _WRAP_LIKE_T: """Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``. If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of @@ -27,12 +31,18 @@ def wrap(wrappee, *, like, **kwargs): Ignored otherwise. 
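            ``canvas_size`` is likewise honored when ``like`` is a :class:`~torchvision.tv_tensors.KeyPoints`.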
""" if isinstance(like, BoundingBoxes): - return BoundingBoxes._wrap( + return BoundingBoxes._wrap( # type:ignore wrappee, format=kwargs.get("format", like.format), canvas_size=kwargs.get("canvas_size", like.canvas_size), ) elif isinstance(like, KeyPoints): - return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) + return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) # type:ignore else: return wrappee.as_subclass(type(like)) + + +__all__: list[str] = [ + "wrap", "KeyPoints", "Video", "TVTensor", "set_return_type", + "Mask", "Image", "BoundingBoxFormat", "BoundingBoxes" +] diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 410d4cf6859..d044bb77824 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -1,7 +1,7 @@ from __future__ import annotations from typing import TYPE_CHECKING, Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union -from torch.utils._pytree import tree_flatten import torch +from torch.utils._pytree import tree_flatten from ._tv_tensor import TVTensor From 3255890a384043788d2485129b9b3f0dbb170297 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:51:11 +0100 Subject: [PATCH 03/60] Added kernels to support the keypoints --- torchvision/transforms/v2/_misc.py | 2 +- torchvision/transforms/v2/_utils.py | 16 +- .../transforms/v2/functional/__init__.py | 13 + .../transforms/v2/functional/_geometry.py | 348 ++++++++++++++++-- torchvision/transforms/v2/functional/_meta.py | 31 ++ torchvision/transforms/v2/functional/_misc.py | 64 ++++ 6 files changed, 451 insertions(+), 23 deletions(-) diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index d38a6ad8767..ccb5968cd59 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,7 +9,7 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform -from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor +from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_all_keypoints, get_bounding_boxes, has_any, is_pure_tensor # TODO: do we want/need to expose this? diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index dd65ca4d9c9..4e6e76418ec 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -4,7 +4,7 @@ import numbers from contextlib import suppress -from typing import Any, Callable, Dict, List, Literal, Sequence, Tuple, Type, Union +from typing import Any, Callable, Dict, Iterable, List, Literal, Sequence, Tuple, Type, Union import PIL.Image import torch @@ -162,6 +162,20 @@ def get_bounding_boxes(flat_inputs: List[Any]) -> tv_tensors.BoundingBoxes: raise ValueError("No bounding boxes were found in the sample") +def get_all_keypoints(flat_inputs: List[Any]) -> Iterable[tv_tensors.KeyPoints]: + """Yields all KeyPoints in the input. 
+ + Raises: + ValueError: No KeyPoints can be found + """ + generator = (inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.KeyPoints)) + try: + yield next(generator) + except StopIteration: + raise ValueError("No Keypoints were found in the sample.") + return generator + + def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]: """Return Channel, Height, and Width.""" chws = { diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index d5705d55c4b..cbc6e02b2fb 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -5,6 +5,7 @@ from ._meta import ( clamp_bounding_boxes, convert_bounding_box_format, + convert_box_to_points, get_dimensions_image, get_dimensions_video, get_dimensions, @@ -67,21 +68,25 @@ ) from ._geometry import ( affine, + affine_keypoints, affine_bounding_boxes, affine_image, affine_mask, affine_video, center_crop, + center_crop_keypoints, center_crop_bounding_boxes, center_crop_image, center_crop_mask, center_crop_video, crop, + crop_keypoints, crop_bounding_boxes, crop_image, crop_mask, crop_video, elastic, + elastic_keypoints, elastic_bounding_boxes, elastic_image, elastic_mask, @@ -92,31 +97,37 @@ five_crop_video, hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file horizontal_flip, + horizontal_flip_keypoints, horizontal_flip_bounding_boxes, horizontal_flip_image, horizontal_flip_mask, horizontal_flip_video, pad, + pad_keypoints, pad_bounding_boxes, pad_image, pad_mask, pad_video, perspective, + perspectice_keypoints, perspective_bounding_boxes, perspective_image, perspective_mask, perspective_video, resize, + resize_keypoints, resize_bounding_boxes, resize_image, resize_mask, resize_video, resized_crop, + resized_crop_keypoints, resized_crop_bounding_boxes, resized_crop_image, resized_crop_mask, resized_crop_video, rotate, + rotate_keypoints, rotate_bounding_boxes, rotate_image, rotate_mask, @@ -129,6 +140,7 @@ vertical_flip_image, vertical_flip_mask, vertical_flip_video, + vertical_flip_keypoints, vflip, ) from ._misc import ( @@ -143,6 +155,7 @@ normalize_image, normalize_video, sanitize_bounding_boxes, + sanitize_keypoints, to_dtype, to_dtype_image, to_dtype_video, diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index da080e437c9..a80b246630a 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -22,7 +22,7 @@ from torchvision.utils import _log_api_usage_once -from ._meta import _get_size_image_pil, clamp_bounding_boxes, convert_bounding_box_format +from ._meta import _get_size_image_pil, clamp_bounding_boxes, clamp_keypoints, convert_bounding_box_format from ._utils import _FillTypeJIT, _get_kernel, _register_five_ten_crop_kernel_internal, _register_kernel_internal @@ -65,6 +65,11 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: return horizontal_flip_image(mask) +@_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def horizontal_flip_keypoints(kp: tv_tensors.KeyPoints): + return kp.sub_(kp.canvas_size[1]).neg_() + + def horizontal_flip_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, canvas_size: Tuple[int, int] ) -> torch.Tensor: @@ -122,6 +127,11 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: return vertical_flip_image(mask) 
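As a reference for the keypoint flip kernels that follow: a horizontal flip maps x to width - x and leaves y unchanged, while a vertical flip maps y to height - y and leaves x unchanged. Below is a minimal sketch of that mapping for plain XY point tensors; the helper names are hypothetical and not part of this patch.

    import torch
    from typing import Tuple

    def hflip_points(points: torch.Tensor, canvas_size: Tuple[int, int]) -> torch.Tensor:
        # Mirror only the x column; y is left untouched.
        out = points.clone()
        out[..., 0] = canvas_size[1] - out[..., 0]
        return out

    def vflip_points(points: torch.Tensor, canvas_size: Tuple[int, int]) -> torch.Tensor:
        # Mirror only the y column; x is left untouched.
        out = points.clone()
        out[..., 1] = canvas_size[0] - out[..., 1]
        return out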
+@_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): + return kp.sub_(kp.canvas_size[1]).neg_() + + def vertical_flip_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, canvas_size: Tuple[int, int] ) -> torch.Tensor: @@ -333,6 +343,38 @@ def _resize_mask_dispatch( return tv_tensors.wrap(output, like=inpt) +def resize_keypoints( + kp: torch.Tensor, size: Optional[List[int]], + canvas_size: Tuple[int, int], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[bool] = True, +): + old_height, old_width = canvas_size + new_height, new_width = _compute_resized_output_size(canvas_size, size=size, max_size=max_size) + + w_ratio = new_width / old_width + h_ratio = new_height / old_height + ratios = torch.tensor([w_ratio, h_ratio]) + kp.data = kp.data.mul(ratios).to(kp.dtype) + + return kp, (new_height, new_width) + + +@_register_kernel_internal(resize, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _resize_keypoints_dispatch( + kp: tv_tensors.KeyPoints, size: Optional[List[int]], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[bool] = True, +) -> tv_tensors.KeyPoints: + out, canvas_size = resize_keypoints( + kp.as_subclass(torch.Tensor), size, canvas_size=kp.canvas_size, interpolation=interpolation, + max_size=max_size, antialias=antialias + ) + return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + + def resize_bounding_boxes( bounding_boxes: torch.Tensor, canvas_size: Tuple[int, int], @@ -758,6 +800,67 @@ def _affine_image_pil( return _FP.affine(image, matrix, interpolation=pil_modes_mapping[interpolation], fill=fill) +def _affine_keypoints_with_expand( + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + center: Optional[List[float]] = None, + expand: bool = False, +) -> Tuple[torch.Tensor, Tuple[int, int]]: + if keypoints.numel() == 0: + return keypoints, canvas_size + + original_dtype = keypoints.dtype + keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() + dtype = keypoints.dtype + device = keypoints.device + + angle, translate, shear, center = _affine_parse_args( + angle, translate, scale, shear, InterpolationMode.NEAREST, center + ) + + if center is None: + height, width = canvas_size + center = [width * 0.5, height * 0.5] + + affine_vector = _get_inverse_affine_matrix(center, angle, translate, scale, shear, inverted=False) + transposed_affine_matrix = ( + torch.tensor( + affine_vector, + dtype=dtype, + device=device, + ) + .reshape(2, 3) + .T + ) + # 1) Unlike bounding box (whose implmentation we stole) we're already a bunch of points. 
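+    # Appending a homogeneous coordinate of 1 to each point lets the matmul below apply the translation part of the affine matrix in the same step.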
+ keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=device, dtype=dtype)], dim=-1) + # 2) Now let's transform the points using affine matrix + keypoints = torch.matmul(keypoints, transposed_affine_matrix).to(original_dtype) + + return keypoints, canvas_size + + +@_register_kernel_internal(affine, tv_tensors.KeyPoints) +def affine_keypoints( + keypoints: tv_tensors.KeyPoints, + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + center: Optional[List[float]] = None, +): + return _affine_keypoints_with_expand( + keypoints=keypoints.as_subclass(torch.Tensor), + canvas_size=keypoints.canvas_size, + angle=angle, translate=translate, scale=scale, shear=shear, + center=center, expand=False + ) + + def _affine_bounding_boxes_with_expand( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1055,6 +1158,29 @@ def _rotate_image_pil( ) +def rotate_keypoints( + keypoints: tv_tensors.KeyPoints, + angle: float, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + expand: bool = False, + center: Optional[List[float]] = None, + fill: _FillTypeJIT = None, +) -> Tuple[torch.Tensor, Tuple[int, int]]: + return _affine_keypoints_with_expand( + keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, + angle=-angle, translate=[0.0, 0.0], scale=1.0, + shear=[0.0, 0.0], center=center, expand=expand, + ) + + +@_register_kernel_internal(rotate, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) +def _rotate_keypoints_dispatch( + kp: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs +) -> tv_tensors.KeyPoints: + out, canvas_size = rotate_keypoints(kp, angle, center=center, expand=expand, **kwargs) + return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + + def rotate_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1318,6 +1444,36 @@ def pad_mask( return output +def pad_keypoints( + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], + padding: List[int], + padding_mode: str = "constant" +): + SUPPORTED_MODES = ["constant"] + if padding_mode not in SUPPORTED_MODES: + # TODO: add support of other padding modes + raise ValueError( + f"Padding mode '{padding_mode}' is not supported with KeyPoints" + f" (supported modes are {', '.join(SUPPORTED_MODES)})" + ) + left, right, top, bottom = _parse_pad_padding(padding) + pad = torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device) + canvas_size = (canvas_size[0] + top + bottom, canvas_size[1] + left + right) + return clamp_keypoints(keypoints + pad, canvas_size), canvas_size + + +@_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensors_wrapper=False) +def _pad( + keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs +) -> tv_tensors.KeyPoints: + output, canvas_size = pad_keypoints( + keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, + padding=padding, padding_mode=padding_mode + ) + return tv_tensors.wrap(output, like=keypoints, canvas_size=canvas_size) + + def pad_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1404,6 +1560,26 @@ def crop_image(image: torch.Tensor, top: int, left: int, height: int, width: int _register_kernel_internal(crop, PIL.Image.Image)(_crop_image_pil) +def crop_keypoints( + kp: torch.Tensor, + top: int, + left: int, + height: int, + width: int, +) -> Tuple[torch.Tensor, Tuple[int, int]]: 
+ + kp.sub_(torch.tensor([left, top], dtype=kp.dtype, device=kp.device)) + canvas_size = (height, width) + + return clamp_keypoints(kp, canvas_size=canvas_size), canvas_size + + +@_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int) -> tv_tensors.KeyPoints: + out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) + return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + + def crop_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1577,6 +1753,51 @@ def _perspective_image_pil( return _FP.perspective(image, perspective_coeffs, interpolation=pil_modes_mapping[interpolation], fill=fill) +def perspectice_keypoints( + kp: torch.Tensor, + canvas_size: Tuple[int, int], + startpoints: Optional[List[List[int]]], + endpoints: Optional[List[List[int]]], + coefficients: Optional[List[float]] = None, +): + if kp.numel() == 0: + return kp + dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 + device = kp.device + + perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients) + + denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3] + if denom == 0: + raise RuntimeError( + f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform bounding boxes. " + f"Denominator is zero, denom={denom}" + ) + + theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) + + numer_points = torch.matmul(kp, theta1.T) + denom_points = torch.matmul(kp, theta2.T) + transformed_points = numer_points.div_(denom_points) + return clamp_keypoints(transformed_points, canvas_size) + + +@_register_kernel_internal(perspective, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _perspective_keypoints_dispatch( + inpt: tv_tensors.BoundingBoxes, + startpoints: Optional[List[List[int]]], + endpoints: Optional[List[List[int]]], + coefficients: Optional[List[float]] = None, + **kwargs, +) -> tv_tensors.BoundingBoxes: + output = perspectice_keypoints( + inpt.as_subclass(torch.Tensor), + canvas_size=inpt.canvas_size, startpoints=startpoints, + endpoints=endpoints, coefficients=coefficients, + ) + return tv_tensors.wrap(output, like=inpt) + + def perspective_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1618,26 +1839,7 @@ def perspective_bounding_boxes( f"Denominator is zero, denom={denom}" ) - inv_coeffs = [ - (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, - (-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom, - (perspective_coeffs[1] * perspective_coeffs[5] - perspective_coeffs[2] * perspective_coeffs[4]) / denom, - (-perspective_coeffs[3] + perspective_coeffs[5] * perspective_coeffs[6]) / denom, - (perspective_coeffs[0] - perspective_coeffs[2] * perspective_coeffs[6]) / denom, - (-perspective_coeffs[0] * perspective_coeffs[5] + perspective_coeffs[2] * perspective_coeffs[3]) / denom, - (-perspective_coeffs[4] * perspective_coeffs[6] + perspective_coeffs[3] * perspective_coeffs[7]) / denom, - (-perspective_coeffs[0] * perspective_coeffs[7] + perspective_coeffs[1] * perspective_coeffs[6]) / denom, - ] - - theta1 = torch.tensor( - [[inv_coeffs[0], inv_coeffs[1], inv_coeffs[2]], [inv_coeffs[3], inv_coeffs[4], inv_coeffs[5]]], - dtype=dtype, - device=device, - ) - - theta2 
= torch.tensor( - [[inv_coeffs[6], inv_coeffs[7], 1.0], [inv_coeffs[6], inv_coeffs[7], 1.0]], dtype=dtype, device=device - ) + theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) # 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners). # Tensor of points has shape (N * 4, 3), where N is the number of bboxes @@ -1671,6 +1873,33 @@ def perspective_bounding_boxes( ).reshape(original_shape) +def _compute_perspective_thetas( + perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, / +) -> Tuple[torch.Tensor, torch.Tensor]: + inv_coeffs = [ + (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, + (-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom, + (perspective_coeffs[1] * perspective_coeffs[5] - perspective_coeffs[2] * perspective_coeffs[4]) / denom, + (-perspective_coeffs[3] + perspective_coeffs[5] * perspective_coeffs[6]) / denom, + (perspective_coeffs[0] - perspective_coeffs[2] * perspective_coeffs[6]) / denom, + (-perspective_coeffs[0] * perspective_coeffs[5] + perspective_coeffs[2] * perspective_coeffs[3]) / denom, + (-perspective_coeffs[4] * perspective_coeffs[6] + perspective_coeffs[3] * perspective_coeffs[7]) / denom, + (-perspective_coeffs[0] * perspective_coeffs[7] + perspective_coeffs[1] * perspective_coeffs[6]) / denom, + ] + + theta1 = torch.tensor( + [[inv_coeffs[0], inv_coeffs[1], inv_coeffs[2]], [inv_coeffs[3], inv_coeffs[4], inv_coeffs[5]]], + dtype=dtype, + device=device, + ) + + theta2 = torch.tensor( + [[inv_coeffs[6], inv_coeffs[7], 1.0], [inv_coeffs[6], inv_coeffs[7], 1.0]], dtype=dtype, device=device + ) + + return theta1, theta2 + + @_register_kernel_internal(perspective, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) def _perspective_bounding_boxes_dispatch( inpt: tv_tensors.BoundingBoxes, @@ -1831,6 +2060,48 @@ def _create_identity_grid(size: Tuple[int, int], device: torch.device, dtype: to return base_grid +def elastic_keypoints( + kp: torch.Tensor, + canvas_size: Tuple[int, int], + displacement: torch.Tensor +) -> torch.Tensor: + expected_shape = (1, canvas_size[0], canvas_size[1], 2) + if not isinstance(displacement, torch.Tensor): + raise TypeError("Argument displacement should be a Tensor") + elif displacement.shape != expected_shape: + raise ValueError(f"Argument displacement shape should be {expected_shape}, but given {displacement.shape}") + + if kp.numel() == 0: + return kp + + device = kp.device + dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 + + if displacement.dtype != dtype or displacement.device != device: + displacement = displacement.to(dtype=dtype, device=device) + + id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype) + inv_grid = id_grid.sub_(displacement) + + index_xy = kp.to(dtype=torch.long) + index_x, index_y = index_xy[:, 0], index_xy[:, 1] + + t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype) + transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5) + + return clamp_keypoints(transformed_points, canvas_size=canvas_size) + + +@_register_kernel_internal(elastic, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _elastic_keypoints_dispatch( + inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs +): + output = elastic_keypoints( + inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, displacement=displacement + ) + 
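+    # Re-wrap the transformed points as a KeyPoints tv_tensor; the elastic deformation keeps the canvas size unchanged.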
return tv_tensors.wrap(output, like=inpt) + + def elastic_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -2011,6 +2282,26 @@ def _center_crop_image_pil(image: PIL.Image.Image, output_size: List[int]) -> PI return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width) +def center_crop_keypoints( + inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int] +): + crop_height, crop_width = _center_crop_parse_output_size(output_size) + crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size) + return crop_keypoints( + inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width + ) + + +@_register_kernel_internal(center_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _center_crop_keypoints_dispatch( + inpt: tv_tensors.KeyPoints, output_size: List[int] +) -> tv_tensors.KeyPoints: + output, canvas_size = center_crop_keypoints( + inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, output_size=output_size + ) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) + + def center_crop_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -2146,6 +2437,21 @@ def _resized_crop_image_pil_dispatch( ) +def resized_crop_keypoints( + kp: torch.Tensor, top: int, left: int, height: int, width: int, size: List[int], +) -> Tuple[torch.Tensor, Tuple[int, int]]: + kp, canvas_size = crop_keypoints(kp, top, left, height, width) + return resize_keypoints(kp, size=size, canvas_size=canvas_size) + + +@_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _resized_crop_dispatch(inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs): + out, canvas_size = resized_crop_bounding_boxes( + inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size + ) + return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + + def resized_crop_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index b90e5fb7b5b..022c1cf7f25 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -176,6 +176,24 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: return xyxy +def _xyxy_to_points( + bounding_boxes: torch.Tensor +) -> torch.Tensor: + return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2) + + +def convert_box_to_points( + bounding_boxes: tv_tensors.BoundingBoxes +) -> tv_tensors.KeyPoints: + bbox = _convert_bounding_box_format( + bounding_boxes.as_subclass(torch.Tensor), + old_format=bounding_boxes.format, + new_format=BoundingBoxFormat.XYXY, + inplace=False + ) + return tv_tensors.KeyPoints(_xyxy_to_points(bbox), canvas_size=bounding_boxes.canvas_size) + + def _convert_bounding_box_format( bounding_boxes: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, inplace: bool = False ) -> torch.Tensor: @@ -254,6 +272,19 @@ def _clamp_bounding_boxes( return out_boxes.to(in_dtype) +def clamp_keypoints( + inpt: torch.Tensor, + canvas_size: Tuple[int, int] +) -> torch.Tensor: + if not torch.jit.is_scripting(): + _log_api_usage_once(clamp_bounding_boxes) + dtype = inpt.dtype + inpt = inpt.float() + inpt[..., 0].clamp_(0, canvas_size[1]) + inpt[..., 1].clamp_(0, 
canvas_size[0]) + return inpt.to(dtype=dtype) + + def clamp_bounding_boxes( inpt: torch.Tensor, format: Optional[BoundingBoxFormat] = None, diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index f40bf117753..b4559ab95e4 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -320,6 +320,7 @@ def to_dtype_video(video: torch.Tensor, dtype: torch.dtype = torch.float, scale: return to_dtype_image(video, dtype, scale=scale) +@_register_kernel_internal(to_dtype, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @_register_kernel_internal(to_dtype, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) @_register_kernel_internal(to_dtype, tv_tensors.Mask, tv_tensor_wrapper=False) def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: bool = False) -> torch.Tensor: @@ -327,6 +328,69 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo return inpt.to(dtype) +def sanitize_keypoints( + keypoints: torch.Tensor, + canvas_size: Optional[Tuple[int, int]] = None +) -> Tuple[torch.Tensor, torch.Tensor]: + """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. + + This removes the keypoints that are outside of their corresponing image. + You may want to first call :func:`~torchvision.transforms.v2.functional.clam_keypoints` + first to avoid undesired removals. + + .. note:: + Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes` + + Raises: + ValueError: If the keypoints are not passed as a two dimensional tensor. + + Args: + keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed + canvas_size (Optional[Tuple[int, int]], optional): The canvas_size of the bounding boxes + (size of the corresponding image/video). + Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. + + Returns: + out (tuple of Tensors): The subset of valid bounding boxes, and the corresponding indexing mask. + The mask can then be used to subset other tensors (e.g. labels) that are associated with the bounding boxes. + """ + if not keypoints.ndim == 2: + if keypoints.ndim < 2: + raise ValueError("Cannot sanitize a single Keypoint") + raise ValueError( + "Cannot sanitize KeyPoints structure that are not 2D. " + f"Expected shape to be (N, 2), got {keypoints.shape} ({keypoints.ndim=}, not 2)" + ) + if torch.jit.is_scripting() or is_pure_tensor(keypoints): + if canvas_size is None: + raise ValueError( + "canvas_size cannot be None if keypoints is a pure tensor. " + f"Got canvas_size={canvas_size}." + "Set that to appropriate values or pass keypoints as a tv_tensors.KeyPoints object." 
+ ) + valid = _get_sanitize_keypoints_mask( + keypoints, canvas_size=canvas_size, + ) + return keypoints[valid], valid + if not isinstance(keypoints, tv_tensors.KeyPoints): + raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") + valid = _get_sanitize_keypoints_mask( + keypoints, canvas_size=keypoints.canvas_size, + ) + return tv_tensors.wrap(keypoints[valid], like=keypoints), valid + + +def _get_sanitize_keypoints_mask( + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], +) -> torch.Tensor: + image_h, image_w = canvas_size + x = keypoints[:, 0] + y = keypoints[:, 1] + + return (0 < x) & (x < image_w) & (0 < y) & (y < image_h) + + def sanitize_bounding_boxes( bounding_boxes: torch.Tensor, format: Optional[tv_tensors.BoundingBoxFormat] = None, From 7436636b631c6a2397af438ca0195d948956061a Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:51:22 +0100 Subject: [PATCH 04/60] Added tests for keypoints --- test/common_utils.py | 12 ++++++ test/test_transforms_v2.py | 20 ++++++++++ test/test_transforms_v2_utils.py | 54 +++++++++++++++------------ test/test_tv_tensors.py | 63 +++++++++++++++++++++++++++----- 4 files changed, 115 insertions(+), 34 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 99c7931587d..0fafdce5d9e 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -8,6 +8,7 @@ import shutil import sys import tempfile +from typing import Sequence, Tuple import warnings from subprocess import CalledProcessError, check_output, STDOUT @@ -402,6 +403,17 @@ def make_image_pil(*args, **kwargs): return to_pil_image(make_image(*args, **kwargs)) +def make_keypoints(canvas_size: Tuple[int, int] = DEFAULT_SIZE, num_points: int | Sequence[int] = 4, dtype=None, device='cpu'): + """Make the KeyPoints for testing purposes""" + if isinstance(num_points, int): + num_points = [num_points] + half_point: Tuple[int, ...] = tuple(num_points) + (1,) + y = torch.randint(0, canvas_size[0] - 1, half_point, dtype=dtype, device=device) + x = torch.randint(0, canvas_size[1] - 1, half_point, dtype=dtype, device=device) + points = torch.cat((x, y), dim=-1) + return tv_tensors.KeyPoints(points, canvas_size=canvas_size) + + def make_bounding_boxes( canvas_size=DEFAULT_SIZE, *, diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index fb49525ecfe..35a0befb896 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -31,6 +31,7 @@ make_image, make_image_pil, make_image_tensor, + make_keypoints, make_segmentation_mask, make_video, make_video_tensor, @@ -223,6 +224,7 @@ def check_functional_kernel_signature_match(functional, *, kernel, input_type): # explicitly passed to the kernel. 
explicit_metadata = { tv_tensors.BoundingBoxes: {"format", "canvas_size"}, + tv_tensors.KeyPoints: {"canvas_size"} } kernel_params = [param for param in kernel_params if param.name not in explicit_metadata.get(input_type, set())] @@ -327,6 +329,18 @@ def _make_transform_sample(transform, *, image_or_video, adapter): canvas_size=size, device=device, ), + keypoints=make_keypoints(), keypoints_degenerate=tv_tensors.KeyPoints( + [ + [0, 1], # left edge + [1, 0], # top edge + [0, 0], # top left corner + [size[1], 1], # right edge + [size[1], 0], # top right corner + [1, size[0]], # bottom edge + [0, size[0]], # bottom left corner + [size[1], size[0]] # bottom right corner + ], canvas_size=size, device=device + ), detection_mask=make_detection_masks(size, device=device), segmentation_mask=make_segmentation_mask(size, device=device), int=0, @@ -680,6 +694,7 @@ def test_functional(self, size, make_input): (F.resize_image, torch.Tensor), (F._geometry._resize_image_pil, PIL.Image.Image), (F.resize_image, tv_tensors.Image), + (F.resize_keypoints, tv_tensors.KeyPoints), (F.resize_bounding_boxes, tv_tensors.BoundingBoxes), (F.resize_mask, tv_tensors.Mask), (F.resize_video, tv_tensors.Video), @@ -1035,6 +1050,7 @@ def test_functional(self, make_input): (F.horizontal_flip_image, torch.Tensor), (F._geometry._horizontal_flip_image_pil, PIL.Image.Image), (F.horizontal_flip_image, tv_tensors.Image), + (F.horizontal_flip_keypoints, tv_tensors.KeyPoints), (F.horizontal_flip_bounding_boxes, tv_tensors.BoundingBoxes), (F.horizontal_flip_mask, tv_tensors.Mask), (F.horizontal_flip_video, tv_tensors.Video), @@ -1203,6 +1219,7 @@ def test_functional(self, make_input): (F.affine_image, torch.Tensor), (F._geometry._affine_image_pil, PIL.Image.Image), (F.affine_image, tv_tensors.Image), + (F.affine_keypoints, tv_tensors.KeyPoints), (F.affine_bounding_boxes, tv_tensors.BoundingBoxes), (F.affine_mask, tv_tensors.Mask), (F.affine_video, tv_tensors.Video), @@ -1485,6 +1502,7 @@ def test_functional(self, make_input): (F.vertical_flip_image, torch.Tensor), (F._geometry._vertical_flip_image_pil, PIL.Image.Image), (F.vertical_flip_image, tv_tensors.Image), + (F.vertical_flip_keypoints, tv_tensors.KeyPoints), (F.vertical_flip_bounding_boxes, tv_tensors.BoundingBoxes), (F.vertical_flip_mask, tv_tensors.Mask), (F.vertical_flip_video, tv_tensors.Video), @@ -1627,6 +1645,7 @@ def test_functional(self, make_input): (F.rotate_image, torch.Tensor), (F._geometry._rotate_image_pil, PIL.Image.Image), (F.rotate_image, tv_tensors.Image), + (F.rotate_keypoints, tv_tensors.KeyPoints), (F.rotate_bounding_boxes, tv_tensors.BoundingBoxes), (F.rotate_mask, tv_tensors.Mask), (F.rotate_video, tv_tensors.Video), @@ -2332,6 +2351,7 @@ def test_error(self, T): F.to_pil_image(imgs[0]), tv_tensors.Mask(torch.rand(12, 12)), tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12), + tv_tensors.KeyPoints(torch.rand(4, 2), canvas_size=(12, 12)) ): with pytest.raises(ValueError, match="does not support PIL images, "): cutmix_mixup(input_with_bad_type) diff --git a/test/test_transforms_v2_utils.py b/test/test_transforms_v2_utils.py index 53222c6a2c8..cda255d0173 100644 --- a/test/test_transforms_v2_utils.py +++ b/test/test_transforms_v2_utils.py @@ -4,7 +4,7 @@ import torch import torchvision.transforms.v2._utils -from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_masks, make_image +from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_masks, make_image, make_keypoints from torchvision import 
tv_tensors from torchvision.transforms.v2._utils import has_all, has_any @@ -14,29 +14,32 @@ IMAGE = make_image(DEFAULT_SIZE, color_space="RGB") BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=tv_tensors.BoundingBoxFormat.XYXY) MASK = make_detection_masks(DEFAULT_SIZE) +KEYPOINTS = make_keypoints(DEFAULT_SIZE) @pytest.mark.parametrize( ("sample", "types", "expected"), [ - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Mask,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), - ((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes), False), - ((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask), False), - ((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask), False), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.KeyPoints), True), + ((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), False), + ((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask, tv_tensors.KeyPoints), False), + ((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), False), + ((KEYPOINTS,), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False), ( - (IMAGE, BOUNDING_BOX, MASK), - (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), + (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), + (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True, ), - ((), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False), - ((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, tv_tensors.Image),), True), - ((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False), - ((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True), + ((), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), False), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (lambda obj: isinstance(obj, tv_tensors.Image),), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (lambda _: False,), False), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (lambda _: True,), True), ((IMAGE,), (tv_tensors.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor), True), ( (torch.Tensor(IMAGE),), @@ -57,15 +60,18 @@ def test_has_any(sample, types, expected): @pytest.mark.parametrize( ("sample", "types", "expected"), [ - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Mask,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image,), True), + 
((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask, tv_tensors.KeyPoints), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True), ( - (IMAGE, BOUNDING_BOX, MASK), - (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), + (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), + (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True, ), ((BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), False), diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index ed75ae35ecd..6d4eed9c579 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -2,7 +2,7 @@ import pytest import torch -from common_utils import assert_equal, make_bounding_boxes, make_image, make_segmentation_mask, make_video +from common_utils import assert_equal, make_bounding_boxes, make_image, make_keypoints, make_segmentation_mask, make_video from PIL import Image from torchvision import tv_tensors @@ -49,6 +49,20 @@ def test_bbox_dim_error(): tv_tensors.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32)) +@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 2)), [[0, 0,], [2, 2,]], [1, 2,]]) +def test_keypoints_instance(data): + kpoint = tv_tensors.KeyPoints(data, canvas_size=(32, 32)) + assert isinstance(kpoint, tv_tensors.KeyPoints) + assert type(kpoint) is tv_tensors.KeyPoints + assert kpoint.shape[-1] == 2 + + +def test_keypoints_shape_error(): + data_3d = [(0, 1, 2)] + with pytest.raises(ValueError, match="shape"): + tv_tensors.KeyPoints(torch.tensor(data_3d), canvas_size=(11, 7)) + + @pytest.mark.parametrize( ("data", "input_requires_grad", "expected_requires_grad"), [ @@ -68,7 +82,9 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad): assert tv_tensor.requires_grad is expected_requires_grad -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) def test_isinstance(make_input): assert isinstance(make_input(), torch.Tensor) @@ -80,7 +96,9 @@ def test_wrapping_no_copy(): assert image.data_ptr() == tensor.data_ptr() -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) def test_to_wrapping(make_input): dp = make_input() @@ -90,7 +108,9 @@ def test_to_wrapping(make_input): assert dp_to.dtype is torch.float64 -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_to_tv_tensor_reference(make_input, 
return_type): tensor = torch.rand((3, 16, 16), dtype=torch.float64) @@ -104,7 +124,9 @@ def test_to_tv_tensor_reference(make_input, return_type): assert type(tensor) is torch.Tensor -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_clone_wrapping(make_input, return_type): dp = make_input() @@ -116,7 +138,9 @@ def test_clone_wrapping(make_input, return_type): assert dp_clone.data_ptr() != dp.data_ptr() -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_requires_grad__wrapping(make_input, return_type): dp = make_input(dtype=torch.float) @@ -131,7 +155,9 @@ def test_requires_grad__wrapping(make_input, return_type): assert dp_requires_grad.requires_grad -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_detach_wrapping(make_input, return_type): dp = make_input(dtype=torch.float).requires_grad_(True) @@ -148,29 +174,42 @@ def test_force_subclass_with_metadata(return_type): # Largely the same as above, we additionally check that the metadata is preserved format, canvas_size = "XYXY", (32, 32) bbox = tv_tensors.BoundingBoxes([[0, 0, 5, 5], [2, 2, 7, 7]], format=format, canvas_size=canvas_size) + kpoints = tv_tensors.KeyPoints([[0, 0], [2, 2]], canvas_size=canvas_size) tv_tensors.set_return_type(return_type) bbox = bbox.clone() + kpoints = kpoints.clone() if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) bbox = bbox.to(torch.float64) + kpoints = kpoints.to(torch.float64) if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) bbox = bbox.detach() + kpoints = kpoints.detach() if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) assert not bbox.requires_grad + assert not kpoints.requires_grad bbox.requires_grad_(True) + kpoints.requires_grad_(True) if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) assert bbox.requires_grad + assert kpoints.requires_grad tv_tensors.set_return_type("tensor") -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_other_op_no_wrapping(make_input, return_type): dp = make_input() @@ -182,7 +221,9 @@ def test_other_op_no_wrapping(make_input, return_type): assert type(output) is (type(dp) if return_type == "TVTensor" else torch.Tensor) -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, 
make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize( "op", [ @@ -199,7 +240,9 @@ def test_no_tensor_output_op_no_wrapping(make_input, op): assert type(output) is not type(dp) -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_inplace_op_no_wrapping(make_input, return_type): dp = make_input() From b35cba60ae2190c78bd890a746f184fe64f15158 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:52:08 +0100 Subject: [PATCH 05/60] Applied ufmt formatting --- torchvision/transforms/v2/_misc.py | 10 +- .../transforms/v2/functional/__init__.py | 22 ++--- .../transforms/v2/functional/_geometry.py | 91 +++++++++++-------- torchvision/transforms/v2/functional/_meta.py | 15 +-- torchvision/transforms/v2/functional/_misc.py | 9 +- torchvision/tv_tensors/__init__.py | 16 +++- torchvision/tv_tensors/_keypoints.py | 31 +++++-- 7 files changed, 113 insertions(+), 81 deletions(-) diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index ccb5968cd59..f0b83c14bd8 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,7 +9,15 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform -from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_all_keypoints, get_bounding_boxes, has_any, is_pure_tensor +from ._utils import ( + _parse_labels_getter, + _setup_number_or_seq, + _setup_size, + get_all_keypoints, + get_bounding_boxes, + has_any, + is_pure_tensor, +) # TODO: do we want/need to expose this? 
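With the kernels from the previous patches registered, the intent is that KeyPoints flow through the v2 transforms like any other tv_tensor. An illustrative sketch of the expected usage (not part of the patch; exact numerical results depend on the kernels above):

    import torch
    from torchvision import tv_tensors
    from torchvision.transforms import v2

    # Two XY points on a 64x64 canvas, alongside a dummy image.
    points = tv_tensors.KeyPoints([[10, 20], [30, 40]], canvas_size=(64, 64))
    image = tv_tensors.Image(torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8))

    transform = v2.Compose([v2.Resize((32, 32)), v2.RandomHorizontalFlip(p=1.0)])
    out_image, out_points = transform(image, points)

    # The output is still a KeyPoints instance and its canvas_size reflects the resize.
    print(type(out_points), out_points.canvas_size)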
diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index cbc6e02b2fb..ec649759a72 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -68,27 +68,27 @@ ) from ._geometry import ( affine, - affine_keypoints, affine_bounding_boxes, affine_image, + affine_keypoints, affine_mask, affine_video, center_crop, - center_crop_keypoints, center_crop_bounding_boxes, center_crop_image, + center_crop_keypoints, center_crop_mask, center_crop_video, crop, - crop_keypoints, crop_bounding_boxes, crop_image, + crop_keypoints, crop_mask, crop_video, elastic, - elastic_keypoints, elastic_bounding_boxes, elastic_image, + elastic_keypoints, elastic_mask, elastic_transform, elastic_video, @@ -97,39 +97,39 @@ five_crop_video, hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file horizontal_flip, - horizontal_flip_keypoints, horizontal_flip_bounding_boxes, horizontal_flip_image, + horizontal_flip_keypoints, horizontal_flip_mask, horizontal_flip_video, pad, - pad_keypoints, pad_bounding_boxes, pad_image, + pad_keypoints, pad_mask, pad_video, - perspective, perspectice_keypoints, + perspective, perspective_bounding_boxes, perspective_image, perspective_mask, perspective_video, resize, - resize_keypoints, resize_bounding_boxes, resize_image, + resize_keypoints, resize_mask, resize_video, resized_crop, - resized_crop_keypoints, resized_crop_bounding_boxes, resized_crop_image, + resized_crop_keypoints, resized_crop_mask, resized_crop_video, rotate, - rotate_keypoints, rotate_bounding_boxes, rotate_image, + rotate_keypoints, rotate_mask, rotate_video, ten_crop, @@ -138,9 +138,9 @@ vertical_flip, vertical_flip_bounding_boxes, vertical_flip_image, + vertical_flip_keypoints, vertical_flip_mask, vertical_flip_video, - vertical_flip_keypoints, vflip, ) from ._misc import ( diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index a80b246630a..792965433f1 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -344,7 +344,8 @@ def _resize_mask_dispatch( def resize_keypoints( - kp: torch.Tensor, size: Optional[List[int]], + kp: torch.Tensor, + size: Optional[List[int]], canvas_size: Tuple[int, int], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, @@ -363,14 +364,19 @@ def resize_keypoints( @_register_kernel_internal(resize, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _resize_keypoints_dispatch( - kp: tv_tensors.KeyPoints, size: Optional[List[int]], + kp: tv_tensors.KeyPoints, + size: Optional[List[int]], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, antialias: Optional[bool] = True, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( - kp.as_subclass(torch.Tensor), size, canvas_size=kp.canvas_size, interpolation=interpolation, - max_size=max_size, antialias=antialias + kp.as_subclass(torch.Tensor), + size, + canvas_size=kp.canvas_size, + interpolation=interpolation, + max_size=max_size, + antialias=antialias, ) return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) @@ -856,8 +862,12 @@ def affine_keypoints( return _affine_keypoints_with_expand( keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, - angle=angle, translate=translate, scale=scale, shear=shear, - center=center, 
expand=False + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + expand=False, ) @@ -1167,9 +1177,14 @@ def rotate_keypoints( fill: _FillTypeJIT = None, ) -> Tuple[torch.Tensor, Tuple[int, int]]: return _affine_keypoints_with_expand( - keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, - angle=-angle, translate=[0.0, 0.0], scale=1.0, - shear=[0.0, 0.0], center=center, expand=expand, + keypoints=keypoints.as_subclass(torch.Tensor), + canvas_size=keypoints.canvas_size, + angle=-angle, + translate=[0.0, 0.0], + scale=1.0, + shear=[0.0, 0.0], + center=center, + expand=expand, ) @@ -1445,10 +1460,7 @@ def pad_mask( def pad_keypoints( - keypoints: torch.Tensor, - canvas_size: Tuple[int, int], - padding: List[int], - padding_mode: str = "constant" + keypoints: torch.Tensor, canvas_size: Tuple[int, int], padding: List[int], padding_mode: str = "constant" ): SUPPORTED_MODES = ["constant"] if padding_mode not in SUPPORTED_MODES: @@ -1468,8 +1480,10 @@ def _pad( keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs ) -> tv_tensors.KeyPoints: output, canvas_size = pad_keypoints( - keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, - padding=padding, padding_mode=padding_mode + keypoints.as_subclass(torch.Tensor), + canvas_size=keypoints.canvas_size, + padding=padding, + padding_mode=padding_mode, ) return tv_tensors.wrap(output, like=keypoints, canvas_size=canvas_size) @@ -1575,7 +1589,9 @@ def crop_keypoints( @_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int) -> tv_tensors.KeyPoints: +def crop_keypoints_dispatch( + inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int +) -> tv_tensors.KeyPoints: out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) @@ -1792,8 +1808,10 @@ def _perspective_keypoints_dispatch( ) -> tv_tensors.BoundingBoxes: output = perspectice_keypoints( inpt.as_subclass(torch.Tensor), - canvas_size=inpt.canvas_size, startpoints=startpoints, - endpoints=endpoints, coefficients=coefficients, + canvas_size=inpt.canvas_size, + startpoints=startpoints, + endpoints=endpoints, + coefficients=coefficients, ) return tv_tensors.wrap(output, like=inpt) @@ -2060,11 +2078,7 @@ def _create_identity_grid(size: Tuple[int, int], device: torch.device, dtype: to return base_grid -def elastic_keypoints( - kp: torch.Tensor, - canvas_size: Tuple[int, int], - displacement: torch.Tensor -) -> torch.Tensor: +def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: expected_shape = (1, canvas_size[0], canvas_size[1], 2) if not isinstance(displacement, torch.Tensor): raise TypeError("Argument displacement should be a Tensor") @@ -2093,12 +2107,8 @@ def elastic_keypoints( @_register_kernel_internal(elastic, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _elastic_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs -): - output = elastic_keypoints( - inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, displacement=displacement - ) +def _elastic_keypoints_dispatch(inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs): + output = elastic_keypoints(inpt.as_subclass(torch.Tensor), 
canvas_size=inpt.canvas_size, displacement=displacement) return tv_tensors.wrap(output, like=inpt) @@ -2282,20 +2292,14 @@ def _center_crop_image_pil(image: PIL.Image.Image, output_size: List[int]) -> PI return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width) -def center_crop_keypoints( - inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int] -): +def center_crop_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int]): crop_height, crop_width = _center_crop_parse_output_size(output_size) crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size) - return crop_keypoints( - inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width - ) + return crop_keypoints(inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width) @_register_kernel_internal(center_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _center_crop_keypoints_dispatch( - inpt: tv_tensors.KeyPoints, output_size: List[int] -) -> tv_tensors.KeyPoints: +def _center_crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, output_size: List[int]) -> tv_tensors.KeyPoints: output, canvas_size = center_crop_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, output_size=output_size ) @@ -2438,14 +2442,21 @@ def _resized_crop_image_pil_dispatch( def resized_crop_keypoints( - kp: torch.Tensor, top: int, left: int, height: int, width: int, size: List[int], + kp: torch.Tensor, + top: int, + left: int, + height: int, + width: int, + size: List[int], ) -> Tuple[torch.Tensor, Tuple[int, int]]: kp, canvas_size = crop_keypoints(kp, top, left, height, width) return resize_keypoints(kp, size=size, canvas_size=canvas_size) @_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _resized_crop_dispatch(inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs): +def _resized_crop_dispatch( + inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs +): out, canvas_size = resized_crop_bounding_boxes( inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size ) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 022c1cf7f25..2a29d87d6d7 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -176,20 +176,16 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: return xyxy -def _xyxy_to_points( - bounding_boxes: torch.Tensor -) -> torch.Tensor: +def _xyxy_to_points(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2) -def convert_box_to_points( - bounding_boxes: tv_tensors.BoundingBoxes -) -> tv_tensors.KeyPoints: +def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, new_format=BoundingBoxFormat.XYXY, - inplace=False + inplace=False, ) return tv_tensors.KeyPoints(_xyxy_to_points(bbox), canvas_size=bounding_boxes.canvas_size) @@ -272,10 +268,7 @@ def _clamp_bounding_boxes( return out_boxes.to(in_dtype) -def clamp_keypoints( - inpt: torch.Tensor, - canvas_size: Tuple[int, int] -) -> torch.Tensor: +def clamp_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int]) -> 
torch.Tensor: if not torch.jit.is_scripting(): _log_api_usage_once(clamp_bounding_boxes) dtype = inpt.dtype diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index b4559ab95e4..42a85a2d9fe 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -329,8 +329,7 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo def sanitize_keypoints( - keypoints: torch.Tensor, - canvas_size: Optional[Tuple[int, int]] = None + keypoints: torch.Tensor, canvas_size: Optional[Tuple[int, int]] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. @@ -369,13 +368,15 @@ def sanitize_keypoints( "Set that to appropriate values or pass keypoints as a tv_tensors.KeyPoints object." ) valid = _get_sanitize_keypoints_mask( - keypoints, canvas_size=canvas_size, + keypoints, + canvas_size=canvas_size, ) return keypoints[valid], valid if not isinstance(keypoints, tv_tensors.KeyPoints): raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") valid = _get_sanitize_keypoints_mask( - keypoints, canvas_size=keypoints.canvas_size, + keypoints, + canvas_size=keypoints.canvas_size, ) return tv_tensors.wrap(keypoints[valid], like=keypoints), valid diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index 3a56bf88330..e1c6b2202df 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -1,13 +1,14 @@ from typing import TypeVar + import torch from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat from ._image import Image +from ._keypoints import KeyPoints from ._mask import Mask from ._torch_function_helpers import set_return_type from ._tv_tensor import TVTensor from ._video import Video -from ._keypoints import KeyPoints _WRAP_LIKE_T = TypeVar("_WRAP_LIKE_T", bound=TVTensor) @@ -37,12 +38,19 @@ def wrap(wrappee: torch.Tensor, *, like: _WRAP_LIKE_T, **kwargs) -> _WRAP_LIKE_T canvas_size=kwargs.get("canvas_size", like.canvas_size), ) elif isinstance(like, KeyPoints): - return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) # type:ignore + return KeyPoints(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size)) # type:ignore else: return wrappee.as_subclass(type(like)) __all__: list[str] = [ - "wrap", "KeyPoints", "Video", "TVTensor", "set_return_type", - "Mask", "Image", "BoundingBoxFormat", "BoundingBoxes" + "wrap", + "KeyPoints", + "Video", + "TVTensor", + "set_return_type", + "Mask", + "Image", + "BoundingBoxFormat", + "BoundingBoxes", ] diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index d044bb77824..e4e5e307511 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -1,7 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union + +from typing import Any, Mapping, MutableSequence, Optional, Sequence, Tuple, TYPE_CHECKING, Union + import torch from torch.utils._pytree import tree_flatten + from ._tv_tensor import TVTensor @@ -24,9 +27,13 @@ class KeyPoints(TVTensor): canvas_size: Tuple[int, int] def __new__( - cls, data: Any, *, dtype: Optional[torch.dtype] = None, + cls, + data: Any, + *, + dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, - requires_grad: 
Optional[bool] = None, canvas_size: Tuple[int, int] + requires_grad: Optional[bool] = None, + canvas_size: Tuple[int, int], ): tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) if tensor.ndim == 1: @@ -42,15 +49,22 @@ def __new__( # Not read or defined at Runtime (only at linting time). # TODO: Add this to all TVTensors def __init__( - self, data: Any, *, dtype: Optional[torch.dtype] = None, + self, + data: Any, + *, + dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, - requires_grad: Optional[bool] = None, canvas_size: Tuple[int, int] + requires_grad: Optional[bool] = None, + canvas_size: Tuple[int, int], ): ... @classmethod def _wrap_output( - cls, output: Any, args: Sequence[Any] = (), kwargs: Optional[Mapping[str, Any]] = None, + cls, + output: Any, + args: Sequence[Any] = (), + kwargs: Optional[Mapping[str, Any]] = None, ) -> Any: # Mostly copied over from the BoundingBoxes TVTensor, minor improvements. # This copies over the metadata. @@ -65,10 +79,7 @@ def _wrap_output( # NB: output is checked against sequence because it has already been checked against Tensor # Since a Tensor is a sequence of Tensor, had it not been the case, we may have had silent # or complex errors - output = tuple( - KeyPoints(part, canvas_size=canvas_size) - for part in output - ) + output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) elif isinstance(output, MutableSequence): for i, part in enumerate(output): output[i] = KeyPoints(part, canvas_size=canvas_size) From a19ec0b9daf2331b8e808c2815a27f5864fa8a4b Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Mon, 16 Dec 2024 13:14:48 +0100 Subject: [PATCH 06/60] Fixed the bugs found while testing --- test/common_utils.py | 7 +- test/test_transforms_v2.py | 16 +++-- test/test_transforms_v2_utils.py | 2 +- torchvision/transforms/v2/_augment.py | 2 +- torchvision/transforms/v2/_utils.py | 1 + .../transforms/v2/functional/_geometry.py | 65 ++++++++++++++----- torchvision/transforms/v2/functional/_meta.py | 5 ++ 7 files changed, 72 insertions(+), 26 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 0fafdce5d9e..bfc2a75ecb7 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -403,7 +403,9 @@ def make_image_pil(*args, **kwargs): return to_pil_image(make_image(*args, **kwargs)) -def make_keypoints(canvas_size: Tuple[int, int] = DEFAULT_SIZE, num_points: int | Sequence[int] = 4, dtype=None, device='cpu'): +def make_keypoints( + canvas_size: Tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device='cpu' +) -> tv_tensors.KeyPoints: """Make the KeyPoints for testing purposes""" if isinstance(num_points, int): num_points = [num_points] @@ -411,7 +413,8 @@ def make_keypoints(canvas_size: Tuple[int, int] = DEFAULT_SIZE, num_points: int y = torch.randint(0, canvas_size[0] - 1, half_point, dtype=dtype, device=device) x = torch.randint(0, canvas_size[1] - 1, half_point, dtype=dtype, device=device) points = torch.cat((x, y), dim=-1) - return tv_tensors.KeyPoints(points, canvas_size=canvas_size) + keypoints = tv_tensors.KeyPoints(points, canvas_size=canvas_size) + return keypoints def make_bounding_boxes( diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 35a0befb896..229ef58b0d8 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -329,7 +329,7 @@ def _make_transform_sample(transform, *, image_or_video, adapter): canvas_size=size, device=device, 
), - keypoints=make_keypoints(), keypoints_degenerate=tv_tensors.KeyPoints( + keypoints=make_keypoints(canvas_size=size), keypoints_degenerate=tv_tensors.KeyPoints( [ [0, 1], # left edge [1, 0], # top edge @@ -2351,8 +2351,9 @@ def test_error(self, T): F.to_pil_image(imgs[0]), tv_tensors.Mask(torch.rand(12, 12)), tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12), - tv_tensors.KeyPoints(torch.rand(4, 2), canvas_size=(12, 12)) + tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)) ): + print(type(input_with_bad_type), cutmix_mixup) with pytest.raises(ValueError, match="does not support PIL images, "): cutmix_mixup(input_with_bad_type) @@ -2760,8 +2761,9 @@ def test_functional_signature(self, kernel, input_type): check_functional_kernel_signature_match(F.elastic, kernel=kernel, input_type=input_type) @pytest.mark.parametrize( - "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + "make_input", [ + make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints + ], ) def test_displacement_error(self, make_input): input = make_input() @@ -2773,8 +2775,10 @@ def test_displacement_error(self, make_input): F.elastic(input, displacement=torch.rand(F.get_size(input))) @pytest.mark.parametrize( - "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + "make_input", [ + make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, + make_keypoints + ], ) # ElasticTransform needs larger images to avoid the needed internal padding being larger than the actual image @pytest.mark.parametrize("size", [(163, 163), (72, 333), (313, 95)]) diff --git a/test/test_transforms_v2_utils.py b/test/test_transforms_v2_utils.py index cda255d0173..813a3cd93e6 100644 --- a/test/test_transforms_v2_utils.py +++ b/test/test_transforms_v2_utils.py @@ -26,7 +26,7 @@ ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.Mask), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), - ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.KeyPoints), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.KeyPoints,), True), ((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), False), ((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask, tv_tensors.KeyPoints), False), ((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), False), diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py index 93d4ba45d65..4237eaba425 100644 --- a/torchvision/transforms/v2/_augment.py +++ b/torchvision/transforms/v2/_augment.py @@ -156,7 +156,7 @@ def forward(self, *inputs): flat_inputs, spec = tree_flatten(inputs) needs_transform_list = self._needs_transform_list(flat_inputs) - if has_any(flat_inputs, PIL.Image.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask): + if has_any(flat_inputs, PIL.Image.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints): raise ValueError(f"{type(self).__name__}() does not support PIL images, bounding boxes and masks.") labels = self._labels_getter(inputs) diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 4e6e76418ec..91258829f3b 100644 --- 
a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -205,6 +205,7 @@ def query_size(flat_inputs: List[Any]) -> Tuple[int, int]: tv_tensors.Video, tv_tensors.Mask, tv_tensors.BoundingBoxes, + tv_tensors.KeyPoints, ), ) } diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 792965433f1..a27a6b158ae 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -65,9 +65,15 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: return horizontal_flip_image(mask) +def horizontal_flip_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int]): + kp[0] = kp[0].sub_(canvas_size[1]).neg_() + return kp + + @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def horizontal_flip_keypoints(kp: tv_tensors.KeyPoints): - return kp.sub_(kp.canvas_size[1]).neg_() +def _horizontal_flip_keypoints_dispatch(kp: tv_tensors.KeyPoints): + out = horizontal_flip_keypoints(kp.as_subclass(torch.Tensor), canvas_size=kp.canvas_size) + return tv_tensors.wrap(out, like=kp) def horizontal_flip_bounding_boxes( @@ -127,9 +133,10 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: return vertical_flip_image(mask) -@_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +@_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): - return kp.sub_(kp.canvas_size[1]).neg_() + kp[1] = kp[1].sub_(kp.canvas_size[0]).neg_() + return kp def vertical_flip_bounding_boxes( @@ -850,9 +857,9 @@ def _affine_keypoints_with_expand( return keypoints, canvas_size -@_register_kernel_internal(affine, tv_tensors.KeyPoints) def affine_keypoints( - keypoints: tv_tensors.KeyPoints, + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], angle: Union[int, float], translate: List[float], scale: float, @@ -860,8 +867,8 @@ def affine_keypoints( center: Optional[List[float]] = None, ): return _affine_keypoints_with_expand( - keypoints=keypoints.as_subclass(torch.Tensor), - canvas_size=keypoints.canvas_size, + keypoints=keypoints, + canvas_size=canvas_size, angle=angle, translate=translate, scale=scale, @@ -871,6 +878,28 @@ def affine_keypoints( ) +@_register_kernel_internal(affine, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _affine_keypoints_dispatch( + inpt: tv_tensors.BoundingBoxes, + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + center: Optional[List[float]] = None, + **kwargs, +) -> tv_tensors.BoundingBoxes: + output, canvas_size = affine_keypoints( + inpt.as_subclass(torch.Tensor), + canvas_size=inpt.canvas_size, + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + ) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) + + def _affine_bounding_boxes_with_expand( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1188,7 +1217,7 @@ def rotate_keypoints( ) -@_register_kernel_internal(rotate, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) +@_register_kernel_internal(rotate, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _rotate_keypoints_dispatch( kp: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: @@ -1475,8 +1504,8 @@ def pad_keypoints( return clamp_keypoints(keypoints + pad, canvas_size), canvas_size 
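For reference, a small worked example (not taken from the patch) of the shift pad_keypoints applies, assuming padding follows torchvision's [left, top, right, bottom] convention:

    import torch
    # points on a (height=10, width=10) canvas, padded with padding=[2, 3, 0, 0]
    kp = torch.tensor([[1, 1], [9, 9]])
    kp + torch.tensor([2, 3])                    # [[3, 4], [11, 12]]: every point moves by (+left, +top)
    new_canvas_size = (10 + 3 + 0, 10 + 2 + 0)   # (13, 12): height grows by top+bottom, width by left+right

Only the left/top offsets move the coordinates; right/bottom padding merely enlarges the canvas before the final clamp.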
-@_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensors_wrapper=False) -def _pad( +@_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _pad_keypoints_dispatch( keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs ) -> tv_tensors.KeyPoints: output, canvas_size = pad_keypoints( @@ -1589,7 +1618,7 @@ def crop_keypoints( @_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def crop_keypoints_dispatch( +def _crop_keypoints_dispatch( inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int ) -> tv_tensors.KeyPoints: out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) @@ -1786,11 +1815,12 @@ def perspectice_keypoints( denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3] if denom == 0: raise RuntimeError( - f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform bounding boxes. " + f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform keypoints. " f"Denominator is zero, denom={denom}" ) theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) + kp = torch.cat([kp, torch.ones(kp.shape[0], 1, device=kp.device)], dim=-1) numer_points = torch.matmul(kp, theta1.T) denom_points = torch.matmul(kp, theta2.T) @@ -1892,7 +1922,7 @@ def perspective_bounding_boxes( def _compute_perspective_thetas( - perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, / + perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, ) -> Tuple[torch.Tensor, torch.Tensor]: inv_coeffs = [ (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, @@ -2099,6 +2129,9 @@ def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displaceme index_xy = kp.to(dtype=torch.long) index_x, index_y = index_xy[:, 0], index_xy[:, 1] + # Unlike bounding boxes, this may not work well. 
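A short rationale for the clamp added below, since the comment above is terse: elastic_keypoints looks up each point's displacement by integer-indexing inv_grid at (index_y, index_x). A keypoint lying outside the canvas (for example a degenerate annotation, or a point pushed out by an earlier transform) would otherwise produce an out-of-range index and fail, so the indices are clamped to the grid bounds first; the trade-off is that such points receive the displacement of the nearest border pixel.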
+ index_x.clamp_(0, inv_grid.shape[2] - 1) + index_y.clamp_(0, inv_grid.shape[1] - 1) t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype) transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5) @@ -2457,8 +2490,8 @@ def resized_crop_keypoints( def _resized_crop_dispatch( inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs ): - out, canvas_size = resized_crop_bounding_boxes( - inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size + out, canvas_size = resized_crop_keypoints( + inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size ) return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 2a29d87d6d7..a7f528ecc3e 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -121,6 +121,11 @@ def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> List[int] return list(bounding_box.canvas_size) +@_register_kernel_internal(get_size, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> List[int]: + return list(keypoints.canvas_size) + + def get_num_frames(inpt: torch.Tensor) -> int: if torch.jit.is_scripting(): return get_num_frames_video(inpt) From 5f4b18825fcfaa0499023d51f15be8dd6c6823c4 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Tue, 17 Dec 2024 11:48:48 +0100 Subject: [PATCH 07/60] Improved documentation to take KeyPoints into account --- docs/source/tv_tensors.rst | 1 + gallery/transforms/plot_tv_tensors.py | 11 ++++++++++- torchvision/transforms/v2/functional/_meta.py | 10 +++++++++- torchvision/tv_tensors/_keypoints.py | 5 ++++- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/docs/source/tv_tensors.rst b/docs/source/tv_tensors.rst index cb8a3c45fa9..d292012fdf8 100644 --- a/docs/source/tv_tensors.rst +++ b/docs/source/tv_tensors.rst @@ -21,6 +21,7 @@ info. Image Video + KeyPoints BoundingBoxFormat BoundingBoxes Mask diff --git a/gallery/transforms/plot_tv_tensors.py b/gallery/transforms/plot_tv_tensors.py index 5bce37aa374..2c6ebbf9031 100644 --- a/gallery/transforms/plot_tv_tensors.py +++ b/gallery/transforms/plot_tv_tensors.py @@ -46,11 +46,12 @@ # Under the hood, they are needed in :mod:`torchvision.transforms.v2` to correctly dispatch to the appropriate function # for the input data. # -# :mod:`torchvision.tv_tensors` supports four types of TVTensors: +# :mod:`torchvision.tv_tensors` supports five types of TVTensors: # # * :class:`~torchvision.tv_tensors.Image` # * :class:`~torchvision.tv_tensors.Video` # * :class:`~torchvision.tv_tensors.BoundingBoxes` +# * :class:`~torchvision.tv_tensors.KeyPoints` # * :class:`~torchvision.tv_tensors.Mask` # # What can I do with a TVTensor? @@ -96,6 +97,7 @@ # :class:`~torchvision.tv_tensors.BoundingBoxes` requires the coordinate format as well as the size of the # corresponding image (``canvas_size``) alongside the actual values. These # metadata are required to properly transform the bounding boxes. +# In a similar fashion, :class:`~torchvision.tv_tensors.KeyPoints` also require the ``canvas_size`` metadata to be added. 
bboxes = tv_tensors.BoundingBoxes( [[17, 16, 344, 495], [0, 10, 0, 10]], @@ -104,6 +106,13 @@ ) print(bboxes) + +keypoints = tv_tensors.KeyPoints( + [[17, 16], [344, 495], [0, 10], [0, 10]], + canvas_size=image.shape[-2:] +) +print(keypoints) + # %% # Using ``tv_tensors.wrap()`` # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index a7f528ecc3e..1a9692caa22 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -182,10 +182,18 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: def _xyxy_to_points(bounding_boxes: torch.Tensor) -> torch.Tensor: - return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2) + return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: + """Converts a set of bounding boxes to its edge points. + + Args: + bounding_boxes (tv_tensors.BoundingBoxes): A set of ``N`` bounding boxes (of shape ``[N, 4]``) + + Returns: + tv_tensors.KeyPoints: The edges, of shape ``[N, 4, 2]`` + """ bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index e4e5e307511..79997d004fd 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -13,6 +13,9 @@ class KeyPoints(TVTensor): Each point is represented by its XY coordinates. + KeyPoints can be converted from :class:`torchvision.tv_tensors.BoundingBoxes` + by :func:`torchvision.transforms.v2.functional.convert_box_to_points`. + Args: data: Any data that can be turned into a tensor with :func:`torch.as_tensor`. canvas_size (two-tuple of ints): Height and width of the corresponding image or video. @@ -47,7 +50,7 @@ def __new__( if TYPE_CHECKING: # EVIL: Just so that MYPY+PYLANCE+others stop shouting that everything is wrong when initializeing the TVTensor # Not read or defined at Runtime (only at linting time). 
- # TODO: Add this to all TVTensors + # TODO: BOUNDING BOXES needs something similar def __init__( self, data: Any, From cabce1ca1514c1427999c74a657429713c6d586c Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Tue, 17 Dec 2024 11:51:54 +0100 Subject: [PATCH 08/60] Applied ufmt check --- torchvision/transforms/v2/functional/_geometry.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index a27a6b158ae..135ee3be8bb 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -1922,7 +1922,10 @@ def perspective_bounding_boxes( def _compute_perspective_thetas( - perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, + perspective_coeffs: List[float], + dtype: torch.dtype, + device: torch.device, + denom: float, ) -> Tuple[torch.Tensor, torch.Tensor]: inv_coeffs = [ (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, From d1b27ad96a1e05f99593dd6320abbb52ee426602 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Tue, 17 Dec 2024 14:52:16 +0100 Subject: [PATCH 09/60] Fixed the hflip not being along the right coordinate --- torchvision/transforms/v2/functional/_geometry.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 135ee3be8bb..7e2c8785692 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -66,7 +66,7 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: def horizontal_flip_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int]): - kp[0] = kp[0].sub_(canvas_size[1]).neg_() + kp[..., 0] = kp[..., 0].sub_(canvas_size[1]).neg_() return kp @@ -135,7 +135,7 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: @_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): - kp[1] = kp[1].sub_(kp.canvas_size[0]).neg_() + kp[..., 1] = kp[..., 1].sub_(kp.canvas_size[0]).neg_() return kp @@ -363,8 +363,8 @@ def resize_keypoints( w_ratio = new_width / old_width h_ratio = new_height / old_height - ratios = torch.tensor([w_ratio, h_ratio]) - kp.data = kp.data.mul(ratios).to(kp.dtype) + ratios = torch.tensor([w_ratio, h_ratio], device=kp.device) + kp = kp.mul(ratios).to(kp.dtype) return kp, (new_height, new_width) @@ -880,14 +880,14 @@ def affine_keypoints( @_register_kernel_internal(affine, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _affine_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, + inpt: tv_tensors.KeyPoints, angle: Union[int, float], translate: List[float], scale: float, shear: List[float], center: Optional[List[float]] = None, **kwargs, -) -> tv_tensors.BoundingBoxes: +) -> tv_tensors.KeyPoints: output, canvas_size = affine_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, @@ -2490,7 +2490,7 @@ def resized_crop_keypoints( @_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _resized_crop_dispatch( +def _resized_crop_keypoints_dispatch( inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs ): out, canvas_size = resized_crop_keypoints( From 5a8c5b422b26374203369bf39452f32f5817b834 Mon Sep 17 00:00:00 2001 
From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:36:58 +0200 Subject: [PATCH 10/60] Fixed order of arguments --- torchvision/tv_tensors/_keypoints.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 79997d004fd..5e184053c0a 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -33,10 +33,10 @@ def __new__( cls, data: Any, *, + canvas_size: Tuple[int, int], dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - canvas_size: Tuple[int, int], ): tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) if tensor.ndim == 1: @@ -55,10 +55,10 @@ def __init__( self, data: Any, *, + canvas_size: Tuple[int, int], dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - canvas_size: Tuple[int, int], ): ... From dea31e266a8ce114d6815b611c555cfae59789ed Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:41:18 +0200 Subject: [PATCH 11/60] Reworked logic of the conditions to better handle mutable/non mutable sequences in wrap_output --- torchvision/tv_tensors/_keypoints.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 5e184053c0a..e00c58d5134 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -78,14 +78,16 @@ def _wrap_output( if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): output = KeyPoints(output, canvas_size=canvas_size) - elif isinstance(output, tuple): - # NB: output is checked against sequence because it has already been checked against Tensor - # Since a Tensor is a sequence of Tensor, had it not been the case, we may have had silent - # or complex errors - output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) elif isinstance(output, MutableSequence): + # For lists and list-like object we don't try to create a new object, we just set the values in the list + # This allows us to conserve the type of complex list-like object that may not follow the initialization API of lists for i, part in enumerate(output): output[i] = KeyPoints(part, canvas_size=canvas_size) + elif isinstance(output, Sequence): + # Non-mutable sequences handled here (like tuples) + # Every sequence that is not a mutable sequence is a non-mutable sequence + # We have to use a tuple here, since we know its initialization api, unlike for `output` + output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) return output def __repr__(self, *, tensor_contents: Any = None) -> str: From 71e20a540ddac6f6161006b98d356fedf72e758b Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:43:52 +0200 Subject: [PATCH 12/60] Renamed out variable to be more similar with _resized_crop_bounding_boxes_dispatch --- torchvision/transforms/v2/functional/_geometry.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 7e2c8785692..488e9c70473 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2493,10 +2493,10 @@ def resized_crop_keypoints( def 
_resized_crop_keypoints_dispatch( inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs ): - out, canvas_size = resized_crop_keypoints( + output, canvas_size = resized_crop_keypoints( inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size ) - return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) def resized_crop_bounding_boxes( From 2f77527a3c87d28f1dea689db202cb4070445ea4 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:45:16 +0200 Subject: [PATCH 13/60] renamed _xyxy_to_points to _xyxy_to_keypoints for consistency --- torchvision/transforms/v2/functional/_meta.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index a9f6ed1b782..d45d2a534f0 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -181,8 +181,7 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: return xyxy - -def _xyxy_to_points(bounding_boxes: torch.Tensor) -> torch.Tensor: +def _xyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] @@ -201,7 +200,7 @@ def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensor new_format=BoundingBoxFormat.XYXY, inplace=False, ) - return tv_tensors.KeyPoints(_xyxy_to_points(bbox), canvas_size=bounding_boxes.canvas_size) + return tv_tensors.KeyPoints(_xyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size) def _cxcywhr_to_xywhr(cxcywhr: torch.Tensor, inplace: bool) -> torch.Tensor: From 517a6dedb5730177d582ee815265d9012bb71b14 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 20:12:07 +0200 Subject: [PATCH 14/60] clarified _xyxy_to_points and changed the name of its caller for the sake of consistency with the other bounding_boxes converters in _meta.py --- test/test_transforms_v2.py | 20 +++++++++++++++++++ .../transforms/v2/functional/__init__.py | 2 +- torchvision/transforms/v2/functional/_meta.py | 3 ++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index aaced22dc4f..e77489c6206 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6260,3 +6260,23 @@ def test_different_sizes(self, make_input1, make_input2, query): def test_no_valid_input(self, query): with pytest.raises(TypeError, match="No image"): query(["blah"]) + + @pytest.mark.parametrize( + 'boxes', [ + tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4)) + ] + ) + def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): + # TODO: this test can't handle rotated boxes yet + kp = F.convert_bounding_boxes_to_points(boxes) + assert kp.shape == boxes.shape + (2, ) + assert kp.dtype == boxes.dtype + # kp is a list of A, B, C, D polygons. 
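For clarity: with _xyxy_to_keypoints indexing [[0, 1], [2, 1], [2, 3], [0, 3]], the four points per box are A = (x1, y1), B = (x2, y1), C = (x2, y2) and D = (x1, y2), i.e. top-left, top-right, bottom-right and bottom-left, so concatenating A and C recovers the XYXY box, which is what the reconstruction below checks.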
+ # If we use A | C, we should get back the XYXY format of bounding box + reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes( + reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size + ), new_format=boxes.format + ) + assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}" diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index ec649759a72..e32ef73f7c1 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -5,7 +5,7 @@ from ._meta import ( clamp_bounding_boxes, convert_bounding_box_format, - convert_box_to_points, + convert_bounding_boxes_to_points, get_dimensions_image, get_dimensions_video, get_dimensions, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index d45d2a534f0..e24e3817be4 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -185,7 +185,7 @@ def _xyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] -def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: +def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: """Converts a set of bounding boxes to its edge points. Args: @@ -194,6 +194,7 @@ def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensor Returns: tv_tensors.KeyPoints: The edges, of shape ``[N, 4, 2]`` """ + # TODO: support rotated BBOX bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, From 63ed4a5a8b041b534c54481fd0cc7b3407bde802 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 20:13:44 +0200 Subject: [PATCH 15/60] Renamed half_point to more explicit single_coord_shape --- test/common_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index eca7997c102..d01d365a21a 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -409,9 +409,9 @@ def make_keypoints( """Make the KeyPoints for testing purposes""" if isinstance(num_points, int): num_points = [num_points] - half_point: Tuple[int, ...] = tuple(num_points) + (1,) - y = torch.randint(0, canvas_size[0] - 1, half_point, dtype=dtype, device=device) - x = torch.randint(0, canvas_size[1] - 1, half_point, dtype=dtype, device=device) + single_coord_shape: Tuple[int, ...] = tuple(num_points) + (1,) + y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device) + x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device) points = torch.cat((x, y), dim=-1) keypoints = tv_tensors.KeyPoints(points, canvas_size=canvas_size) return keypoints From 166c1ecc2fbcf5e720486ff52de3d3fb5e922f34 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 20:48:26 +0200 Subject: [PATCH 16/60] Integrated KeyPoints better in the transforms. 
It now warns alongside BoundingBoxes and Masks in RandomErasing, AutoAugment, FiveCrop, and SanitizeBoundingBoxes now can handle them and its documentation now states that the underlying logic relies on masks and keypoints being a given shape --- test/test_transforms_v2.py | 2 +- torchvision/transforms/v2/_augment.py | 2 +- torchvision/transforms/v2/_auto_augment.py | 3 ++- torchvision/transforms/v2/_geometry.py | 4 +-- torchvision/transforms/v2/_misc.py | 31 +++++++++++++--------- 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index e77489c6206..f501d9b7619 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -3487,7 +3487,7 @@ def _sample_input_adapter(self, transform, input, device): adapted_input = {} image_or_video_found = False for key, value in input.items(): - if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.Mask)): + if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)): # AA transforms don't support bounding boxes or masks continue elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor, PIL.Image.Image)): diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py index 4237eaba425..3a4cc8ee29d 100644 --- a/torchvision/transforms/v2/_augment.py +++ b/torchvision/transforms/v2/_augment.py @@ -89,7 +89,7 @@ def __init__( self._log_ratio = torch.log(torch.tensor(self.ratio)) def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any: - if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)): + if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)): warnings.warn( f"{type(self).__name__}() is currently passing through inputs of type " f"tv_tensors.{type(inpt).__name__}. This will likely change in the future." diff --git a/torchvision/transforms/v2/_auto_augment.py b/torchvision/transforms/v2/_auto_augment.py index 4dd7ba343aa..1ff32629f38 100644 --- a/torchvision/transforms/v2/_auto_augment.py +++ b/torchvision/transforms/v2/_auto_augment.py @@ -46,7 +46,7 @@ def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str, def _flatten_and_extract_image_or_video( self, inputs: Any, - unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask), + unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), ) -> Tuple[Tuple[List[Any], TreeSpec, int], ImageOrVideo]: flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0]) needs_transform_list = self._needs_transform_list(flat_inputs) @@ -194,6 +194,7 @@ class AutoAugment(_AutoAugmentBase): fill (sequence or number, optional): Pixel fill value for the area outside the transformed image. If given a number, the value is used for all bands respectively. 
""" + _v1_transform_cls = _transforms.AutoAugment _AUGMENTATION_SPACE = { diff --git a/torchvision/transforms/v2/_geometry.py b/torchvision/transforms/v2/_geometry.py index c615515b943..02d8a98a7a9 100644 --- a/torchvision/transforms/v2/_geometry.py +++ b/torchvision/transforms/v2/_geometry.py @@ -356,7 +356,7 @@ def __init__(self, size: Union[int, Sequence[int]]) -> None: self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any: - if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)): + if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)): warnings.warn( f"{type(self).__name__}() is currently passing through inputs of type " f"tv_tensors.{type(inpt).__name__}. This will likely change in the future." @@ -401,7 +401,7 @@ def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) self.vertical_flip = vertical_flip def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any: - if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)): + if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)): warnings.warn( f"{type(self).__name__}() is currently passing through inputs of type " f"tv_tensors.{type(inpt).__name__}. This will likely change in the future." diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index f0b83c14bd8..b389db0c7d4 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,15 +9,7 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform -from ._utils import ( - _parse_labels_getter, - _setup_number_or_seq, - _setup_size, - get_all_keypoints, - get_bounding_boxes, - has_any, - is_pure_tensor, -) +from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor # TODO: do we want/need to expose this? @@ -348,9 +340,9 @@ def transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class SanitizeBoundingBoxes(Transform): - """Remove degenerate/invalid bounding boxes and their corresponding labels and masks. + """Remove degenerate/invalid bounding boxes and their corresponding labels, masks and keypoints. - This transform removes bounding boxes and their associated labels/masks that: + This transform removes bounding boxes and their associated labels, masks and keypoints that: - are below a given ``min_size`` or ``min_area``: by default this also removes degenerate boxes that have e.g. X2 <= X1. - have any coordinate outside of their corresponding image. You may want to @@ -366,6 +358,14 @@ class SanitizeBoundingBoxes(Transform): may modify bounding boxes but once at the end should be enough in most cases. + .. note:: + This transform requires that any :class:`~torchvision.tv_tensor.KeyPoints` or + :class:`~torchvision.tv_tensor.Mask` provided has to match the bounding boxes in shape. + + If the bounding boxes are of shape ``[N, K]``, then the + KeyPoints have to be of shape ``[N, ..., 2]`` or ``[N, 2]`` + and the masks have to be of shape ``[N, ..., H, W]`` or ``[N, H, W]`` + Args: min_size (float, optional): The size below which bounding boxes are removed. Default is 1. min_area (float, optional): The area below which bounding boxes are removed. Default is 1. 
@@ -445,10 +445,15 @@ def forward(self, *inputs: Any) -> Any: return tree_unflatten(flat_outputs, spec) def transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + # For every object in the flattened input of the `forward` method, we apply transform + # The params contain the list of valid indices of the (N, K) bbox set + + # We suppose here that any KeyPoints or Masks TVTensors is of shape (N, ..., 2) and (N, ..., H, W) respectively + # TODO: check this. is_label = params["labels"] is not None and any(inpt is label for label in params["labels"]) - is_bounding_boxes_or_mask = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)) + is_bbox_mask_or_kpoints = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints)) - if not (is_label or is_bounding_boxes_or_mask): + if not (is_label or is_bbox_mask_or_kpoints): return inpt output = inpt[params["valid"]] From 1cc3b6fb80a15a12d8b2a7172f2449472cac35c1 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 21:08:26 +0200 Subject: [PATCH 17/60] Fixed _geometry.py post botched merge request --- .../transforms/v2/functional/_geometry.py | 148 +++++++++--------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index c747a9af80c..a74a211b9e7 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -66,15 +66,15 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: return horizontal_flip_image(mask) -def horizontal_flip_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int]): - kp[..., 0] = kp[..., 0].sub_(canvas_size[1]).neg_() - return kp +def horizontal_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]): + keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1]).neg_() + return keypoints @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _horizontal_flip_keypoints_dispatch(kp: tv_tensors.KeyPoints): - out = horizontal_flip_keypoints(kp.as_subclass(torch.Tensor), canvas_size=kp.canvas_size) - return tv_tensors.wrap(out, like=kp) +def _horizontal_flip_keypoints_dispatch(keypoints: tv_tensors.KeyPoints): + out = horizontal_flip_keypoints(keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size) + return tv_tensors.wrap(out, like=keypoints) def horizontal_flip_bounding_boxes( @@ -135,9 +135,9 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: @_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): - kp[..., 1] = kp[..., 1].sub_(kp.canvas_size[0]).neg_() - return kp +def vertical_flip_keypoints(keypoints: tv_tensors.KeyPoints): + keypoints[..., 1] = keypoints[..., 1].sub_(keypoints.canvas_size[0]).neg_() + return keypoints def vertical_flip_bounding_boxes( @@ -352,9 +352,9 @@ def _resize_mask_dispatch( def resize_keypoints( - kp: torch.Tensor, - size: Optional[List[int]], - canvas_size: Tuple[int, int], + keypoints: torch.Tensor, + size: Optional[list[int]], + canvas_size: tuple[int, int], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, antialias: Optional[bool] = True, @@ -364,29 +364,29 @@ def resize_keypoints( w_ratio = new_width / old_width h_ratio = new_height / old_height - ratios = torch.tensor([w_ratio, h_ratio], device=kp.device) - kp = 
kp.mul(ratios).to(kp.dtype) + ratios = torch.tensor([w_ratio, h_ratio], device=keypoints.device) + keypoints = keypoints.mul(ratios).to(keypoints.dtype) - return kp, (new_height, new_width) + return keypoints, (new_height, new_width) @_register_kernel_internal(resize, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _resize_keypoints_dispatch( - kp: tv_tensors.KeyPoints, - size: Optional[List[int]], + keypoints: tv_tensors.KeyPoints, + size: Optional[list[int]], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, antialias: Optional[bool] = True, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( - kp.as_subclass(torch.Tensor), + keypoints.as_subclass(torch.Tensor), size, - canvas_size=kp.canvas_size, + canvas_size=keypoints.canvas_size, interpolation=interpolation, max_size=max_size, antialias=antialias, ) - return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) def resize_bounding_boxes( @@ -816,14 +816,14 @@ def _affine_image_pil( def _affine_keypoints_with_expand( keypoints: torch.Tensor, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], angle: Union[int, float], - translate: List[float], + translate: list[float], scale: float, - shear: List[float], - center: Optional[List[float]] = None, + shear: list[float], + center: Optional[list[float]] = None, expand: bool = False, -) -> Tuple[torch.Tensor, Tuple[int, int]]: +) -> tuple[torch.Tensor, tuple[int, int]]: if keypoints.numel() == 0: return keypoints, canvas_size @@ -860,12 +860,12 @@ def _affine_keypoints_with_expand( def affine_keypoints( keypoints: torch.Tensor, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], angle: Union[int, float], - translate: List[float], + translate: list[float], scale: float, - shear: List[float], - center: Optional[List[float]] = None, + shear: list[float], + center: Optional[list[float]] = None, ): return _affine_keypoints_with_expand( keypoints=keypoints, @@ -883,10 +883,10 @@ def affine_keypoints( def _affine_keypoints_dispatch( inpt: tv_tensors.KeyPoints, angle: Union[int, float], - translate: List[float], + translate: list[float], scale: float, - shear: List[float], - center: Optional[List[float]] = None, + shear: list[float], + center: Optional[list[float]] = None, **kwargs, ) -> tv_tensors.KeyPoints: output, canvas_size = affine_keypoints( @@ -1203,9 +1203,9 @@ def rotate_keypoints( angle: float, interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, expand: bool = False, - center: Optional[List[float]] = None, + center: Optional[list[float]] = None, fill: _FillTypeJIT = None, -) -> Tuple[torch.Tensor, Tuple[int, int]]: +) -> tuple[torch.Tensor, tuple[int, int]]: return _affine_keypoints_with_expand( keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, @@ -1220,10 +1220,10 @@ def rotate_keypoints( @_register_kernel_internal(rotate, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _rotate_keypoints_dispatch( - kp: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs + keypoints: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: - out, canvas_size = rotate_keypoints(kp, angle, center=center, expand=expand, **kwargs) - return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + out, canvas_size = rotate_keypoints(keypoints, angle, center=center, 
expand=expand, **kwargs) + return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) def rotate_bounding_boxes( @@ -1490,7 +1490,7 @@ def pad_mask( def pad_keypoints( - keypoints: torch.Tensor, canvas_size: Tuple[int, int], padding: List[int], padding_mode: str = "constant" + keypoints: torch.Tensor, canvas_size: tuple[int, int], padding: list[int], padding_mode: str = "constant" ): SUPPORTED_MODES = ["constant"] if padding_mode not in SUPPORTED_MODES: @@ -1507,7 +1507,7 @@ def pad_keypoints( @_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _pad_keypoints_dispatch( - keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs + keypoints: tv_tensors.KeyPoints, padding: list[int], padding_mode: str = "constant", **kwargs ) -> tv_tensors.KeyPoints: output, canvas_size = pad_keypoints( keypoints.as_subclass(torch.Tensor), @@ -1605,17 +1605,17 @@ def crop_image(image: torch.Tensor, top: int, left: int, height: int, width: int def crop_keypoints( - kp: torch.Tensor, + keypoints: torch.Tensor, top: int, left: int, height: int, width: int, -) -> Tuple[torch.Tensor, Tuple[int, int]]: +) -> tuple[torch.Tensor, tuple[int, int]]: - kp.sub_(torch.tensor([left, top], dtype=kp.dtype, device=kp.device)) + keypoints.sub_(torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device)) canvas_size = (height, width) - return clamp_keypoints(kp, canvas_size=canvas_size), canvas_size + return clamp_keypoints(keypoints, canvas_size=canvas_size), canvas_size @_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @@ -1800,16 +1800,16 @@ def _perspective_image_pil( def perspectice_keypoints( - kp: torch.Tensor, - canvas_size: Tuple[int, int], - startpoints: Optional[List[List[int]]], - endpoints: Optional[List[List[int]]], - coefficients: Optional[List[float]] = None, + keypoints: torch.Tensor, + canvas_size: tuple[int, int], + startpoints: Optional[list[list[int]]], + endpoints: Optional[list[list[int]]], + coefficients: Optional[list[float]] = None, ): - if kp.numel() == 0: - return kp - dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 - device = kp.device + if keypoints.numel() == 0: + return keypoints + dtype = keypoints.dtype if torch.is_floating_point(keypoints) else torch.float32 + device = keypoints.device perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients) @@ -1821,10 +1821,10 @@ def perspectice_keypoints( ) theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) - kp = torch.cat([kp, torch.ones(kp.shape[0], 1, device=kp.device)], dim=-1) + keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=keypoints.device)], dim=-1) - numer_points = torch.matmul(kp, theta1.T) - denom_points = torch.matmul(kp, theta2.T) + numer_points = torch.matmul(keypoints, theta1.T) + denom_points = torch.matmul(keypoints, theta2.T) transformed_points = numer_points.div_(denom_points) return clamp_keypoints(transformed_points, canvas_size) @@ -1832,9 +1832,9 @@ def perspectice_keypoints( @_register_kernel_internal(perspective, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _perspective_keypoints_dispatch( inpt: tv_tensors.BoundingBoxes, - startpoints: Optional[List[List[int]]], - endpoints: Optional[List[List[int]]], - coefficients: Optional[List[float]] = None, + startpoints: Optional[list[list[int]]], + endpoints: Optional[list[list[int]]], + coefficients: Optional[list[float]] = None, **kwargs, ) -> 
tv_tensors.BoundingBoxes: output = perspectice_keypoints( @@ -1923,11 +1923,11 @@ def perspective_bounding_boxes( def _compute_perspective_thetas( - perspective_coeffs: List[float], + perspective_coeffs: list[float], dtype: torch.dtype, device: torch.device, denom: float, -) -> Tuple[torch.Tensor, torch.Tensor]: +) -> tuple[torch.Tensor, torch.Tensor]: inv_coeffs = [ (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, (-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom, @@ -2112,18 +2112,18 @@ def _create_identity_grid(size: tuple[int, int], device: torch.device, dtype: to return base_grid -def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: +def elastic_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: expected_shape = (1, canvas_size[0], canvas_size[1], 2) if not isinstance(displacement, torch.Tensor): raise TypeError("Argument displacement should be a Tensor") elif displacement.shape != expected_shape: raise ValueError(f"Argument displacement shape should be {expected_shape}, but given {displacement.shape}") - if kp.numel() == 0: - return kp + if keypoints.numel() == 0: + return keypoints - device = kp.device - dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 + device = keypoints.device + dtype = keypoints.dtype if torch.is_floating_point(keypoints) else torch.float32 if displacement.dtype != dtype or displacement.device != device: displacement = displacement.to(dtype=dtype, device=device) @@ -2131,7 +2131,7 @@ def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displaceme id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype) inv_grid = id_grid.sub_(displacement) - index_xy = kp.to(dtype=torch.long) + index_xy = keypoints.to(dtype=torch.long) index_x, index_y = index_xy[:, 0], index_xy[:, 1] # Unlike bounding boxes, this may not work well. 
index_x.clamp_(0, inv_grid.shape[2] - 1) @@ -2329,14 +2329,14 @@ def _center_crop_image_pil(image: PIL.Image.Image, output_size: list[int]) -> PI return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width) -def center_crop_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int]): +def center_crop_keypoints(inpt: torch.Tensor, canvas_size: tuple[int, int], output_size: list[int]): crop_height, crop_width = _center_crop_parse_output_size(output_size) crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size) return crop_keypoints(inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width) @_register_kernel_internal(center_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _center_crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, output_size: List[int]) -> tv_tensors.KeyPoints: +def _center_crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, output_size: list[int]) -> tv_tensors.KeyPoints: output, canvas_size = center_crop_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, output_size=output_size ) @@ -2479,20 +2479,20 @@ def _resized_crop_image_pil_dispatch( def resized_crop_keypoints( - kp: torch.Tensor, + keypoints: torch.Tensor, top: int, left: int, height: int, width: int, - size: List[int], -) -> Tuple[torch.Tensor, Tuple[int, int]]: - kp, canvas_size = crop_keypoints(kp, top, left, height, width) - return resize_keypoints(kp, size=size, canvas_size=canvas_size) + size: list[int], +) -> tuple[torch.Tensor, tuple[int, int]]: + keypoints, canvas_size = crop_keypoints(keypoints, top, left, height, width) + return resize_keypoints(keypoints, size=size, canvas_size=canvas_size) @_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _resized_crop_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs + inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: list[int], **kwargs ): output, canvas_size = resized_crop_keypoints( inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size From 841de7701955249951b4facddd80982ca23a7e3a Mon Sep 17 00:00:00 2001 From: Antoine Simoulin Date: Sat, 3 May 2025 16:37:51 -0700 Subject: [PATCH 18/60] Review python 3.9 type hint and lint --- test/common_utils.py | 6 +- test/test_transforms_v2.py | 50 ++++++----- test/test_transforms_v2_utils.py | 6 +- test/test_tv_tensors.py | 84 ++++++++++++------- torchvision/transforms/v2/_utils.py | 4 +- .../transforms/v2/functional/_geometry.py | 4 +- torchvision/transforms/v2/functional/_meta.py | 2 +- torchvision/transforms/v2/functional/_misc.py | 8 +- 8 files changed, 103 insertions(+), 61 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index bf0fe92ae3e..600cb5a13b7 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -8,8 +8,8 @@ import shutil import sys import tempfile -from typing import Sequence, Tuple import warnings +from collections.abc import Sequence from subprocess import CalledProcessError, check_output, STDOUT import numpy as np @@ -402,12 +402,12 @@ def make_image_pil(*args, **kwargs): def make_keypoints( - canvas_size: Tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device='cpu' + canvas_size: tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device="cpu" ) -> tv_tensors.KeyPoints: """Make the KeyPoints for testing 
purposes""" if isinstance(num_points, int): num_points = [num_points] - single_coord_shape: Tuple[int, ...] = tuple(num_points) + (1,) + single_coord_shape: tuple[int, ...] = tuple(num_points) + (1,) y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device) x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device) points = torch.cat((x, y), dim=-1) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 701d668b6f2..2cc72db15ea 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -231,10 +231,7 @@ def check_functional_kernel_signature_match(functional, *, kernel, input_type): if issubclass(input_type, tv_tensors.TVTensor): # We filter out metadata that is implicitly passed to the functional through the input tv_tensor, but has to be # explicitly passed to the kernel. - explicit_metadata = { - tv_tensors.BoundingBoxes: {"format", "canvas_size"}, - tv_tensors.KeyPoints: {"canvas_size"} - } + explicit_metadata = {tv_tensors.BoundingBoxes: {"format", "canvas_size"}, tv_tensors.KeyPoints: {"canvas_size"}} kernel_params = [param for param in kernel_params if param.name not in explicit_metadata.get(input_type, set())] functional_params = iter(functional_params) @@ -338,7 +335,8 @@ def _make_transform_sample(transform, *, image_or_video, adapter): canvas_size=size, device=device, ), - keypoints=make_keypoints(canvas_size=size), keypoints_degenerate=tv_tensors.KeyPoints( + keypoints=make_keypoints(canvas_size=size), + keypoints_degenerate=tv_tensors.KeyPoints( [ [0, 1], # left edge [1, 0], # top edge @@ -347,8 +345,10 @@ def _make_transform_sample(transform, *, image_or_video, adapter): [size[1], 0], # top right corner [1, size[0]], # bottom edge [0, size[0]], # bottom left corner - [size[1], size[0]] # bottom right corner - ], canvas_size=size, device=device + [size[1], size[0]], # bottom right corner + ], + canvas_size=size, + device=device, ), detection_mask=make_detection_masks(size, device=device), segmentation_mask=make_segmentation_mask(size, device=device), @@ -2362,7 +2362,7 @@ def test_error(self, T): F.to_pil_image(imgs[0]), tv_tensors.Mask(torch.rand(12, 12)), tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12), - tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)) + tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)), ): print(type(input_with_bad_type), cutmix_mixup) with pytest.raises(ValueError, match="does not support PIL images, "): @@ -2772,8 +2772,15 @@ def test_functional_signature(self, kernel, input_type): check_functional_kernel_signature_match(F.elastic, kernel=kernel, input_type=input_type) @pytest.mark.parametrize( - "make_input", [ - make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints + "make_input", + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, ], ) def test_displacement_error(self, make_input): @@ -2786,9 +2793,15 @@ def test_displacement_error(self, make_input): F.elastic(input, displacement=torch.rand(F.get_size(input))) @pytest.mark.parametrize( - "make_input", [ - make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, - make_keypoints + "make_input", + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, ], ) # ElasticTransform needs 
larger images to avoid the needed internal padding being larger than the actual image @@ -6297,21 +6310,18 @@ def test_no_valid_input(self, query): query(["blah"]) @pytest.mark.parametrize( - 'boxes', [ - tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4)) - ] + "boxes", [tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))] ) def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): # TODO: this test can't handle rotated boxes yet kp = F.convert_bounding_boxes_to_points(boxes) - assert kp.shape == boxes.shape + (2, ) + assert kp.shape == boxes.shape + (2,) assert kp.dtype == boxes.dtype # kp is a list of A, B, C, D polygons. # If we use A | C, we should get back the XYXY format of bounding box reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes( - reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size - ), new_format=boxes.format + tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), + new_format=boxes.format, ) assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}" diff --git a/test/test_transforms_v2_utils.py b/test/test_transforms_v2_utils.py index 813a3cd93e6..dab6d525a38 100644 --- a/test/test_transforms_v2_utils.py +++ b/test/test_transforms_v2_utils.py @@ -68,7 +68,11 @@ def test_has_any(sample, types, expected): ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask, tv_tensors.KeyPoints), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), True), - ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True), + ( + (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), + (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), + True, + ), ( (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index a29c942db67..0c06bc9c929 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -2,7 +2,14 @@ import pytest import torch -from common_utils import assert_equal, make_bounding_boxes, make_image, make_keypoints, make_segmentation_mask, make_video +from common_utils import ( + assert_equal, + make_bounding_boxes, + make_image, + make_keypoints, + make_segmentation_mask, + make_video, +) from PIL import Image from torchvision import tv_tensors @@ -49,7 +56,26 @@ def test_bbox_dim_error(): tv_tensors.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32)) -@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 2)), [[0, 0,], [2, 2,]], [1, 2,]]) +@pytest.mark.parametrize( + "data", + [ + torch.randint(0, 32, size=(5, 2)), + [ + [ + 0, + 0, + ], + [ + 2, + 2, + ], + ], + [ + 1, + 2, + ], + ], +) def test_keypoints_instance(data): kpoint = tv_tensors.KeyPoints(data, canvas_size=(32, 32)) assert isinstance(kpoint, tv_tensors.KeyPoints) @@ -82,9 +108,9 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad): assert tv_tensor.requires_grad is expected_requires_grad -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + 
"make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) def test_isinstance(make_input): assert isinstance(make_input(), torch.Tensor) @@ -96,9 +122,9 @@ def test_wrapping_no_copy(): assert image.data_ptr() == tensor.data_ptr() -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) def test_to_wrapping(make_input): dp = make_input() @@ -108,9 +134,9 @@ def test_to_wrapping(make_input): assert dp_to.dtype is torch.float64 -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_to_tv_tensor_reference(make_input, return_type): tensor = torch.rand((3, 16, 16), dtype=torch.float64) @@ -124,9 +150,9 @@ def test_to_tv_tensor_reference(make_input, return_type): assert type(tensor) is torch.Tensor -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_clone_wrapping(make_input, return_type): dp = make_input() @@ -138,9 +164,9 @@ def test_clone_wrapping(make_input, return_type): assert dp_clone.data_ptr() != dp.data_ptr() -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_requires_grad__wrapping(make_input, return_type): dp = make_input(dtype=torch.float) @@ -155,9 +181,9 @@ def test_requires_grad__wrapping(make_input, return_type): assert dp_requires_grad.requires_grad -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_detach_wrapping(make_input, return_type): dp = make_input(dtype=torch.float).requires_grad_(True) @@ -212,9 +238,9 @@ def test_force_subclass_with_metadata(return_type): tv_tensors.set_return_type("tensor") -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_other_op_no_wrapping(make_input, return_type): dp = make_input() @@ -226,9 +252,9 @@ def test_other_op_no_wrapping(make_input, return_type): assert type(output) is (type(dp) if return_type == "TVTensor" else torch.Tensor) -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + 
"make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize( "op", [ @@ -245,9 +271,9 @@ def test_no_tensor_output_op_no_wrapping(make_input, op): assert type(output) is not type(dp) -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_inplace_op_no_wrapping(make_input, return_type): dp = make_input() diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 34fb8ee4170..5add3c7bc20 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -2,10 +2,10 @@ import collections.abc import numbers -from collections.abc import Sequence +from collections.abc import Iterable, Sequence from contextlib import suppress -from typing import Any, Callable, Literal, Sequence, Iterable +from typing import Any, Callable, Literal import PIL.Image import torch diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index a74a211b9e7..44eafcdb31f 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2112,7 +2112,9 @@ def _create_identity_grid(size: tuple[int, int], device: torch.device, dtype: to return base_grid -def elastic_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: +def elastic_keypoints( + keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor +) -> torch.Tensor: expected_shape = (1, canvas_size[0], canvas_size[1], 2) if not isinstance(displacement, torch.Tensor): raise TypeError("Argument displacement should be a Tensor") diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index bd3cbd3c699..348e14bda14 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -122,7 +122,7 @@ def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> list[int] @_register_kernel_internal(get_size, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> List[int]: +def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> list[int]: return list(keypoints.canvas_size) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index ccd750eba0f..bddd8e27721 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -329,8 +329,8 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo def sanitize_keypoints( - keypoints: torch.Tensor, canvas_size: Optional[Tuple[int, int]] = None -) -> Tuple[torch.Tensor, torch.Tensor]: + keypoints: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None +) -> tuple[torch.Tensor, torch.Tensor]: """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. This removes the keypoints that are outside of their corresponing image. 
@@ -345,7 +345,7 @@ def sanitize_keypoints( Args: keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed - canvas_size (Optional[Tuple[int, int]], optional): The canvas_size of the bounding boxes + canvas_size (Optional[tuple[int, int]], optional): The canvas_size of the bounding boxes (size of the corresponding image/video). Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. @@ -383,7 +383,7 @@ def sanitize_keypoints( def _get_sanitize_keypoints_mask( keypoints: torch.Tensor, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], ) -> torch.Tensor: image_h, image_w = canvas_size x = keypoints[:, 0] From ff6ab48397ef000d0b4a95fb3afe3c6dbe19588e Mon Sep 17 00:00:00 2001 From: Antoine Simoulin Date: Sat, 3 May 2025 16:55:53 -0700 Subject: [PATCH 19/60] Add specific keypoint tests --- test/test_transforms_v2.py | 569 +++++++++++++++++- .../transforms/v2/functional/__init__.py | 4 +- .../transforms/v2/functional/_geometry.py | 102 +++- torchvision/transforms/v2/functional/_meta.py | 36 +- 4 files changed, 671 insertions(+), 40 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 2cc72db15ea..e3d59dda6b8 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -574,6 +574,45 @@ def affine_bounding_boxes(bounding_boxes): ) +def reference_affine_keypoints_helper(keypoints, *, affine_matrix, new_canvas_size=None, clamp=True): + canvas_size = new_canvas_size or keypoints.canvas_size + + def affine_keypoints(keypoints): + dtype = keypoints.dtype + device = keypoints.device + + # Go to float before converting to prevent precision loss + x, y = keypoints.to(dtype=torch.float64, device="cpu", copy=True).squeeze(0).tolist() + + points = np.array([[x, y, 1.0]]) + transformed_points = np.matmul(points, affine_matrix.astype(points.dtype).T) + + output = torch.Tensor( + [ + float(transformed_points[0, 0]), + float(transformed_points[0, 1]), + ] + ) + + if clamp: + # It is important to clamp before casting, especially for CXCYWH format, dtype=int64 + output = F.clamp_keypoints( + output, + canvas_size=canvas_size, + ) + else: + # We leave the bounding box as float64 so the caller gets the full precision to perform any additional + # operation + dtype = output.dtype + + return output.to(dtype=dtype, device=device) + + return tv_tensors.KeyPoints( + torch.cat([affine_keypoints(k) for k in keypoints.reshape(-1, 2).unbind()], dim=0).reshape(keypoints.shape), + canvas_size=canvas_size, + ) + + class TestResize: INPUT_SIZE = (17, 11) OUTPUT_SIZES = [17, [17], (17,), None, [12, 13], (12, 13)] @@ -673,6 +712,28 @@ def test_kernel_bounding_boxes(self, format, size, use_max_size, dtype, device): check_scripted_vs_eager=not isinstance(size, int), ) + @pytest.mark.parametrize("size", OUTPUT_SIZES) + @pytest.mark.parametrize("use_max_size", [True, False]) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, size, use_max_size, dtype, device): + if not (max_size_kwarg := self._make_max_size_kwarg(use_max_size=use_max_size, size=size)): + return + + keypoints = make_keypoints( + canvas_size=self.INPUT_SIZE, + dtype=dtype, + device=device, + ) + check_kernel( + F.resize_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + size=size, + **max_size_kwarg, + check_scripted_vs_eager=not isinstance(size, int), + ) + @pytest.mark.parametrize("make_mask", 
[make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.resize_mask, make_mask(self.INPUT_SIZE), size=self.OUTPUT_SIZES[-1]) @@ -781,6 +842,28 @@ def _reference_resize_bounding_boxes(self, bounding_boxes, *, size, max_size=Non new_canvas_size=(new_height, new_width), ) + def _reference_resize_keypoints(self, keypoints, *, size, max_size=None): + old_height, old_width = keypoints.canvas_size + new_height, new_width = self._compute_output_size( + input_size=keypoints.canvas_size, size=size, max_size=max_size + ) + + if (old_height, old_width) == (new_height, new_width): + return keypoints + + affine_matrix = np.array( + [ + [new_width / old_width, 0, 0], + [0, new_height / old_height, 0], + ], + ) + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=affine_matrix, + new_canvas_size=(new_height, new_width), + ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("size", OUTPUT_SIZES) @pytest.mark.parametrize("use_max_size", [True, False]) @@ -797,6 +880,21 @@ def test_bounding_boxes_correctness(self, format, size, use_max_size, fn): self._check_output_size(bounding_boxes, actual, size=size, **max_size_kwarg) torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("size", OUTPUT_SIZES) + @pytest.mark.parametrize("use_max_size", [True, False]) + @pytest.mark.parametrize("fn", [F.resize, transform_cls_to_functional(transforms.Resize)]) + def test_keypoints_correctness(self, size, use_max_size, fn): + if not (max_size_kwarg := self._make_max_size_kwarg(use_max_size=use_max_size, size=size)): + return + + keypoints = make_keypoints(canvas_size=self.INPUT_SIZE) + + actual = fn(keypoints, size=size, **max_size_kwarg) + expected = self._reference_resize_keypoints(keypoints, size=size, **max_size_kwarg) + + self._check_output_size(keypoints, actual, size=size, **max_size_kwarg) + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("interpolation", set(transforms.InterpolationMode) - set(INTERPOLATION_MODES)) @pytest.mark.parametrize( "make_input", @@ -1039,6 +1137,16 @@ def test_kernel_bounding_boxes(self, format, dtype, device): canvas_size=bounding_boxes.canvas_size, ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + check_kernel( + F.horizontal_flip_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.horizontal_flip_mask, make_mask()) @@ -1097,6 +1205,16 @@ def _reference_horizontal_flip_bounding_boxes(self, bounding_boxes): return reference_affine_bounding_boxes_helper(bounding_boxes, affine_matrix=affine_matrix) + def _reference_horizontal_flip_keypoints(self, keypoints): + affine_matrix = np.array( + [ + [-1, 0, keypoints.canvas_size[1]], + [0, 1, 0], + ], + ) + + return reference_affine_keypoints_helper(keypoints, affine_matrix=affine_matrix) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize( "fn", [F.horizontal_flip, transform_cls_to_functional(transforms.RandomHorizontalFlip, p=1)] @@ -1109,6 +1227,17 @@ def test_bounding_boxes_correctness(self, format, fn): torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize( + "fn", [F.horizontal_flip, 
transform_cls_to_functional(transforms.RandomHorizontalFlip, p=1)] + ) + def test_keypoints_correctness(self, fn): + keypoints = make_keypoints() + + actual = fn(keypoints) + expected = self._reference_horizontal_flip_keypoints(keypoints) + + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize( "make_input", [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], @@ -1210,6 +1339,24 @@ def test_kernel_bounding_boxes(self, param, value, format, dtype, device): check_scripted_vs_eager=not (param == "shear" and isinstance(value, (int, float))), ) + @param_value_parametrization( + angle=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["angle"], + translate=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["translate"], + shear=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["shear"], + center=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["center"], + ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, param, value, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + self._check_kernel( + F.affine_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + **{param: value}, + check_scripted_vs_eager=not (param == "shear" and isinstance(value, (int, float))), + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): self._check_kernel(F.affine_mask, make_mask()) @@ -1346,6 +1493,17 @@ def _reference_affine_bounding_boxes(self, bounding_boxes, *, angle, translate, ), ) + def _reference_affine_keypoints(self, keypoints, *, angle, translate, scale, shear, center): + if center is None: + center = [s * 0.5 for s in keypoints.canvas_size[::-1]] + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=self._compute_affine_matrix( + angle=angle, translate=translate, scale=scale, shear=shear, center=center + ), + ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) @@ -1392,6 +1550,50 @@ def test_transform_bounding_boxes_correctness(self, format, center, seed): torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) + @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) + @pytest.mark.parametrize("scale", _CORRECTNESS_AFFINE_KWARGS["scale"]) + @pytest.mark.parametrize("shear", _CORRECTNESS_AFFINE_KWARGS["shear"]) + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + def test_functional_keypoints_correctness(self, angle, translate, scale, shear, center): + keypoints = make_keypoints() + + actual = F.affine( + keypoints, + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + ) + expected = self._reference_affine_keypoints( + keypoints, + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + ) + + torch.testing.assert_close(actual, expected) + + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + @pytest.mark.parametrize("seed", list(range(5))) + def test_transform_keypoints_correctness(self, center, seed): + keypoints = make_keypoints() + + transform = transforms.RandomAffine(**self._CORRECTNESS_TRANSFORM_AFFINE_RANGES, center=center) + + torch.manual_seed(seed) + params = transform.make_params([keypoints]) + + torch.manual_seed(seed) + 
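        # Re-seeding with the same value makes the transform draw the same random affine
        # parameters that make_params() returned above, so the reference computation matches.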
actual = transform(keypoints) + + expected = self._reference_affine_keypoints(keypoints, **params, center=center) + + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("degrees", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["degrees"]) @pytest.mark.parametrize("translate", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["translate"]) @pytest.mark.parametrize("scale", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["scale"]) @@ -1493,6 +1695,16 @@ def test_kernel_bounding_boxes(self, format, dtype, device): canvas_size=bounding_boxes.canvas_size, ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + check_kernel( + F.vertical_flip_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.vertical_flip_mask, make_mask()) @@ -1549,6 +1761,16 @@ def _reference_vertical_flip_bounding_boxes(self, bounding_boxes): return reference_affine_bounding_boxes_helper(bounding_boxes, affine_matrix=affine_matrix) + def _reference_vertical_flip_keypoints(self, keypoints): + affine_matrix = np.array( + [ + [1, 0, 0], + [0, -1, keypoints.canvas_size[0]], + ], + ) + + return reference_affine_keypoints_helper(keypoints, affine_matrix=affine_matrix) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("fn", [F.vertical_flip, transform_cls_to_functional(transforms.RandomVerticalFlip, p=1)]) def test_bounding_boxes_correctness(self, format, fn): @@ -1559,6 +1781,15 @@ def test_bounding_boxes_correctness(self, format, fn): torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("fn", [F.vertical_flip, transform_cls_to_functional(transforms.RandomVerticalFlip, p=1)]) + def test_keypoints_correctness(self, fn): + keypoints = make_keypoints() + + actual = fn(keypoints) + expected = self._reference_vertical_flip_keypoints(keypoints) + + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize( "make_input", [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], @@ -1636,6 +1867,27 @@ def test_kernel_bounding_boxes(self, param, value, format, dtype, device): **kwargs, ) + @param_value_parametrization( + angle=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["angle"], + expand=[False, True], + center=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["center"], + ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.uint8]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, param, value, dtype, device): + kwargs = {param: value} + if param != "angle": + kwargs["angle"] = self._MINIMAL_AFFINE_KWARGS["angle"] + + keypoints = make_keypoints(dtype=dtype, device=device) + + check_kernel( + F.rotate_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + **kwargs, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.rotate_mask, make_mask(), **self._MINIMAL_AFFINE_KWARGS) @@ -1823,6 +2075,71 @@ def test_transform_bounding_boxes_correctness(self, format, expand, center, seed torch.testing.assert_close(actual, expected) torch.testing.assert_close(F.get_size(actual), F.get_size(expected), atol=2 if expand else 0, rtol=0) + def _recenter_keypoints_after_expand(self, keypoints, *, recenter_xy): 
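        # Shift the keypoints by the canvas offset introduced by expand=True, so the reference
        # result is expressed in the coordinate frame of the enlarged canvas.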
+ x, y = recenter_xy + translate = [x, y] + return tv_tensors.wrap( + (keypoints.to(torch.float64) - torch.tensor(translate)).to(keypoints.dtype), like=keypoints + ) + + def _reference_rotate_keypoints(self, keypoints, *, angle, expand, center): + if center is None: + center = [s * 0.5 for s in keypoints.canvas_size[::-1]] + cx, cy = center + + a = np.cos(angle * np.pi / 180.0) + b = np.sin(angle * np.pi / 180.0) + affine_matrix = np.array( + [ + [a, b, cx - cx * a - b * cy], + [-b, a, cy + cx * b - a * cy], + ], + ) + + new_canvas_size, recenter_xy = self._compute_output_canvas_size( + expand=expand, canvas_size=keypoints.canvas_size, affine_matrix=affine_matrix + ) + + output = reference_affine_keypoints_helper( + keypoints, + affine_matrix=affine_matrix, + new_canvas_size=new_canvas_size, + clamp=False, + ) + + return F.clamp_keypoints(self._recenter_keypoints_after_expand(output, recenter_xy=recenter_xy)).to(keypoints) + + @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) + @pytest.mark.parametrize("expand", [False, True]) + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + def test_functional_keypoints_correctness(self, angle, expand, center): + keypoints = make_keypoints() + + actual = F.rotate(keypoints, angle=angle, expand=expand, center=center) + expected = self._reference_rotate_keypoints(keypoints, angle=angle, expand=expand, center=center) + + torch.testing.assert_close(actual, expected) + torch.testing.assert_close(F.get_size(actual), F.get_size(expected), atol=2 if expand else 0, rtol=0) + + @pytest.mark.parametrize("expand", [False, True]) + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + @pytest.mark.parametrize("seed", list(range(5))) + def test_transform_keypoints_correctness(self, expand, center, seed): + keypoints = make_keypoints() + + transform = transforms.RandomRotation(**self._CORRECTNESS_TRANSFORM_AFFINE_RANGES, expand=expand, center=center) + + torch.manual_seed(seed) + params = transform.make_params([keypoints]) + + torch.manual_seed(seed) + actual = transform(keypoints) + + expected = self._reference_rotate_keypoints(keypoints, **params, expand=expand, center=center) + + torch.testing.assert_close(actual, expected) + torch.testing.assert_close(F.get_size(actual), F.get_size(expected), atol=2 if expand else 0, rtol=0) + @pytest.mark.parametrize("degrees", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["degrees"]) @pytest.mark.parametrize("seed", list(range(10))) def test_transformmake_params_bounds(self, degrees, seed): @@ -2740,6 +3057,18 @@ def test_kernel_bounding_boxes(self, format, dtype, device): displacement=self._make_displacement(bounding_boxes), ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + + check_kernel( + F.elastic_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + displacement=self._make_displacement(keypoints), + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): mask = make_mask() @@ -2872,7 +3201,7 @@ def test_kernel_image(self, kwargs, dtype, device): @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) @pytest.mark.parametrize("device", cpu_and_cuda()) - def test_kernel_bounding_box(self, kwargs, format, dtype, device): + def 
test_kernel_bounding_boxes(self, kwargs, format, dtype, device): bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format, dtype=dtype, device=device) check_kernel(F.crop_bounding_boxes, bounding_boxes, format=format, **kwargs) @@ -3057,6 +3386,54 @@ def test_transform_bounding_boxes_correctness(self, output_size, format, dtype, assert_equal(actual, expected) assert_equal(F.get_size(actual), F.get_size(expected)) + def _reference_crop_keypoints(self, keypoints, *, top, left, height, width): + affine_matrix = np.array( + [ + [1, 0, -left], + [0, 1, -top], + ], + ) + return reference_affine_keypoints_helper( + keypoints, affine_matrix=affine_matrix, new_canvas_size=(height, width) + ) + + @pytest.mark.parametrize("kwargs", CORRECTNESS_CROP_KWARGS) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_functional_keypoints_correctness(self, kwargs, dtype, device): + keypoints = make_keypoints(self.INPUT_SIZE, dtype=dtype, device=device) + + actual = F.crop(keypoints, **kwargs) + expected = self._reference_crop_keypoints(keypoints, **kwargs) + + assert_equal(actual, expected, atol=1, rtol=0) + assert_equal(F.get_size(actual), F.get_size(expected)) + + @pytest.mark.parametrize("output_size", [(17, 11), (11, 17), (11, 11)]) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + @pytest.mark.parametrize("seed", list(range(5))) + def test_transform_keypoints_correctness(self, output_size, dtype, device, seed): + input_size = (output_size[0] * 2, output_size[1] * 2) + keypoints = make_keypoints(input_size, dtype=dtype, device=device) + + transform = transforms.RandomCrop(output_size) + + with freeze_rng_state(): + torch.manual_seed(seed) + params = transform.make_params([keypoints]) + assert not params.pop("needs_pad") + del params["padding"] + assert params.pop("needs_crop") + + torch.manual_seed(seed) + actual = transform(keypoints) + + expected = self._reference_crop_keypoints(keypoints, **params) + + assert_equal(actual, expected) + assert_equal(F.get_size(actual), F.get_size(expected)) + def test_errors(self): with pytest.raises(ValueError, match="Please provide only two dimensions"): transforms.RandomCrop([10, 12, 14]) @@ -3795,6 +4172,31 @@ def _reference_resized_crop_bounding_boxes(self, bounding_boxes, *, top, left, h new_canvas_size=size, ) + def _reference_resized_crop_keypoints(self, keypoints, *, top, left, height, width, size): + new_height, new_width = size + + crop_affine_matrix = np.array( + [ + [1, 0, -left], + [0, 1, -top], + [0, 0, 1], + ], + ) + resize_affine_matrix = np.array( + [ + [new_width / width, 0, 0], + [0, new_height / height, 0], + [0, 0, 1], + ], + ) + affine_matrix = (resize_affine_matrix @ crop_affine_matrix)[:2, :] + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=affine_matrix, + new_canvas_size=size, + ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) def test_functional_bounding_boxes_correctness(self, format): bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format) @@ -3807,6 +4209,15 @@ def test_functional_bounding_boxes_correctness(self, format): assert_equal(actual, expected) assert_equal(F.get_size(actual), F.get_size(expected)) + def test_functional_keypoints_correctness(self): + keypoints = make_keypoints(self.INPUT_SIZE) + + actual = F.resized_crop(keypoints, **self.CROP_KWARGS, size=self.OUTPUT_SIZE) + expected = 
self._reference_resized_crop_keypoints(keypoints, **self.CROP_KWARGS, size=self.OUTPUT_SIZE) + + assert_equal(actual, expected) + assert_equal(F.get_size(actual), F.get_size(expected)) + def test_transform_errors_warnings(self): with pytest.raises(ValueError, match="provide only two dimensions"): transforms.RandomResizedCrop(size=(1, 2, 3)) @@ -3892,6 +4303,26 @@ def test_kernel_bounding_boxes_errors(self, padding_mode): padding_mode=padding_mode, ) + def test_kernel_keypoints(self): + keypoints = make_keypoints() + check_kernel( + F.pad_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + padding=[1], + ) + + @pytest.mark.parametrize("padding_mode", ["symmetric", "edge", "reflect"]) + def test_kernel_keypoints_errors(self, padding_mode): + keypoints = make_keypoints() + with pytest.raises(ValueError, match=f"'{padding_mode}' is not supported"): + F.pad_keypoints( + keypoints, + canvas_size=keypoints.canvas_size, + padding=[1], + padding_mode=padding_mode, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.pad_mask, make_mask(), padding=[1]) @@ -4035,6 +4466,17 @@ def test_kernel_bounding_boxes(self, output_size, format): check_scripted_vs_eager=not isinstance(output_size, int), ) + @pytest.mark.parametrize("output_size", OUTPUT_SIZES) + def test_kernel_keypoints(self, output_size): + keypoints = make_keypoints(self.INPUT_SIZE) + check_kernel( + F.center_crop_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + output_size=output_size, + check_scripted_vs_eager=not isinstance(output_size, int), + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.center_crop_mask, make_mask(), output_size=self.OUTPUT_SIZES[0]) @@ -4114,6 +4556,37 @@ def test_bounding_boxes_correctness(self, output_size, format, dtype, device, fn assert_equal(actual, expected) + def _reference_center_crop_keypoints(self, keypoints, output_size): + image_height, image_width = keypoints.canvas_size + if isinstance(output_size, int): + output_size = (output_size, output_size) + elif len(output_size) == 1: + output_size *= 2 + crop_height, crop_width = output_size + + top = int(round((image_height - crop_height) / 2)) + left = int(round((image_width - crop_width) / 2)) + + affine_matrix = np.array( + [ + [1, 0, -left], + [0, 1, -top], + ], + ) + return reference_affine_keypoints_helper(keypoints, affine_matrix=affine_matrix, new_canvas_size=output_size) + + @pytest.mark.parametrize("output_size", OUTPUT_SIZES) + @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + @pytest.mark.parametrize("fn", [F.center_crop, transform_cls_to_functional(transforms.CenterCrop)]) + def test_keypoints_correctness(self, output_size, dtype, device, fn): + keypoints = make_keypoints(self.INPUT_SIZE, dtype=dtype, device=device) + + actual = fn(keypoints, output_size) + expected = self._reference_center_crop_keypoints(keypoints, output_size) + + assert_equal(actual, expected) + class TestPerspective: COEFFICIENTS = [ @@ -4201,6 +4674,39 @@ def test_kernel_bounding_boxes_error(self): coefficients=[0.0] * 8, ) + @param_value_parametrization( + coefficients=COEFFICIENTS, + start_end_points=START_END_POINTS, + ) + def test_kernel_keypoints(self, param, value): + if param == "start_end_points": + kwargs = dict(zip(["startpoints", "endpoints"], value)) + else: + kwargs = {"startpoints": 
None, "endpoints": None, param: value} + + keypoints = make_keypoints() + + check_kernel( + F.perspective_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + **kwargs, + ) + + def test_kernel_keypoints_error(self): + keypoints = make_keypoints() + canvas_size = keypoints.canvas_size + keypoints = keypoints.as_subclass(torch.Tensor) + + with pytest.raises(RuntimeError, match="Denominator is zero"): + F.perspective_keypoints( + keypoints, + canvas_size=canvas_size, + startpoints=None, + endpoints=None, + coefficients=[0.0] * 8, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.perspective_mask, make_mask(), **self.MINIMAL_KWARGS) @@ -4358,6 +4864,67 @@ def test_correctness_perspective_bounding_boxes(self, startpoints, endpoints, fo assert_close(actual, expected, rtol=0, atol=1) + def _reference_perspective_keypoints(self, keypoints, *, startpoints, endpoints): + canvas_size = keypoints.canvas_size + dtype = keypoints.dtype + device = keypoints.device + + coefficients = _get_perspective_coeffs(endpoints, startpoints) + + def perspective_keypoints(keypoints): + m1 = np.array( + [ + [coefficients[0], coefficients[1], coefficients[2]], + [coefficients[3], coefficients[4], coefficients[5]], + ] + ) + m2 = np.array( + [ + [coefficients[6], coefficients[7], 1.0], + [coefficients[6], coefficients[7], 1.0], + ] + ) + + # Go to float before converting to prevent precision loss + x, y = keypoints.to(dtype=torch.float64, device="cpu", copy=True).squeeze(0).tolist() + + points = np.array([[x, y, 1.0]]) + + numerator = points @ m1.T + denominator = points @ m2.T + transformed_points = numerator / denominator + + output = torch.Tensor( + [ + float(transformed_points[0, 0]), + float(transformed_points[0, 1]), + ] + ) + + # It is important to clamp before casting, especially for CXCYWH format, dtype=int64 + return F.clamp_keypoints( + output, + canvas_size=canvas_size, + ).to(dtype=dtype, device=device) + + return tv_tensors.KeyPoints( + torch.cat([perspective_keypoints(k) for k in keypoints.reshape(-1, 2).unbind()], dim=0).reshape( + keypoints.shape + ), + canvas_size=canvas_size, + ) + + @pytest.mark.parametrize(("startpoints", "endpoints"), START_END_POINTS) + @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_correctness_perspective_keypoints(self, startpoints, endpoints, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + + actual = F.perspective(keypoints, startpoints=startpoints, endpoints=endpoints) + expected = self._reference_perspective_keypoints(keypoints, startpoints=startpoints, endpoints=endpoints) + + assert_close(actual, expected, rtol=0, atol=1) + class TestEqualize: @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32]) diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index e32ef73f7c1..e651bbd9257 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -4,6 +4,7 @@ from ._meta import ( clamp_bounding_boxes, + clamp_keypoints, convert_bounding_box_format, convert_bounding_boxes_to_points, get_dimensions_image, @@ -16,6 +17,7 @@ get_num_channels_video, get_num_channels, get_size_bounding_boxes, + get_size_keypoints, get_size_image, get_size_mask, get_size_video, @@ -108,10 +110,10 @@ pad_keypoints, pad_mask, pad_video, - perspectice_keypoints, 
perspective, perspective_bounding_boxes, perspective_image, + perspective_keypoints, perspective_mask, perspective_video, resize, diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 44eafcdb31f..50bf20e8ba0 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -67,8 +67,10 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: def horizontal_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]): + shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1]).neg_() - return keypoints + return keypoints.reshape(shape) @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @@ -134,10 +136,11 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: return vertical_flip_image(mask) -@_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def vertical_flip_keypoints(keypoints: tv_tensors.KeyPoints): - keypoints[..., 1] = keypoints[..., 1].sub_(keypoints.canvas_size[0]).neg_() - return keypoints +def vertical_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> torch.Tensor: + shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) + keypoints[..., 1] = keypoints[..., 1].sub_(canvas_size[0]).neg_() + return keypoints.reshape(shape) def vertical_flip_bounding_boxes( @@ -157,6 +160,12 @@ def vertical_flip_bounding_boxes( return bounding_boxes.reshape(shape) +@_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _vertical_flip_keypoints_dispatch(inpt: tv_tensors.KeyPoints) -> tv_tensors.KeyPoints: + output = vertical_flip_keypoints(inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size) + return tv_tensors.wrap(output, like=inpt) + + @_register_kernel_internal(vertical_flip, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) def _vertical_flip_bounding_boxes_dispatch(inpt: tv_tensors.BoundingBoxes) -> tv_tensors.BoundingBoxes: output = vertical_flip_bounding_boxes( @@ -828,6 +837,7 @@ def _affine_keypoints_with_expand( return keypoints, canvas_size original_dtype = keypoints.dtype + original_shape = keypoints.shape keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() dtype = keypoints.dtype device = keypoints.device @@ -850,12 +860,40 @@ def _affine_keypoints_with_expand( .reshape(2, 3) .T ) - # 1) Unlike bounding box (whose implmentation we stole) we're already a bunch of points. - keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=device, dtype=dtype)], dim=-1) + + # 1) We transform points into a tensor of points with shape (N, 3), where N is the number of points. + points = keypoints.reshape(-1, 2) + points = torch.cat([points, torch.ones(points.shape[0], 1, device=device, dtype=dtype)], dim=-1) # 2) Now let's transform the points using affine matrix - keypoints = torch.matmul(keypoints, transposed_affine_matrix).to(original_dtype) + transformed_points = torch.matmul(points, transposed_affine_matrix) - return keypoints, canvas_size + if expand: + # Compute minimum point for transformed image frame: + # Points are Top-Left, Top-Right, Bottom-Left, Bottom-Right points. 
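        # The four canvas corners are pushed through the same affine matrix; their minimum
        # gives the translation that moves every transformed keypoint into the expanded
        # output canvas, whose size is recomputed below.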
+ height, width = canvas_size + points = torch.tensor( + [ + [0.0, 0.0, 1.0], + [0.0, float(height), 1.0], + [float(width), float(height), 1.0], + [float(width), 0.0, 1.0], + ], + dtype=dtype, + device=device, + ) + new_points = torch.matmul(points, transposed_affine_matrix) + tr = torch.amin(new_points, dim=0, keepdim=True) + # Translate keypoints + transformed_points.sub_(tr) + # Estimate meta-data for image with inverted=True + affine_vector = _get_inverse_affine_matrix(center, angle, translate, scale, shear) + new_width, new_height = _compute_affine_output_size(affine_vector, width, height) + canvas_size = (new_height, new_width) + + out_kkpoints = clamp_keypoints(transformed_points, canvas_size=canvas_size).reshape(original_shape) + out_kkpoints = out_kkpoints.to(original_dtype) + + return out_kkpoints, canvas_size def affine_keypoints( @@ -1199,16 +1237,15 @@ def _rotate_image_pil( def rotate_keypoints( - keypoints: tv_tensors.KeyPoints, + keypoints: torch.Tensor, + canvas_size: tuple[int, int], angle: float, - interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, expand: bool = False, center: Optional[list[float]] = None, - fill: _FillTypeJIT = None, ) -> tuple[torch.Tensor, tuple[int, int]]: return _affine_keypoints_with_expand( - keypoints=keypoints.as_subclass(torch.Tensor), - canvas_size=keypoints.canvas_size, + keypoints=keypoints, + canvas_size=canvas_size, angle=-angle, translate=[0.0, 0.0], scale=1.0, @@ -1220,10 +1257,10 @@ def rotate_keypoints( @_register_kernel_internal(rotate, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _rotate_keypoints_dispatch( - keypoints: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs + inpt: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: - out, canvas_size = rotate_keypoints(keypoints, angle, center=center, expand=expand, **kwargs) - return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) + out, canvas_size = rotate_keypoints(inpt, canvas_size=inpt.canvas_size, angle=angle, center=center, expand=expand) + return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) def rotate_bounding_boxes( @@ -1612,7 +1649,7 @@ def crop_keypoints( width: int, ) -> tuple[torch.Tensor, tuple[int, int]]: - keypoints.sub_(torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device)) + keypoints = keypoints - torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device) canvas_size = (height, width) return clamp_keypoints(keypoints, canvas_size=canvas_size), canvas_size @@ -1622,8 +1659,8 @@ def crop_keypoints( def _crop_keypoints_dispatch( inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int ) -> tv_tensors.KeyPoints: - out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) - return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + output, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) def crop_bounding_boxes( @@ -1799,7 +1836,7 @@ def _perspective_image_pil( return _FP.perspective(image, perspective_coeffs, interpolation=pil_modes_mapping[interpolation], fill=fill) -def perspectice_keypoints( +def perspective_keypoints( keypoints: torch.Tensor, canvas_size: tuple[int, int], startpoints: Optional[list[list[int]]], @@ -1810,7 +1847,9 @@ def 
perspectice_keypoints( return keypoints dtype = keypoints.dtype if torch.is_floating_point(keypoints) else torch.float32 device = keypoints.device + original_shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients) denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3] @@ -1821,23 +1860,23 @@ def perspectice_keypoints( ) theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) - keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=keypoints.device)], dim=-1) + points = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=keypoints.device)], dim=-1) - numer_points = torch.matmul(keypoints, theta1.T) - denom_points = torch.matmul(keypoints, theta2.T) + numer_points = torch.matmul(points, theta1.T) + denom_points = torch.matmul(points, theta2.T) transformed_points = numer_points.div_(denom_points) - return clamp_keypoints(transformed_points, canvas_size) + return clamp_keypoints(transformed_points.to(keypoints.dtype), canvas_size).reshape(original_shape) @_register_kernel_internal(perspective, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _perspective_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, + inpt: tv_tensors.KeyPoints, startpoints: Optional[list[list[int]]], endpoints: Optional[list[list[int]]], coefficients: Optional[list[float]] = None, **kwargs, -) -> tv_tensors.BoundingBoxes: - output = perspectice_keypoints( +) -> tv_tensors.KeyPoints: + output = perspective_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, startpoints=startpoints, @@ -2130,6 +2169,9 @@ def elastic_keypoints( if displacement.dtype != dtype or displacement.device != device: displacement = displacement.to(dtype=dtype, device=device) + original_shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) + id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype) inv_grid = id_grid.sub_(displacement) @@ -2142,11 +2184,11 @@ def elastic_keypoints( t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype) transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5) - return clamp_keypoints(transformed_points, canvas_size=canvas_size) + return clamp_keypoints(transformed_points.to(keypoints.dtype), canvas_size=canvas_size).reshape(original_shape) @_register_kernel_internal(elastic, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _elastic_keypoints_dispatch(inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs): +def _elastic_keypoints_dispatch(inpt: tv_tensors.KeyPoints, displacement: torch.Tensor, **kwargs): output = elastic_keypoints(inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, displacement=displacement) return tv_tensors.wrap(output, like=inpt) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 348e14bda14..4de86144de9 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -388,14 +388,12 @@ def _clamp_bounding_boxes( return out_boxes.to(in_dtype) -def clamp_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int]) -> torch.Tensor: - if not torch.jit.is_scripting(): - _log_api_usage_once(clamp_bounding_boxes) - dtype = inpt.dtype - inpt = inpt.float() - inpt[..., 0].clamp_(0, canvas_size[1]) - inpt[..., 1].clamp_(0, canvas_size[0]) 
- return inpt.to(dtype=dtype) +def _clamp_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> torch.Tensor: + dtype = keypoints.dtype + keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() + keypoints[..., 0].clamp_(min=0, max=canvas_size[1]) + keypoints[..., 1].clamp_(min=0, max=canvas_size[0]) + return keypoints.to(dtype=dtype) def clamp_bounding_boxes( @@ -421,3 +419,25 @@ def clamp_bounding_boxes( raise TypeError( f"Input can either be a plain tensor or a bounding box tv_tensor, but got {type(inpt)} instead." ) + + +def clamp_keypoints( + inpt: torch.Tensor, + canvas_size: Optional[tuple[int, int]] = None, +) -> torch.Tensor: + """See :func:`~torchvision.transforms.v2.ClampKeyPoints` for details.""" + if not torch.jit.is_scripting(): + _log_api_usage_once(clamp_keypoints) + + if torch.jit.is_scripting() or is_pure_tensor(inpt): + + if canvas_size is None: + raise ValueError("For pure tensor inputs, `canvas_size` have to be passed.") + return _clamp_keypoints(inpt, canvas_size=canvas_size) + elif isinstance(inpt, tv_tensors.KeyPoints): + if canvas_size is not None: + raise ValueError("For keypoints tv_tensor inputs, `canvas_size` must not be passed.") + output = _clamp_keypoints(inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size) + return tv_tensors.wrap(output, like=inpt) + else: + raise TypeError(f"Input can either be a plain tensor or a keypoints tv_tensor, but got {type(inpt)} instead.") From 0de59e78c6641b63e19bab1a620dd12c4c0ed3d5 Mon Sep 17 00:00:00 2001 From: Antoine Simoulin Date: Sun, 4 May 2025 11:02:12 -0700 Subject: [PATCH 20/60] Adjust variable names --- torchvision/transforms/v2/functional/_geometry.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 50bf20e8ba0..27fe6a722bc 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -364,13 +364,14 @@ def resize_keypoints( keypoints: torch.Tensor, size: Optional[list[int]], canvas_size: tuple[int, int], - interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, - antialias: Optional[bool] = True, ): old_height, old_width = canvas_size new_height, new_width = _compute_resized_output_size(canvas_size, size=size, max_size=max_size) + if (new_height, new_width) == (old_height, old_width): + return keypoints, canvas_size + w_ratio = new_width / old_width h_ratio = new_height / old_height ratios = torch.tensor([w_ratio, h_ratio], device=keypoints.device) @@ -383,17 +384,13 @@ def resize_keypoints( def _resize_keypoints_dispatch( keypoints: tv_tensors.KeyPoints, size: Optional[list[int]], - interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, - antialias: Optional[bool] = True, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( keypoints.as_subclass(torch.Tensor), size, canvas_size=keypoints.canvas_size, - interpolation=interpolation, max_size=max_size, - antialias=antialias, ) return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) @@ -1259,8 +1256,10 @@ def rotate_keypoints( def _rotate_keypoints_dispatch( inpt: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: - out, canvas_size = rotate_keypoints(inpt, canvas_size=inpt.canvas_size, angle=angle, center=center, 
expand=expand)
-    return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size)
+    output, canvas_size = rotate_keypoints(
+        inpt, canvas_size=inpt.canvas_size, angle=angle, center=center, expand=expand
+    )
+    return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
 
 
 def rotate_bounding_boxes(

From 4b62ef419d03d53bb66b6def63e96f2415a37ae3 Mon Sep 17 00:00:00 2001
From: "alexandre.schoepp"
Date: Mon, 5 May 2025 21:53:39 +0200
Subject: [PATCH 21/60] Improved documentation inside of the KeyPoints class
 definition

---
 torchvision/tv_tensors/_keypoints.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py
index e00c58d5134..c578d49cdce 100644
--- a/torchvision/tv_tensors/_keypoints.py
+++ b/torchvision/tv_tensors/_keypoints.py
@@ -14,7 +14,20 @@ class KeyPoints(TVTensor):
     Each point is represented by its XY coordinates.
 
     KeyPoints can be converted from :class:`torchvision.tv_tensors.BoundingBoxes`
-    by :func:`torchvision.transforms.v2.functional.convert_box_to_points`.
+    by :func:`torchvision.transforms.v2.functional.convert_bounding_boxes_to_points`.
+
+    KeyPoints may represent any object that can be represented by sequences of 2D points:
+    - `Polygonal chains`, including polylines, Bézier curves, etc.,
+      which should be of shape ``[N_chains, N_points, 2]``, which is equal to ``[N_chains, N_segments + 1, 2]``
+    - Polygons, which should be of shape ``[N_polygons, N_points, 2]``, which is equal to ``[N_polygons, N_sides, 2]``
+    - Skeletons, which could be of shape ``[N_skeletons, N_bones, 2, 2]`` for pose-estimation models
+
+    .. note::
+
+        Like for :class:`torchvision.tv_tensors.BoundingBoxes`, there should only ever be a single
+        instance of the :class:`torchvision.tv_tensors.KeyPoints` class per sample,
+        e.g. ``{"img": img, "points_of_interest": KeyPoints(...)}``,
+        although one :class:`torchvision.tv_tensors.KeyPoints` object can contain multiple key points.
 
     Args:
         data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
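
For a rough sense of how the shapes documented above are meant to be used, here is a minimal sketch (illustrative only; it assumes the KeyPoints constructor and the tv_tensors.wrap helper introduced earlier in this series):

    import torch
    from torchvision import tv_tensors

    # One polygon with four vertices on a 32x32 canvas: shape [1, 4, 2].
    polygon = tv_tensors.KeyPoints(
        torch.tensor([[[0.0, 0.0], [10.0, 0.0], [10.0, 10.0], [0.0, 10.0]]]),
        canvas_size=(32, 32),
    )

    # A plain-tensor result can be re-wrapped while keeping the canvas_size metadata.
    shifted = tv_tensors.wrap(polygon.as_subclass(torch.Tensor) + 1.0, like=polygon)
    assert isinstance(shifted, tv_tensors.KeyPoints) and shifted.canvas_size == (32, 32)
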
From e99b82ae671a47d432613b457cb71ac56174a444 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 21:59:34 +0200 Subject: [PATCH 22/60] Improved convert_bounding_boxes_to_points to handle rotated bounding boxes and added tests for all formats --- test/test_transforms_v2.py | 41 ++++++++++++++----- torchvision/transforms/v2/functional/_meta.py | 26 +++++++++++- 2 files changed, 55 insertions(+), 12 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index e3d59dda6b8..c3a9692a664 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6877,18 +6877,39 @@ def test_no_valid_input(self, query): query(["blah"]) @pytest.mark.parametrize( - "boxes", [tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))] + "boxes", [ + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 2., 2.]]), format="XYXY", canvas_size=(4, 4)), # [boxes0] + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1.]]), format="XYWH", canvas_size=(4, 4)), # [boxes1] + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1.]]), format="CXCYWH", canvas_size=(4, 4)), # [boxes2] + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1., 45]]), format="CXCYWHR", canvas_size=(4, 4)), # [boxes3] + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1., 45.]]), format="XYWHR", canvas_size=(4, 4)), # [boxes4] + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 2., 2., 2., 2., 1.]]), format="XY" * 4, canvas_size=(4, 4)), # [boxes5] + ] ) def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): - # TODO: this test can't handle rotated boxes yet kp = F.convert_bounding_boxes_to_points(boxes) - assert kp.shape == boxes.shape + (2,) + assert kp.shape == (boxes.shape[0], 4, 2) assert kp.dtype == boxes.dtype # kp is a list of A, B, C, D polygons. - # If we use A | C, we should get back the XYXY format of bounding box - reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) - reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), - new_format=boxes.format, - ) - assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}" + + if F._meta.is_rotated_bounding_box_format(boxes.format): + # In the rotated case + # If we convert to XYXYXYXY format, we should get what we want. + reconverted = kp.reshape(-1, 8) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size), + new_format=boxes.format + ) + assert ((reconverted_bbox - boxes).abs() < 1e-5).all(), ( # Rotational computations mean that we can't ensure exactitude. 
+ f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}\n\t" + f"Diff: {reconverted_bbox - boxes}" + ) + else: + # In the unrotated case + # If we use A | C, we should get back the XYXY format of bounding box + reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), + new_format=boxes.format, + ) + assert (reconverted_bbox == boxes).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}" diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 4de86144de9..843d40de0c6 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -185,16 +185,38 @@ def _xyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] +def _xyxyxyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: + return bounding_boxes[:, [[0, 1], [2, 3], [4, 5], [6, 7]]] + + def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: """Converts a set of bounding boxes to its edge points. + .. note:: + + This handles rotated :class:`tv_tensors.BoundingBoxes` formats + by first converting them to XYXYXYXY format. + + Due to floating-point approximation, this may not be an exact computation. + Args: bounding_boxes (tv_tensors.BoundingBoxes): A set of ``N`` bounding boxes (of shape ``[N, 4]``) Returns: - tv_tensors.KeyPoints: The edges, of shape ``[N, 4, 2]`` + tv_tensors.KeyPoints: The edges, as a polygon of shape ``[N, 4, 2]`` """ - # TODO: support rotated BBOX + if is_rotated_bounding_box_format(bounding_boxes.format): + # We are working on a rotated bounding box + bbox = _convert_bounding_box_format( + bounding_boxes.as_subclass(torch.Tensor), + old_format=bounding_boxes.format, + new_format=BoundingBoxFormat.XYXYXYXY, + inplace=False, + ) + return tv_tensors.KeyPoints( + _xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size + ) + bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, From a869f39e609d5003a65c862aad343cd6fa187c75 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:05:50 +0200 Subject: [PATCH 23/60] Applied ufmt --- .../transforms/v2/functional/_geometry.py | 16 ++++------------ torchvision/transforms/v2/functional/_meta.py | 6 ++---- torchvision/tv_tensors/_keypoints.py | 3 +-- 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 27fe6a722bc..448199dbe0c 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2677,9 +2677,7 @@ def five_crop_video( return five_crop_image(video, size) -def ten_crop( - inpt: torch.Tensor, size: list[int], vertical_flip: bool = False -) -> tuple[ +def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, @@ -2703,9 +2701,7 @@ def ten_crop( @_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor) @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image) -def ten_crop_image( - image: torch.Tensor, size: list[int], vertical_flip: bool = False -) -> tuple[ +def ten_crop_image(image: 
torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, @@ -2730,9 +2726,7 @@ def ten_crop_image( @_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image) -def _ten_crop_image_pil( - image: PIL.Image.Image, size: list[int], vertical_flip: bool = False -) -> tuple[ +def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: bool = False) -> tuple[ PIL.Image.Image, PIL.Image.Image, PIL.Image.Image, @@ -2757,9 +2751,7 @@ def _ten_crop_image_pil( @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video) -def ten_crop_video( - video: torch.Tensor, size: list[int], vertical_flip: bool = False -) -> tuple[ +def ten_crop_video(video: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 843d40de0c6..d6699235572 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -196,7 +196,7 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) - This handles rotated :class:`tv_tensors.BoundingBoxes` formats by first converting them to XYXYXYXY format. - + Due to floating-point approximation, this may not be an exact computation. Args: @@ -213,9 +213,7 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) - new_format=BoundingBoxFormat.XYXYXYXY, inplace=False, ) - return tv_tensors.KeyPoints( - _xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size - ) + return tv_tensors.KeyPoints(_xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size) bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index c578d49cdce..87520353cc3 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -72,8 +72,7 @@ def __init__( dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - ): - ... + ): ... @classmethod def _wrap_output( From 6007b2cdbd298accbe05382662a38821ba4dccd7 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:07:57 +0200 Subject: [PATCH 24/60] Adding a type:ignore[override] on KeyPoints__repr__ as it also exist on BoundingBoxes.__repr__ whose signature was copied --- torchvision/tv_tensors/_keypoints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 87520353cc3..1e019e46446 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -102,5 +102,5 @@ def _wrap_output( output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) return output - def __repr__(self, *, tensor_contents: Any = None) -> str: + def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override] return self._make_repr(canvas_size=self.canvas_size) From b68b57bbe4c1ab54a02666f00d69519d6f7f763c Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:12:11 +0200 Subject: [PATCH 25/60] Fixed flake8 compliance on "..." 
present in the line of the __init__ "function" definition --- torchvision/tv_tensors/_keypoints.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 1e019e46446..8e0b1a502fc 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -72,7 +72,8 @@ def __init__( dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - ): ... + ): + pass @classmethod def _wrap_output( From 801e24d61aa5337ebd274f45c46b113ec61e1316 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:12:44 +0200 Subject: [PATCH 26/60] get_all_keypoints is now get_keypoints and returns the only keypoints object in the sample (as is assumed) --- torchvision/transforms/v2/_utils.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 5add3c7bc20..fd41b222b19 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -2,7 +2,7 @@ import collections.abc import numbers -from collections.abc import Iterable, Sequence +from collections.abc import Sequence from contextlib import suppress from typing import Any, Callable, Literal @@ -165,18 +165,16 @@ def get_bounding_boxes(flat_inputs: list[Any]) -> tv_tensors.BoundingBoxes: raise ValueError("No bounding boxes were found in the sample") -def get_all_keypoints(flat_inputs: list[Any]) -> Iterable[tv_tensors.KeyPoints]: - """Yields all KeyPoints in the input. +def get_keypoints(flat_inputs: list[Any]) -> tv_tensors.KeyPoints: + """Returns the KeyPoints in the input. - Raises: - ValueError: No KeyPoints can be found + Assumes only one ``KeyPoints`` object is present """ generator = (inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.KeyPoints)) try: - yield next(generator) + return next(generator) except StopIteration: raise ValueError("No Keypoints were found in the sample.") - return generator def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]: From 73a40a83c6cfe3df36b6f0c080f7f22b9c93081e Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:31:10 +0200 Subject: [PATCH 27/60] Fixed docstring on sanitize_keypoints --- torchvision/transforms/v2/functional/_misc.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index bddd8e27721..35fc7e3110d 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -334,17 +334,24 @@ def sanitize_keypoints( """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. This removes the keypoints that are outside of their corresponing image. - You may want to first call :func:`~torchvision.transforms.v2.functional.clam_keypoints` - first to avoid undesired removals. + + It is recommended to call it at the end of a pipeline, before passing the + input to the models. It is critical to call this transform if + :class:`~torchvision.transforms.v2.RandomIoUCrop` was called. + If you want to be extra careful, you may call it after all transforms that + may modify the key points but once at the end should be enough in most + cases. .. 
note:: - Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes` + + Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`. Raises: ValueError: If the keypoints are not passed as a two dimensional tensor. Args: - keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed + keypoints (torch.Tensor or :class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being sanitized. + Should be of shape ``[N, 2]`` canvas_size (Optional[tuple[int, int]], optional): The canvas_size of the bounding boxes (size of the corresponding image/video). Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. @@ -372,8 +379,10 @@ def sanitize_keypoints( canvas_size=canvas_size, ) return keypoints[valid], valid + if not isinstance(keypoints, tv_tensors.KeyPoints): raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") + valid = _get_sanitize_keypoints_mask( keypoints, canvas_size=keypoints.canvas_size, From f71bb8ad14579d6394086769dfacd2ab149dabc2 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 13:33:01 +0100 Subject: [PATCH 28/60] Fix --- torchvision/transforms/v2/functional/_geometry.py | 1 + 1 file changed, 1 insertion(+) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 26f30724bd3..6d1dbd713cb 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -385,6 +385,7 @@ def _resize_keypoints_dispatch( keypoints: tv_tensors.KeyPoints, size: Optional[list[int]], max_size: Optional[int] = None, + **kwargs: Any, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( keypoints.as_subclass(torch.Tensor), From 5825706277e8ae56452f23fad873718967bba3ea Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 13:36:39 +0100 Subject: [PATCH 29/60] Fix --- test/test_tv_tensors.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index 0c06bc9c929..26333fb49c2 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -139,7 +139,8 @@ def test_to_wrapping(make_input): ) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_to_tv_tensor_reference(make_input, return_type): - tensor = torch.rand((3, 16, 16), dtype=torch.float64) + tensor = make_input().to(dtype=torch.float64).as_subclass(torch.Tensor) + assert type(tensor) is torch.Tensor dp = make_input() with tv_tensors.set_return_type(return_type): From 6a14fb9c98baffdd415fec1c2d88d46177343f98 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 13:41:38 +0100 Subject: [PATCH 30/60] Fix --- test/common_utils.py | 17 ++----- test/test_transforms_v2.py | 47 ++++++++++++++----- .../transforms/v2/functional/_geometry.py | 16 +++++-- torchvision/transforms/v2/functional/_misc.py | 2 +- 4 files changed, 51 insertions(+), 31 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 600cb5a13b7..dcaed3fd2bd 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -9,7 +9,6 @@ import sys import tempfile import warnings -from collections.abc import Sequence from subprocess import CalledProcessError, check_output, STDOUT import numpy as np @@ -401,18 +400,10 @@ def make_image_pil(*args, **kwargs): return to_pil_image(make_image(*args, **kwargs)) -def make_keypoints( - canvas_size: 
tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device="cpu" -) -> tv_tensors.KeyPoints: - """Make the KeyPoints for testing purposes""" - if isinstance(num_points, int): - num_points = [num_points] - single_coord_shape: tuple[int, ...] = tuple(num_points) + (1,) - y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device) - x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device) - points = torch.cat((x, y), dim=-1) - keypoints = tv_tensors.KeyPoints(points, canvas_size=canvas_size) - return keypoints +def make_keypoints(canvas_size=DEFAULT_SIZE, *, num_points=4, dtype=None, device="cpu"): + y = torch.randint(0, canvas_size[0], size=(num_points, 1), dtype=dtype, device=device) + x = torch.randint(0, canvas_size[1], size=(num_points, 1), dtype=dtype, device=device) + return tv_tensors.KeyPoints(torch.cat((x, y), dim=-1), canvas_size=canvas_size) def make_bounding_boxes( diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index c3a9692a664..c671938ed8d 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6877,14 +6877,27 @@ def test_no_valid_input(self, query): query(["blah"]) @pytest.mark.parametrize( - "boxes", [ - tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 2., 2.]]), format="XYXY", canvas_size=(4, 4)), # [boxes0] - tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1.]]), format="XYWH", canvas_size=(4, 4)), # [boxes1] - tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1.]]), format="CXCYWH", canvas_size=(4, 4)), # [boxes2] - tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1., 45]]), format="CXCYWHR", canvas_size=(4, 4)), # [boxes3] - tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1., 45.]]), format="XYWHR", canvas_size=(4, 4)), # [boxes4] - tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 2., 2., 2., 2., 1.]]), format="XY" * 4, canvas_size=(4, 4)), # [boxes5] - ] + "boxes", + [ + tv_tensors.BoundingBoxes( + torch.tensor([[1.0, 1.0, 2.0, 2.0]]), format="XYXY", canvas_size=(4, 4) + ), # [boxes0] + tv_tensors.BoundingBoxes( + torch.tensor([[1.0, 1.0, 1.0, 1.0]]), format="XYWH", canvas_size=(4, 4) + ), # [boxes1] + tv_tensors.BoundingBoxes( + torch.tensor([[1.5, 1.5, 1.0, 1.0]]), format="CXCYWH", canvas_size=(4, 4) + ), # [boxes2] + tv_tensors.BoundingBoxes( + torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4) + ), # [boxes3] + tv_tensors.BoundingBoxes( + torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4) + ), # [boxes4] + tv_tensors.BoundingBoxes( + torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XY" * 4, canvas_size=(4, 4) + ), # [boxes5] + ], ) def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): kp = F.convert_bounding_boxes_to_points(boxes) @@ -6897,10 +6910,14 @@ def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes) # If we convert to XYXYXYXY format, we should get what we want. reconverted = kp.reshape(-1, 8) reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size), - new_format=boxes.format + tv_tensors.BoundingBoxes( + reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size + ), + new_format=boxes.format, ) - assert ((reconverted_bbox - boxes).abs() < 1e-5).all(), ( # Rotational computations mean that we can't ensure exactitude. 
+ assert ( + (reconverted_bbox - boxes).abs() < 1e-5 + ).all(), ( # Rotational computations mean that we can't ensure exactitude. f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}\n\t" f"Diff: {reconverted_bbox - boxes}" ) @@ -6909,7 +6926,11 @@ def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes) # If we use A | C, we should get back the XYXY format of bounding box reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), + tv_tensors.BoundingBoxes( + reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size + ), new_format=boxes.format, ) - assert (reconverted_bbox == boxes).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}" + assert ( + reconverted_bbox == boxes + ).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}" diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 6d1dbd713cb..d24103fae3b 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2678,7 +2678,9 @@ def five_crop_video( return five_crop_image(video, size) -def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ +def ten_crop( + inpt: torch.Tensor, size: list[int], vertical_flip: bool = False +) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, @@ -2702,7 +2704,9 @@ def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) - @_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor) @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image) -def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ +def ten_crop_image( + image: torch.Tensor, size: list[int], vertical_flip: bool = False +) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, @@ -2727,7 +2731,9 @@ def ten_crop_image(image: torch.Tensor, size: list[int], vertical_flip: bool = F @_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image) -def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: bool = False) -> tuple[ +def _ten_crop_image_pil( + image: PIL.Image.Image, size: list[int], vertical_flip: bool = False +) -> tuple[ PIL.Image.Image, PIL.Image.Image, PIL.Image.Image, @@ -2752,7 +2758,9 @@ def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video) -def ten_crop_video(video: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ +def ten_crop_video( + video: torch.Tensor, size: list[int], vertical_flip: bool = False +) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 35fc7e3110d..f0384a643ab 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -344,7 +344,7 @@ def sanitize_keypoints( .. note:: - Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`. + Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`. Raises: ValueError: If the keypoints are not passed as a two dimensional tensor. 
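
To make the sanitization behaviour described in the docstring above concrete, a small hedged usage sketch follows. It assumes sanitize_keypoints ends up exposed under torchvision.transforms.v2.functional, which this series implies but does not show here:

    import torch
    from torchvision import tv_tensors
    from torchvision.transforms.v2 import functional as F

    # Two points on a 10x10 canvas; the second lies outside the canvas and is dropped.
    kp = tv_tensors.KeyPoints(torch.tensor([[4.0, 4.0], [50.0, 3.0]]), canvas_size=(10, 10))
    kept, valid = F.sanitize_keypoints(kp)
    # `kept` should be a KeyPoints of shape [1, 2] (only the first point survives),
    # and `valid` the boolean mask that was applied along the first dimension.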
From 35650a4e5a918a90676fae9066ed8543cced9c5d Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:23:07 +0100 Subject: [PATCH 31/60] Revert "Fix" This reverts commit 5825706277e8ae56452f23fad873718967bba3ea. --- test/test_tv_tensors.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index 26333fb49c2..0c06bc9c929 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -139,8 +139,7 @@ def test_to_wrapping(make_input): ) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_to_tv_tensor_reference(make_input, return_type): - tensor = make_input().to(dtype=torch.float64).as_subclass(torch.Tensor) - assert type(tensor) is torch.Tensor + tensor = torch.rand((3, 16, 16), dtype=torch.float64) dp = make_input() with tv_tensors.set_return_type(return_type): From 08e8843c7f80ff4bd0480a2a8cca445055841fda Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:24:58 +0100 Subject: [PATCH 32/60] Remove some annotations --- torchvision/tv_tensors/_keypoints.py | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 8e0b1a502fc..928df5dd6ae 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Mapping, MutableSequence, Optional, Sequence, Tuple, TYPE_CHECKING, Union +from typing import Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union import torch from torch.utils._pytree import tree_flatten @@ -40,13 +40,13 @@ class KeyPoints(TVTensor): ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``. """ - canvas_size: Tuple[int, int] + canvas_size: tuple[int, int] def __new__( cls, data: Any, *, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, @@ -60,21 +60,6 @@ def __new__( points.canvas_size = canvas_size return points - if TYPE_CHECKING: - # EVIL: Just so that MYPY+PYLANCE+others stop shouting that everything is wrong when initializeing the TVTensor - # Not read or defined at Runtime (only at linting time). - # TODO: BOUNDING BOXES needs something similar - def __init__( - self, - data: Any, - *, - canvas_size: Tuple[int, int], - dtype: Optional[torch.dtype] = None, - device: Optional[Union[torch.device, str, int]] = None, - requires_grad: Optional[bool] = None, - ): - pass - @classmethod def _wrap_output( cls, @@ -87,7 +72,7 @@ def _wrap_output( # For BoundingBoxes, that included format, but we only support one format here ! 
flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator] first_bbox_from_args = next(x for x in flat_params if isinstance(x, KeyPoints)) - canvas_size: Tuple[int, int] = first_bbox_from_args.canvas_size + canvas_size = first_bbox_from_args.canvas_size if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): output = KeyPoints(output, canvas_size=canvas_size) From f03f958f5cad75cfa2cd5f6287b6583074034fcf Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:29:21 +0100 Subject: [PATCH 33/60] Align signatures --- torchvision/tv_tensors/_keypoints.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 928df5dd6ae..dbc6af0f278 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -47,10 +47,10 @@ def __new__( data: Any, *, canvas_size: tuple[int, int], - dtype: Optional[torch.dtype] = None, - device: Optional[Union[torch.device, str, int]] = None, - requires_grad: Optional[bool] = None, - ): + dtype: torch.dtype | None = None, + device: torch.device | str | int | None = None, + requires_grad: bool | None = None, + ) -> KeyPoints: tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) if tensor.ndim == 1: tensor = tensor.unsqueeze(0) @@ -63,10 +63,10 @@ def __new__( @classmethod def _wrap_output( cls, - output: Any, + output: torch.Tensor, args: Sequence[Any] = (), - kwargs: Optional[Mapping[str, Any]] = None, - ) -> Any: + kwargs: Mapping[str, Any] | None = None, + ) -> KeyPoints: # Mostly copied over from the BoundingBoxes TVTensor, minor improvements. # This copies over the metadata. # For BoundingBoxes, that included format, but we only support one format here ! 
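
As a quick illustration of the metadata propagation that _wrap_output is meant to provide, here is a minimal sketch against the class as it stands at this point in the series (illustrative only, not a test from the patch):

    import torch
    from torchvision import tv_tensors

    kp = tv_tensors.KeyPoints(torch.tensor([[1.0, 2.0], [3.0, 4.0]]), canvas_size=(5, 5))

    # Results of ordinary tensor ops come back as KeyPoints with canvas_size carried over.
    halved = kp / 2
    assert isinstance(halved, tv_tensors.KeyPoints) and halved.canvas_size == (5, 5)

    # Ops that return sequences (e.g. unbind) re-wrap each element as well.
    parts = kp.unbind(0)
    assert all(isinstance(p, tv_tensors.KeyPoints) for p in parts)
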
From d04a3e39d13a87c5c22509644ee5c2406ccb1faf Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:33:26 +0100 Subject: [PATCH 34/60] Proper fix for test_to_tv_tensor_reference --- torchvision/tv_tensors/_bounding_boxes.py | 1 + torchvision/tv_tensors/_keypoints.py | 36 +++++++++++------------ 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/torchvision/tv_tensors/_bounding_boxes.py b/torchvision/tv_tensors/_bounding_boxes.py index bf8ed8cfcb4..31bfbf58b55 100644 --- a/torchvision/tv_tensors/_bounding_boxes.py +++ b/torchvision/tv_tensors/_bounding_boxes.py @@ -111,6 +111,7 @@ def _wrap_output( if isinstance(output, torch.Tensor) and not isinstance(output, BoundingBoxes): output = BoundingBoxes._wrap(output, format=format, canvas_size=canvas_size, check_dims=False) elif isinstance(output, (tuple, list)): + # This branch exists for chunk() and unbind() output = type(output)( BoundingBoxes._wrap(part, format=format, canvas_size=canvas_size, check_dims=False) for part in output ) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index dbc6af0f278..e8283ba7a3b 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -42,6 +42,17 @@ class KeyPoints(TVTensor): canvas_size: tuple[int, int] + @classmethod + def _wrap(cls, tensor: torch.Tensor, *, canvas_size: tuple[int, int], check_dims: bool = True) -> KeyPoints: # type: ignore[override] + if check_dims: + if tensor.ndim == 1: + tensor = tensor.unsqueeze(0) + elif tensor.shape[-1] != 2: + raise ValueError(f"Expected a tensor of shape (..., 2), not {tensor.shape}") + points = tensor.as_subclass(cls) + points.canvas_size = canvas_size + return points + def __new__( cls, data: Any, @@ -51,14 +62,8 @@ def __new__( device: torch.device | str | int | None = None, requires_grad: bool | None = None, ) -> KeyPoints: - tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) - if tensor.ndim == 1: - tensor = tensor.unsqueeze(0) - elif tensor.shape[-1] != 2: - raise ValueError(f"Expected a tensor of shape (..., 2), not {tensor.shape}") - points = tensor.as_subclass(cls) - points.canvas_size = canvas_size - return points + tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) + return cls._wrap(tensor, canvas_size=canvas_size) @classmethod def _wrap_output( @@ -75,17 +80,10 @@ def _wrap_output( canvas_size = first_bbox_from_args.canvas_size if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): - output = KeyPoints(output, canvas_size=canvas_size) - elif isinstance(output, MutableSequence): - # For lists and list-like object we don't try to create a new object, we just set the values in the list - # This allows us to conserve the type of complex list-like object that may not follow the initialization API of lists - for i, part in enumerate(output): - output[i] = KeyPoints(part, canvas_size=canvas_size) - elif isinstance(output, Sequence): - # Non-mutable sequences handled here (like tuples) - # Every sequence that is not a mutable sequence is a non-mutable sequence - # We have to use a tuple here, since we know its initialization api, unlike for `output` - output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) + output = KeyPoints._wrap(output, canvas_size=canvas_size, check_dims=False) + elif isinstance(output, (tuple, list)): + # This branch exists for chunk() and unbind() + output = type(output)(KeyPoints._wrap(part, 
canvas_size=canvas_size, check_dims=False) for part in output) return output def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override] From 0b1da8941f68b148cb8a6b1ff54324e494903137 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:35:29 +0100 Subject: [PATCH 35/60] lint --- torchvision/tv_tensors/_keypoints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index e8283ba7a3b..2aecd3bff8a 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union +from typing import Any, Mapping, Sequence import torch from torch.utils._pytree import tree_flatten From f4c8da82446602207c420ca4dd28f9d95453b5fc Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:37:59 +0100 Subject: [PATCH 36/60] Add more keypoints tests in test_tv_tensors --- test/test_tv_tensors.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index 0c06bc9c929..a6e77614a40 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -286,7 +286,9 @@ def test_inplace_op_no_wrapping(make_input, return_type): assert type(dp) is original_type -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) def test_wrap(make_input): dp = make_input() @@ -299,7 +301,9 @@ def test_wrap(make_input): assert dp_new.data_ptr() == output.data_ptr() -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("requires_grad", [False, True]) def test_deepcopy(make_input, requires_grad): dp = make_input(dtype=torch.float) @@ -316,7 +320,9 @@ def test_deepcopy(make_input, requires_grad): assert dp_deepcopied.requires_grad is requires_grad -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) @pytest.mark.parametrize( "op", From 48289d22a367c36fdac7cf2012656427bbb57ff0 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 14:47:49 +0100 Subject: [PATCH 37/60] Revert changes to SanitizeBoundingBoxes --- torchvision/transforms/v2/_misc.py | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index d6d61fa6d6c..dfd521b13be 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -341,9 +341,9 @@ def transform(self, inpt: Any, params: dict[str, Any]) -> Any: class SanitizeBoundingBoxes(Transform): - """Remove degenerate/invalid bounding boxes and their corresponding labels, masks and keypoints. + """Remove degenerate/invalid bounding boxes and their corresponding labels and masks. 
- This transform removes bounding boxes and their associated labels, masks and keypoints that: + This transform removes bounding boxes and their associated labels/masks that: - are below a given ``min_size`` or ``min_area``: by default this also removes degenerate boxes that have e.g. X2 <= X1. - have any coordinate outside of their corresponding image. You may want to @@ -359,14 +359,6 @@ class SanitizeBoundingBoxes(Transform): may modify bounding boxes but once at the end should be enough in most cases. - .. note:: - This transform requires that any :class:`~torchvision.tv_tensor.KeyPoints` or - :class:`~torchvision.tv_tensor.Mask` provided has to match the bounding boxes in shape. - - If the bounding boxes are of shape ``[N, K]``, then the - KeyPoints have to be of shape ``[N, ..., 2]`` or ``[N, 2]`` - and the masks have to be of shape ``[N, ..., H, W]`` or ``[N, H, W]`` - Args: min_size (float, optional): The size below which bounding boxes are removed. Default is 1. min_area (float, optional): The area below which bounding boxes are removed. Default is 1. @@ -446,15 +438,10 @@ def forward(self, *inputs: Any) -> Any: return tree_unflatten(flat_outputs, spec) def transform(self, inpt: Any, params: dict[str, Any]) -> Any: - # For every object in the flattened input of the `forward` method, we apply transform - # The params contain the list of valid indices of the (N, K) bbox set - - # We suppose here that any KeyPoints or Masks TVTensors is of shape (N, ..., 2) and (N, ..., H, W) respectively - # TODO: check this. is_label = params["labels"] is not None and any(inpt is label for label in params["labels"]) - is_bbox_mask_or_kpoints = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints)) + is_bounding_boxes_or_mask = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)) - if not (is_label or is_bbox_mask_or_kpoints): + if not (is_label or is_bounding_boxes_or_mask): return inpt output = inpt[params["valid"]] From fc528ffe6467ab31decdefcd12367dac06592344 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 15:40:30 +0100 Subject: [PATCH 38/60] Bunch of stuff --- test/test_tv_tensors.py | 30 ++++--------------- torchvision/transforms/v2/_utils.py | 1 + torchvision/transforms/v2/functional/_misc.py | 2 ++ 3 files changed, 8 insertions(+), 25 deletions(-) diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index a6e77614a40..eeae6a8d321 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -56,37 +56,17 @@ def test_bbox_dim_error(): tv_tensors.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32)) -@pytest.mark.parametrize( - "data", - [ - torch.randint(0, 32, size=(5, 2)), - [ - [ - 0, - 0, - ], - [ - 2, - 2, - ], - ], - [ - 1, - 2, - ], - ], -) +@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 2)), [[0, 0], [2, 2]], [1, 2]]) def test_keypoints_instance(data): kpoint = tv_tensors.KeyPoints(data, canvas_size=(32, 32)) - assert isinstance(kpoint, tv_tensors.KeyPoints) + assert isinstance(kpoint, torch.Tensor) assert type(kpoint) is tv_tensors.KeyPoints assert kpoint.shape[-1] == 2 def test_keypoints_shape_error(): - data_3d = [(0, 1, 2)] - with pytest.raises(ValueError, match="shape"): - tv_tensors.KeyPoints(torch.tensor(data_3d), canvas_size=(11, 7)) + with pytest.raises(ValueError, match="Expected a tensor of shape"): + tv_tensors.KeyPoints(torch.tensor([[1, 2, 3]]), canvas_size=(11, 7)) @pytest.mark.parametrize( @@ -231,9 +211,9 @@ def test_force_subclass_with_metadata(return_type): 
bbox.requires_grad_(True) kpoints.requires_grad_(True) if return_type == "TVTensor": - assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) assert bbox.requires_grad + assert kpoints.canvas_size == canvas_size assert kpoints.requires_grad tv_tensors.set_return_type("tensor") diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index fd41b222b19..b6ce53e390b 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -165,6 +165,7 @@ def get_bounding_boxes(flat_inputs: list[Any]) -> tv_tensors.BoundingBoxes: raise ValueError("No bounding boxes were found in the sample") +# TODOKP this is unused and un-tested def get_keypoints(flat_inputs: list[Any]) -> tv_tensors.KeyPoints: """Returns the KeyPoints in the input. diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index f0384a643ab..f06493aa099 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -328,6 +328,7 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo return inpt.to(dtype) +# TODOKP This is untested. Also there's no corresponding transform class def sanitize_keypoints( keypoints: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None ) -> tuple[torch.Tensor, torch.Tensor]: @@ -390,6 +391,7 @@ def sanitize_keypoints( return tv_tensors.wrap(keypoints[valid], like=keypoints), valid +# TODOKP Untested, see above def _get_sanitize_keypoints_mask( keypoints: torch.Tensor, canvas_size: tuple[int, int], From ec459e65f37d78f0ecc76083e1501d40fbc5f6e7 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 15:43:39 +0100 Subject: [PATCH 39/60] Remove some stuff --- torchvision/tv_tensors/__init__.py | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index e1c6b2202df..f17bec11ab6 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -1,5 +1,3 @@ -from typing import TypeVar - import torch from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat @@ -11,14 +9,11 @@ from ._video import Video -_WRAP_LIKE_T = TypeVar("_WRAP_LIKE_T", bound=TVTensor) - - # TODO: Fix this. We skip this method as it leads to # RecursionError: maximum recursion depth exceeded while calling a Python object # Until `disable` is removed, there will be graph breaks after all calls to functional transforms @torch.compiler.disable -def wrap(wrappee: torch.Tensor, *, like: _WRAP_LIKE_T, **kwargs) -> _WRAP_LIKE_T: +def wrap(wrappee, *, like, **kwargs): """Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``. If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of @@ -32,25 +27,12 @@ def wrap(wrappee: torch.Tensor, *, like: _WRAP_LIKE_T, **kwargs) -> _WRAP_LIKE_T Ignored otherwise. 
""" if isinstance(like, BoundingBoxes): - return BoundingBoxes._wrap( # type:ignore + return BoundingBoxes._wrap( wrappee, format=kwargs.get("format", like.format), canvas_size=kwargs.get("canvas_size", like.canvas_size), ) elif isinstance(like, KeyPoints): - return KeyPoints(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size)) # type:ignore + return KeyPoints._wrap(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size)) else: return wrappee.as_subclass(type(like)) - - -__all__: list[str] = [ - "wrap", - "KeyPoints", - "Video", - "TVTensor", - "set_return_type", - "Mask", - "Image", - "BoundingBoxFormat", - "BoundingBoxes", -] From c499ef5e9464175b6d5e1fdf6ffb61e3693018b0 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 15:47:55 +0100 Subject: [PATCH 40/60] Fix variable name --- torchvision/tv_tensors/_keypoints.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 2aecd3bff8a..80ed194bc63 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -72,12 +72,10 @@ def _wrap_output( args: Sequence[Any] = (), kwargs: Mapping[str, Any] | None = None, ) -> KeyPoints: - # Mostly copied over from the BoundingBoxes TVTensor, minor improvements. - # This copies over the metadata. - # For BoundingBoxes, that included format, but we only support one format here ! + # Similar to BoundingBoxes._wrap_output(), see comment there. flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator] - first_bbox_from_args = next(x for x in flat_params if isinstance(x, KeyPoints)) - canvas_size = first_bbox_from_args.canvas_size + first_keypoints_from_args = next(x for x in flat_params if isinstance(x, KeyPoints)) + canvas_size = first_keypoints_from_args.canvas_size if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): output = KeyPoints._wrap(output, canvas_size=canvas_size, check_dims=False) From e26a07c98a4838842112bd3df4be70c777bb48c5 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Tue, 3 Jun 2025 15:55:53 +0100 Subject: [PATCH 41/60] Cleanup untested stuff --- test/test_transforms_v2.py | 37 +++++++------------------------------ 1 file changed, 7 insertions(+), 30 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index c671938ed8d..16d9332db97 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -336,20 +336,6 @@ def _make_transform_sample(transform, *, image_or_video, adapter): device=device, ), keypoints=make_keypoints(canvas_size=size), - keypoints_degenerate=tv_tensors.KeyPoints( - [ - [0, 1], # left edge - [1, 0], # top edge - [0, 0], # top left corner - [size[1], 1], # right edge - [size[1], 0], # top right corner - [1, size[0]], # bottom edge - [0, size[0]], # bottom left corner - [size[1], size[0]], # bottom right corner - ], - canvas_size=size, - device=device, - ), detection_mask=make_detection_masks(size, device=device), segmentation_mask=make_segmentation_mask(size, device=device), int=0, @@ -404,6 +390,7 @@ def _check_transform_sample_input_smoke(transform, input, *, adapter): # Enforce that the transform does not turn a degenerate bounding box, e.g. marked by RandomIoUCrop (or any other # future transform that does this), back into a valid one. 
+ # TODO: We may want to do that for KeyPoints too for degenerate_bounding_boxes in ( bounding_box for name, bounding_box in sample.items() @@ -6879,24 +6866,14 @@ def test_no_valid_input(self, query): @pytest.mark.parametrize( "boxes", [ - tv_tensors.BoundingBoxes( - torch.tensor([[1.0, 1.0, 2.0, 2.0]]), format="XYXY", canvas_size=(4, 4) - ), # [boxes0] - tv_tensors.BoundingBoxes( - torch.tensor([[1.0, 1.0, 1.0, 1.0]]), format="XYWH", canvas_size=(4, 4) - ), # [boxes1] - tv_tensors.BoundingBoxes( - torch.tensor([[1.5, 1.5, 1.0, 1.0]]), format="CXCYWH", canvas_size=(4, 4) - ), # [boxes2] - tv_tensors.BoundingBoxes( - torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4) - ), # [boxes3] - tv_tensors.BoundingBoxes( - torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4) - ), # [boxes4] + tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 2.0, 2.0]]), format="XYXY", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0]]), format="XYWH", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0]]), format="CXCYWH", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4)), tv_tensors.BoundingBoxes( torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XY" * 4, canvas_size=(4, 4) - ), # [boxes5] + ), ], ) def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): From b91583e2c0f7afcb35a022b10b03142b48d858ef Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 11:32:44 +0100 Subject: [PATCH 42/60] Add todos and complete error messages --- torchvision/transforms/v2/_augment.py | 4 +++- torchvision/transforms/v2/_utils.py | 2 +- torchvision/transforms/v2/functional/_meta.py | 1 + torchvision/tv_tensors/_keypoints.py | 3 +++ 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py index 980e27647f7..c6da9aba98b 100644 --- a/torchvision/transforms/v2/_augment.py +++ b/torchvision/transforms/v2/_augment.py @@ -158,7 +158,9 @@ def forward(self, *inputs): needs_transform_list = self._needs_transform_list(flat_inputs) if has_any(flat_inputs, PIL.Image.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints): - raise ValueError(f"{type(self).__name__}() does not support PIL images, bounding boxes and masks.") + raise ValueError( + f"{type(self).__name__}() does not support PIL images, bounding boxes, keypoints and masks." 
+            )
 
         labels = self._labels_getter(inputs)
         if not isinstance(labels, torch.Tensor):
diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py
index b6ce53e390b..bc79c266bae 100644
--- a/torchvision/transforms/v2/_utils.py
+++ b/torchvision/transforms/v2/_utils.py
@@ -212,7 +212,7 @@ def query_size(flat_inputs: list[Any]) -> tuple[int, int]:
         )
     }
     if not sizes:
-        raise TypeError("No image, video, mask or bounding box was found in the sample")
+        raise TypeError("No image, video, mask, bounding box or keypoint was found in the sample")
     elif len(sizes) > 1:
         raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
     h, w = sizes.pop()
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 383e6ca91b0..29c4ab018e5 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -189,6 +189,7 @@ def _xyxyxyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor:
     return bounding_boxes[:, [[0, 1], [2, 3], [4, 5], [6, 7]]]
 
 
+# TODOKP Should this be in the box ops? rename points->keypoints.
 def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints:
     """Converts a set of bounding boxes to its edge points.
 
diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py
index 80ed194bc63..55772cbecac 100644
--- a/torchvision/tv_tensors/_keypoints.py
+++ b/torchvision/tv_tensors/_keypoints.py
@@ -8,6 +8,9 @@
 from ._tv_tensor import TVTensor
 
 
+# TODOKP do we support an arbitrary number of leading dimensions, as claimed in the
+# docs?
+# This doesn't seem to be tested?
 class KeyPoints(TVTensor):
     """:class:`torch.Tensor` subclass for tensors with shape ``[..., 2]`` that represent points in an image.
From 6dc2e333a384739203ddc3baf568e28a40f896bd Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 11:44:13 +0100 Subject: [PATCH 43/60] Add test for get_size --- test/test_transforms_v2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index be2e37f34f8..4e5d507fbc0 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -2839,6 +2839,7 @@ def test_get_num_channels(self, kernel, make_input): (F._meta._get_size_image_pil, make_image_pil), (F.get_size_image, make_image), (F.get_size_bounding_boxes, make_bounding_boxes), + (F.get_size_keypoints, make_keypoints), (F.get_size_mask, make_detection_masks), (F.get_size_mask, make_segmentation_mask), (F.get_size_video, make_video), From 0db21e0e2106080724a7a6a42ecf7892fac97956 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 11:56:52 +0100 Subject: [PATCH 44/60] Simplify test --- test/test_transforms_v2.py | 41 +++++++------------ torchvision/transforms/v2/functional/_meta.py | 4 +- 2 files changed, 16 insertions(+), 29 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 4e5d507fbc0..2943a5c8770 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6946,6 +6946,7 @@ def test_no_valid_input(self, query): with pytest.raises(TypeError, match="No image"): query(["blah"]) + # TODOKP this is tested here in TestUtils but defined in meta @pytest.mark.parametrize( "boxes", [ @@ -6955,7 +6956,7 @@ def test_no_valid_input(self, query): tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4)), tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4)), tv_tensors.BoundingBoxes( - torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XY" * 4, canvas_size=(4, 4) + torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XYXYXYXY", canvas_size=(4, 4) ), ], ) @@ -6963,34 +6964,20 @@ def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes) kp = F.convert_bounding_boxes_to_points(boxes) assert kp.shape == (boxes.shape[0], 4, 2) assert kp.dtype == boxes.dtype - # kp is a list of A, B, C, D polygons. + # We manually convert the kp back into a BoundingBoxes, and convert that + # bbox back into the original `boxes` format to compare against it. if F._meta.is_rotated_bounding_box_format(boxes.format): - # In the rotated case - # If we convert to XYXYXYXY format, we should get what we want. reconverted = kp.reshape(-1, 8) - reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes( - reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size - ), - new_format=boxes.format, - ) - assert ( - (reconverted_bbox - boxes).abs() < 1e-5 - ).all(), ( # Rotational computations mean that we can't ensure exactitude. 
- f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}\n\t" - f"Diff: {reconverted_bbox - boxes}" - ) + intermediate_format = tv_tensors.BoundingBoxFormat.XYXYXYXY else: - # In the unrotated case - # If we use A | C, we should get back the XYXY format of bounding box reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) - reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes( - reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size - ), - new_format=boxes.format, - ) - assert ( - reconverted_bbox == boxes - ).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}" + intermediate_format = tv_tensors.BoundingBoxFormat.XYXY + + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes( + reconverted, format=intermediate_format, canvas_size=kp.canvas_size + ), + new_format=boxes.format, + ) + assert_equal(reconverted_bbox, boxes, atol=1e-5, rtol=0) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 29c4ab018e5..80ca33aa744 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -189,9 +189,9 @@ def _xyxyxyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 3], [4, 5], [6, 7]]] -# TODOKP Should this be in the box ops? rename points->keypoints. +# TODOKP Should this be in the box ops? Or in utils? rename points->keypoints. def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: - """Converts a set of bounding boxes to its edge points. + """Convert a set of bounding boxes to its edge points. .. note:: From d4b130dbcc88733c252b7ac4fbf29e353e918c9c Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 13:38:03 +0100 Subject: [PATCH 45/60] Add more tests --- test/test_transforms_v2.py | 119 +++++++++++++----- torchvision/transforms/v2/functional/_meta.py | 2 +- torchvision/transforms/v2/functional/_misc.py | 1 + 3 files changed, 88 insertions(+), 34 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 2943a5c8770..d1b5c508a32 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -654,14 +654,8 @@ def affine_keypoints(keypoints): ) if clamp: - # It is important to clamp before casting, especially for CXCYWH format, dtype=int64 - output = F.clamp_keypoints( - output, - canvas_size=canvas_size, - ) + output = F.clamp_keypoints(output, canvas_size=canvas_size) else: - # We leave the bounding box as float64 so the caller gets the full precision to perform any additional - # operation dtype = output.dtype return output.to(dtype=dtype, device=device) @@ -803,7 +797,15 @@ def test_kernel_video(self): @pytest.mark.parametrize("size", OUTPUT_SIZES) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, size, make_input): max_size_kwarg = self._make_max_size_kwarg(use_max_size=size is None, size=size) @@ -844,6 +846,7 @@ def test_functional_signature(self, kernel, input_type): make_segmentation_mask, make_detection_masks, make_video, + make_keypoints, ], ) def test_transform(self, size, device, make_input): @@ -901,6 +904,22 @@ def 
_reference_resize_bounding_boxes(self, bounding_boxes, *, size, max_size=Non new_canvas_size=(new_height, new_width), ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) + @pytest.mark.parametrize("size", OUTPUT_SIZES) + @pytest.mark.parametrize("use_max_size", [True, False]) + @pytest.mark.parametrize("fn", [F.resize, transform_cls_to_functional(transforms.Resize)]) + def test_bounding_boxes_correctness(self, format, size, use_max_size, fn): + if not (max_size_kwarg := self._make_max_size_kwarg(use_max_size=use_max_size, size=size)): + return + + bounding_boxes = make_bounding_boxes(format=format, canvas_size=self.INPUT_SIZE) + + actual = fn(bounding_boxes, size=size, **max_size_kwarg) + expected = self._reference_resize_bounding_boxes(bounding_boxes, size=size, **max_size_kwarg) + + self._check_output_size(bounding_boxes, actual, size=size, **max_size_kwarg) + torch.testing.assert_close(actual, expected) + def _reference_resize_keypoints(self, keypoints, *, size, max_size=None): old_height, old_width = keypoints.canvas_size new_height, new_width = self._compute_output_size( @@ -923,22 +942,6 @@ def _reference_resize_keypoints(self, keypoints, *, size, max_size=None): new_canvas_size=(new_height, new_width), ) - @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) - @pytest.mark.parametrize("size", OUTPUT_SIZES) - @pytest.mark.parametrize("use_max_size", [True, False]) - @pytest.mark.parametrize("fn", [F.resize, transform_cls_to_functional(transforms.Resize)]) - def test_bounding_boxes_correctness(self, format, size, use_max_size, fn): - if not (max_size_kwarg := self._make_max_size_kwarg(use_max_size=use_max_size, size=size)): - return - - bounding_boxes = make_bounding_boxes(format=format, canvas_size=self.INPUT_SIZE) - - actual = fn(bounding_boxes, size=size, **max_size_kwarg) - expected = self._reference_resize_bounding_boxes(bounding_boxes, size=size, **max_size_kwarg) - - self._check_output_size(bounding_boxes, actual, size=size, **max_size_kwarg) - torch.testing.assert_close(actual, expected) - @pytest.mark.parametrize("size", OUTPUT_SIZES) @pytest.mark.parametrize("use_max_size", [True, False]) @pytest.mark.parametrize("fn", [F.resize, transform_cls_to_functional(transforms.Resize)]) @@ -989,6 +992,7 @@ def test_functional_pil_antialias_warning(self): make_segmentation_mask, make_detection_masks, make_video, + make_keypoints, ], ) def test_max_size_error(self, size, make_input): @@ -1031,6 +1035,7 @@ def test_max_size_error(self, size, make_input): make_segmentation_mask, make_detection_masks, make_video, + make_keypoints, ], ) def test_resize_size_none(self, input_size, max_size, expected_size, make_input): @@ -1076,6 +1081,7 @@ def test_transform_unknown_size_error(self): make_segmentation_mask, make_detection_masks, make_video, + make_keypoints, ], ) def test_noop(self, size, make_input): @@ -1103,6 +1109,7 @@ def test_noop(self, size, make_input): make_segmentation_mask, make_detection_masks, make_video, + make_keypoints, ], ) def test_no_regression_5405(self, make_input): @@ -1215,7 +1222,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.horizontal_flip, make_input()) @@ -1237,7 +1252,15 @@ def test_functional_signature(self, 
kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform(self, make_input, device): @@ -1304,7 +1327,15 @@ def test_keypoints_correctness(self, fn): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform_noop(self, make_input, device): @@ -1778,7 +1809,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.vertical_flip, make_input()) @@ -1800,7 +1839,15 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform(self, make_input, device): @@ -1861,7 +1908,15 @@ def test_keypoints_correctness(self, fn): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform_noop(self, make_input, device): @@ -6975,9 +7030,7 @@ def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes) intermediate_format = tv_tensors.BoundingBoxFormat.XYXY reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes( - reconverted, format=intermediate_format, canvas_size=kp.canvas_size - ), + tv_tensors.BoundingBoxes(reconverted, format=intermediate_format, canvas_size=kp.canvas_size), new_format=boxes.format, ) assert_equal(reconverted_bbox, boxes, atol=1e-5, rtol=0) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 80ca33aa744..4e812fcaaf7 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -462,11 +462,11 @@ def _clamp_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> t return keypoints.to(dtype=dtype) +# TODOKP there is no corresponding transform and this isn't tested def clamp_keypoints( inpt: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None, ) -> torch.Tensor: - """See :func:`~torchvision.transforms.v2.ClampKeyPoints` for details.""" if not torch.jit.is_scripting(): _log_api_usage_once(clamp_keypoints) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 
f06493aa099..b9ffb216aa0 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -346,6 +346,7 @@ def sanitize_keypoints( .. note:: Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`. + TODOKP Is this desirable? We probably want keypoints to behave the same as bboxes? Raises: ValueError: If the keypoints are not passed as a two dimensional tensor. From 2e28525ed30773ca9a32b92467dc50d93655bdf7 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 13:46:16 +0100 Subject: [PATCH 46/60] More tests --- test/test_transforms_v2.py | 27 ++++++++++--------- .../transforms/v2/functional/_geometry.py | 7 ++--- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index d1b5c508a32..c18acb72fb1 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -1461,7 +1461,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): check_functional(F.affine, make_input(), **self._MINIMAL_AFFINE_KWARGS) @@ -1483,7 +1483,7 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform(self, make_input, device): @@ -1588,17 +1588,6 @@ def _reference_affine_bounding_boxes(self, bounding_boxes, *, angle, translate, ), ) - def _reference_affine_keypoints(self, keypoints, *, angle, translate, scale, shear, center): - if center is None: - center = [s * 0.5 for s in keypoints.canvas_size[::-1]] - - return reference_affine_keypoints_helper( - keypoints, - affine_matrix=self._compute_affine_matrix( - angle=angle, translate=translate, scale=scale, shear=shear, center=center - ), - ) - @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) @@ -1645,6 +1634,18 @@ def test_transform_bounding_boxes_correctness(self, format, center, seed): torch.testing.assert_close(actual, expected) + def _reference_affine_keypoints(self, keypoints, *, angle, translate, scale, shear, center): + if center is None: + center = [s * 0.5 for s in keypoints.canvas_size[::-1]] + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=self._compute_affine_matrix( + angle=angle, translate=translate, scale=scale, shear=shear, center=center + ), + ) + + @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) @pytest.mark.parametrize("scale", _CORRECTNESS_AFFINE_KWARGS["scale"]) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 4672fb71efc..e38574d4378 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -868,6 +868,7 @@ def _affine_image_pil( 
return _FP.affine(image, matrix, interpolation=pil_modes_mapping[interpolation], fill=fill) +# TODO: Consider merging/unifying this with the bbox implementation def _affine_keypoints_with_expand( keypoints: torch.Tensor, canvas_size: tuple[int, int], @@ -935,10 +936,10 @@ def _affine_keypoints_with_expand( new_width, new_height = _compute_affine_output_size(affine_vector, width, height) canvas_size = (new_height, new_width) - out_kkpoints = clamp_keypoints(transformed_points, canvas_size=canvas_size).reshape(original_shape) - out_kkpoints = out_kkpoints.to(original_dtype) + out_keypoints = clamp_keypoints(transformed_points, canvas_size=canvas_size).reshape(original_shape) + out_keypoints = out_keypoints.to(original_dtype) - return out_kkpoints, canvas_size + return out_keypoints, canvas_size def affine_keypoints( From 51bd300da005bf0fcc5580b475a3c3bfafc1a70c Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 13:50:19 +0100 Subject: [PATCH 47/60] Add more test_functional_signature --- test/test_transforms_v2.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index c18acb72fb1..f1fd3ada61e 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -825,10 +825,10 @@ def test_functional(self, size, make_input): (F.resize_image, torch.Tensor), (F._geometry._resize_image_pil, PIL.Image.Image), (F.resize_image, tv_tensors.Image), - (F.resize_keypoints, tv_tensors.KeyPoints), (F.resize_bounding_boxes, tv_tensors.BoundingBoxes), (F.resize_mask, tv_tensors.Mask), (F.resize_video, tv_tensors.Video), + (F.resize_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -1241,10 +1241,10 @@ def test_functional(self, make_input): (F.horizontal_flip_image, torch.Tensor), (F._geometry._horizontal_flip_image_pil, PIL.Image.Image), (F.horizontal_flip_image, tv_tensors.Image), - (F.horizontal_flip_keypoints, tv_tensors.KeyPoints), (F.horizontal_flip_bounding_boxes, tv_tensors.BoundingBoxes), (F.horizontal_flip_mask, tv_tensors.Mask), (F.horizontal_flip_video, tv_tensors.Video), + (F.horizontal_flip_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -1472,10 +1472,10 @@ def test_functional(self, make_input): (F.affine_image, torch.Tensor), (F._geometry._affine_image_pil, PIL.Image.Image), (F.affine_image, tv_tensors.Image), - (F.affine_keypoints, tv_tensors.KeyPoints), (F.affine_bounding_boxes, tv_tensors.BoundingBoxes), (F.affine_mask, tv_tensors.Mask), (F.affine_video, tv_tensors.Video), + (F.affine_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -1829,10 +1829,10 @@ def test_functional(self, make_input): (F.vertical_flip_image, torch.Tensor), (F._geometry._vertical_flip_image_pil, PIL.Image.Image), (F.vertical_flip_image, tv_tensors.Image), - (F.vertical_flip_keypoints, tv_tensors.KeyPoints), (F.vertical_flip_bounding_boxes, tv_tensors.BoundingBoxes), (F.vertical_flip_mask, tv_tensors.Mask), (F.vertical_flip_video, tv_tensors.Video), + (F.vertical_flip_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -2033,10 +2033,10 @@ def test_functional(self, make_input): (F.rotate_image, torch.Tensor), (F._geometry._rotate_image_pil, PIL.Image.Image), (F.rotate_image, tv_tensors.Image), - (F.rotate_keypoints, tv_tensors.KeyPoints), (F.rotate_bounding_boxes, tv_tensors.BoundingBoxes), (F.rotate_mask, 
tv_tensors.Mask), (F.rotate_video, tv_tensors.Video), + (F.rotate_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -3221,6 +3221,7 @@ def test_functional(self, make_input): (F.elastic_bounding_boxes, tv_tensors.BoundingBoxes), (F.elastic_mask, tv_tensors.Mask), (F.elastic_video, tv_tensors.Video), + (F.elastic_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -3354,6 +3355,7 @@ def test_functional(self, make_input): (F.crop_bounding_boxes, tv_tensors.BoundingBoxes), (F.crop_mask, tv_tensors.Mask), (F.crop_video, tv_tensors.Video), + (F.crop_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -4236,6 +4238,7 @@ def test_functional(self, make_input): (F.resized_crop_bounding_boxes, tv_tensors.BoundingBoxes), (F.resized_crop_mask, tv_tensors.Mask), (F.resized_crop_video, tv_tensors.Video), + (F.resized_crop_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -4626,6 +4629,7 @@ def test_functional(self, make_input): (F.center_crop_bounding_boxes, tv_tensors.BoundingBoxes), (F.center_crop_mask, tv_tensors.Mask), (F.center_crop_video, tv_tensors.Video), + (F.center_crop_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): @@ -4856,6 +4860,7 @@ def test_functional(self, make_input): (F.perspective_bounding_boxes, tv_tensors.BoundingBoxes), (F.perspective_mask, tv_tensors.Mask), (F.perspective_video, tv_tensors.Video), + (F.perspective_keypoints, tv_tensors.KeyPoints), ], ) def test_functional_signature(self, kernel, input_type): From 34ba90c97fa51348a0956bd729f5166eb2b04ecf Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 13:52:56 +0100 Subject: [PATCH 48/60] Add more rotate tests --- test/test_transforms_v2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index f1fd3ada61e..83f40a95070 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -2022,7 +2022,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): check_functional(F.rotate, make_input(), **self._MINIMAL_AFFINE_KWARGS) @@ -2044,7 +2044,7 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform(self, make_input, device): From a43e534fe6198aba20a44eeeb798a2fb0f836261 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 14:01:48 +0100 Subject: [PATCH 49/60] Add pad and crop tests --- test/test_transforms_v2.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 83f40a95070..ec08db5237a 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -3332,6 +3332,13 @@ def test_kernel_bounding_boxes(self, kwargs, format, dtype, device): bounding_boxes 
= make_bounding_boxes(self.INPUT_SIZE, format=format, dtype=dtype, device=device) check_kernel(F.crop_bounding_boxes, bounding_boxes, format=format, **kwargs) + @pytest.mark.parametrize("kwargs", CORRECTNESS_CROP_KWARGS) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, kwargs, dtype, device): + keypoints = make_keypoints(self.INPUT_SIZE, dtype=dtype, device=device) + check_kernel(F.crop_keypoints, keypoints, **kwargs) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.crop_mask, make_mask(self.INPUT_SIZE), **self.MINIMAL_CROP_KWARGS) @@ -3341,7 +3348,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): check_functional(F.crop, make_input(self.INPUT_SIZE), **self.MINIMAL_CROP_KWARGS) @@ -3376,7 +3383,7 @@ def test_functional_image_correctness(self, kwargs): ) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_transform(self, param, value, make_input): input = make_input(self.INPUT_SIZE) @@ -4466,7 +4473,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): check_functional(F.pad, make_input(), padding=[1]) @@ -4491,7 +4498,7 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_transform(self, make_input): check_transform(transforms.Pad(padding=[1]), make_input()) @@ -4565,6 +4572,7 @@ def test_bounding_boxes_correctness(self, padding, format, dtype, device, fn): expected = self._reference_pad_bounding_boxes(bounding_boxes, padding=padding) assert_equal(actual, expected) + #TODOKP need keypoint correctness tests class TestCenterCrop: From 3441ab9f7f1cf03f800dd299259726d33e15a727 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 14:07:41 +0100 Subject: [PATCH 50/60] Add tests for perspective --- test/test_transforms_v2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index ec08db5237a..be9fc25ea60 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4854,7 +4854,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, 
make_input): check_functional(F.perspective, make_input(), **self.MINIMAL_KWARGS) @@ -4877,7 +4877,7 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize("distortion_scale", [0.5, 0.0, 1.0]) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_transform(self, distortion_scale, make_input): check_transform(transforms.RandomPerspective(distortion_scale=distortion_scale, p=1), make_input()) From 74688d620a8a6895ae128e91dbc123575ac331b0 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 14:16:58 +0100 Subject: [PATCH 51/60] More tests --- test/test_transforms_v2.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index be9fc25ea60..aa674673308 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -3206,7 +3206,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): input = make_input() @@ -4214,13 +4214,14 @@ class TestResizedCrop: (F.resized_crop_mask, make_segmentation_mask), (F.resized_crop_mask, make_detection_masks), (F.resized_crop_video, make_video), + (F.resized_crop_keypoints, make_keypoints), ], ) def test_kernel(self, kernel, make_input): input = make_input(self.INPUT_SIZE) if isinstance(input, tv_tensors.BoundingBoxes): extra_kwargs = dict(format=input.format) - elif isinstance(input, tv_tensors.Mask): + elif isinstance(input, (tv_tensors.Mask, tv_tensors.KeyPoints)): extra_kwargs = dict() else: extra_kwargs = dict(antialias=True) @@ -4229,7 +4230,7 @@ def test_kernel(self, kernel, make_input): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): check_functional( @@ -4257,7 +4258,7 @@ def test_functional_signature(self, kernel, input_type): ) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_transform(self, param, value, make_input): check_transform( @@ -4623,7 +4624,7 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_functional(self, make_input): check_functional(F.center_crop, make_input(self.INPUT_SIZE), output_size=self.OUTPUT_SIZES[0]) @@ -4645,7 +4646,7 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + 
[make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], ) def test_transform(self, make_input): check_transform(transforms.CenterCrop(self.OUTPUT_SIZES[0]), make_input(self.INPUT_SIZE)) From a30737c740d825cf6d479a8c8edb3e3319dfacfb Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Wed, 4 Jun 2025 14:23:01 +0100 Subject: [PATCH 52/60] lint + Temporarily fix the weird TestColorJitter test failure --- test/test_transforms_v2.py | 155 +++++++++++++++++++++++++++++++++---- 1 file changed, 138 insertions(+), 17 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index aa674673308..0df77f70c59 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -1461,7 +1461,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.affine, make_input(), **self._MINIMAL_AFFINE_KWARGS) @@ -1483,7 +1491,15 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform(self, make_input, device): @@ -1645,7 +1661,6 @@ def _reference_affine_keypoints(self, keypoints, *, angle, translate, scale, she ), ) - @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) @pytest.mark.parametrize("scale", _CORRECTNESS_AFFINE_KWARGS["scale"]) @@ -2022,7 +2037,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.rotate, make_input(), **self._MINIMAL_AFFINE_KWARGS) @@ -2044,7 +2067,15 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) @pytest.mark.parametrize("device", cpu_and_cuda()) def test_transform(self, make_input, device): @@ -3206,7 +3237,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): input = make_input() @@ -3348,7 +3387,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, 
make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.crop, make_input(self.INPUT_SIZE), **self.MINIMAL_CROP_KWARGS) @@ -3383,7 +3430,15 @@ def test_functional_image_correctness(self, kwargs): ) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_transform(self, param, value, make_input): input = make_input(self.INPUT_SIZE) @@ -4230,7 +4285,15 @@ def test_kernel(self, kernel, make_input): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional( @@ -4258,7 +4321,15 @@ def test_functional_signature(self, kernel, input_type): ) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_transform(self, param, value, make_input): check_transform( @@ -4474,7 +4545,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.pad, make_input(), padding=[1]) @@ -4499,7 +4578,15 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_transform(self, make_input): check_transform(transforms.Pad(padding=[1]), make_input()) @@ -4573,7 +4660,8 @@ def test_bounding_boxes_correctness(self, padding, format, dtype, device, fn): expected = self._reference_pad_bounding_boxes(bounding_boxes, padding=padding) assert_equal(actual, expected) - #TODOKP need keypoint correctness tests + + # TODOKP need keypoint correctness tests class TestCenterCrop: @@ -4624,7 +4712,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.center_crop, make_input(self.INPUT_SIZE), output_size=self.OUTPUT_SIZES[0]) @@ -4646,7 +4742,15 @@ def test_functional_signature(self, kernel, 
input_type): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_transform(self, make_input): check_transform(transforms.CenterCrop(self.OUTPUT_SIZES[0]), make_input(self.INPUT_SIZE)) @@ -4855,7 +4959,15 @@ def test_kernel_video(self): @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_functional(self, make_input): check_functional(F.perspective, make_input(), **self.MINIMAL_KWARGS) @@ -4878,7 +4990,15 @@ def test_functional_signature(self, kernel, input_type): @pytest.mark.parametrize("distortion_scale", [0.5, 0.0, 1.0]) @pytest.mark.parametrize( "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints], + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, + ], ) def test_transform(self, distortion_scale, make_input): check_transform(transforms.RandomPerspective(distortion_scale=distortion_scale, p=1), make_input()) @@ -5831,6 +5951,7 @@ def test_transform(self, make_input, dtype, device): "will degenerate to that anyway." ) + torch.manual_seed(1) # TODOKP why is this needed now?? check_transform( transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25), make_input(dtype=dtype, device=device), From e88e19feac53ce06e397666f6cca383ed3883133 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 5 Jun 2025 10:01:37 +0100 Subject: [PATCH 53/60] Remove one TODO, the number of leading dims is correctly tested --- torchvision/tv_tensors/_keypoints.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 55772cbecac..80ed194bc63 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -8,9 +8,6 @@ from ._tv_tensor import TVTensor -# TODOKP do we support arbitrary number of leading dimension, as claimed in the -# docs? -# This doesn't seem to be tested? class KeyPoints(TVTensor): """:class:`torch.Tensor` subclass for tensors with shape ``[..., 2]`` that represent points in an image. 
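Note on the change above: the claim that KeyPoints accepts an arbitrary number of leading dimensions (shape ``[..., 2]``) is now covered by the test suite, so the TODO could be dropped. A minimal usage sketch of that behavior, assuming the KeyPoints class introduced earlier in this series is importable from torchvision.tv_tensors:

    # Hypothetical usage sketch, not part of the patch series.
    import torch
    from torchvision import tv_tensors

    # Two instances with four XY points each -> shape [2, 4, 2].
    points = torch.rand(2, 4, 2) * 16
    kp = tv_tensors.KeyPoints(points, canvas_size=(16, 16))
    assert kp.shape == (2, 4, 2)
    assert kp.canvas_size == (16, 16)
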
From 6977c782d66380eae451b2fe6b9328b421fc3901 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 5 Jun 2025 10:07:15 +0100 Subject: [PATCH 54/60] Add correctness test for pad --- test/test_transforms_v2.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 0df77f70c59..41aa3068e68 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4661,7 +4661,36 @@ def test_bounding_boxes_correctness(self, padding, format, dtype, device, fn): assert_equal(actual, expected) - # TODOKP need keypoint correctness tests + def _reference_pad_keypoints(self, keypoints, *, padding): + if isinstance(padding, int): + padding = [padding] + left, top, right, bottom = padding * (4 // len(padding)) + + affine_matrix = np.array( + [ + [1, 0, left], + [0, 1, top], + ], + ) + + height = keypoints.canvas_size[0] + top + bottom + width = keypoints.canvas_size[1] + left + right + + return reference_affine_keypoints_helper( + keypoints, affine_matrix=affine_matrix, new_canvas_size=(height, width) + ) + + @pytest.mark.parametrize("padding", CORRECTNESS_PADDINGS) + @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + @pytest.mark.parametrize("fn", [F.pad, transform_cls_to_functional(transforms.Pad)]) + def test_keypoints_correctness(self, padding, dtype, device, fn): + keypoints = make_keypoints(dtype=dtype, device=device) + + actual = fn(keypoints, padding=padding) + expected = self._reference_pad_keypoints(keypoints, padding=padding) + + assert_equal(actual, expected) class TestCenterCrop: From 651d17243d0bd5e48ac98fc16873e32adcc1254a Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 5 Jun 2025 10:10:20 +0100 Subject: [PATCH 55/60] Remove sanitize_keypoints, it's not clear that it's strictly needed --- torchvision/transforms/v2/_utils.py | 13 ---- torchvision/transforms/v2/functional/_misc.py | 76 ------------------- 2 files changed, 89 deletions(-) diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index bc79c266bae..5ed871d0554 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -165,19 +165,6 @@ def get_bounding_boxes(flat_inputs: list[Any]) -> tv_tensors.BoundingBoxes: raise ValueError("No bounding boxes were found in the sample") -# TODOKP this is unused and un-tested -def get_keypoints(flat_inputs: list[Any]) -> tv_tensors.KeyPoints: - """Returns the KeyPoints in the input. - - Assumes only one ``KeyPoints`` object is present - """ - generator = (inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.KeyPoints)) - try: - return next(generator) - except StopIteration: - raise ValueError("No Keypoints were found in the sample.") - - def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]: """Return Channel, Height, and Width.""" chws = { diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index b9ffb216aa0..7ad50b54061 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -328,82 +328,6 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo return inpt.to(dtype) -# TODOKP This is untested. 
Also there's no corresponding transform class -def sanitize_keypoints( - keypoints: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None -) -> tuple[torch.Tensor, torch.Tensor]: - """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. - - This removes the keypoints that are outside of their corresponing image. - - It is recommended to call it at the end of a pipeline, before passing the - input to the models. It is critical to call this transform if - :class:`~torchvision.transforms.v2.RandomIoUCrop` was called. - If you want to be extra careful, you may call it after all transforms that - may modify the key points but once at the end should be enough in most - cases. - - .. note:: - - Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`. - TODOKP Is this desirable? We probably want keypoints to behave the same as bboxes? - - Raises: - ValueError: If the keypoints are not passed as a two dimensional tensor. - - Args: - keypoints (torch.Tensor or :class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being sanitized. - Should be of shape ``[N, 2]`` - canvas_size (Optional[tuple[int, int]], optional): The canvas_size of the bounding boxes - (size of the corresponding image/video). - Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. - - Returns: - out (tuple of Tensors): The subset of valid bounding boxes, and the corresponding indexing mask. - The mask can then be used to subset other tensors (e.g. labels) that are associated with the bounding boxes. - """ - if not keypoints.ndim == 2: - if keypoints.ndim < 2: - raise ValueError("Cannot sanitize a single Keypoint") - raise ValueError( - "Cannot sanitize KeyPoints structure that are not 2D. " - f"Expected shape to be (N, 2), got {keypoints.shape} ({keypoints.ndim=}, not 2)" - ) - if torch.jit.is_scripting() or is_pure_tensor(keypoints): - if canvas_size is None: - raise ValueError( - "canvas_size cannot be None if keypoints is a pure tensor. " - f"Got canvas_size={canvas_size}." - "Set that to appropriate values or pass keypoints as a tv_tensors.KeyPoints object." 
- ) - valid = _get_sanitize_keypoints_mask( - keypoints, - canvas_size=canvas_size, - ) - return keypoints[valid], valid - - if not isinstance(keypoints, tv_tensors.KeyPoints): - raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") - - valid = _get_sanitize_keypoints_mask( - keypoints, - canvas_size=keypoints.canvas_size, - ) - return tv_tensors.wrap(keypoints[valid], like=keypoints), valid - - -# TODOKP Untested, see above -def _get_sanitize_keypoints_mask( - keypoints: torch.Tensor, - canvas_size: tuple[int, int], -) -> torch.Tensor: - image_h, image_w = canvas_size - x = keypoints[:, 0] - y = keypoints[:, 1] - - return (0 < x) & (x < image_w) & (0 < y) & (y < image_h) - - def sanitize_bounding_boxes( bounding_boxes: torch.Tensor, format: Optional[tv_tensors.BoundingBoxFormat] = None, From 211acf2a2c0853c86986e8cf5354790e580182b1 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 5 Jun 2025 10:41:38 +0100 Subject: [PATCH 56/60] Add ClampKeyPoints and corresponding test --- docs/source/transforms.rst | 2 ++ test/test_transforms_v2.py | 28 +++++++++++++++++++ torchvision/transforms/v2/__init__.py | 2 +- torchvision/transforms/v2/_meta.py | 12 ++++++++ .../transforms/v2/functional/__init__.py | 3 +- torchvision/transforms/v2/functional/_meta.py | 9 +++--- 6 files changed, 49 insertions(+), 7 deletions(-) diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index d2fed552c4f..99faac5a651 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -408,6 +408,7 @@ Miscellaneous v2.Lambda v2.SanitizeBoundingBoxes v2.ClampBoundingBoxes + v2.ClampKeyPoints v2.UniformTemporalSubsample v2.JPEG @@ -421,6 +422,7 @@ Functionals v2.functional.erase v2.functional.sanitize_bounding_boxes v2.functional.clamp_bounding_boxes + v2.functional.clamp_keypoints v2.functional.uniform_temporal_subsample v2.functional.jpeg diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 41aa3068e68..10f714311e4 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -5444,6 +5444,34 @@ def test_errors(self): def test_transform(self): check_transform(transforms.ClampBoundingBoxes(), make_bounding_boxes()) +class TestClampKeyPoints: + @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + check_kernel( + F.clamp_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + ) + + def test_functional(self): + check_functional(F.clamp_keypoints, make_keypoints()) + + def test_errors(self): + input_tv_tensor = make_keypoints() + input_pure_tensor = input_tv_tensor.as_subclass(torch.Tensor) + + with pytest.raises(ValueError, match="`canvas_size` has to be passed"): + F.clamp_keypoints(input_pure_tensor, canvas_size=None) + + with pytest.raises(ValueError, match="`canvas_size` must not be passed"): + F.clamp_keypoints(input_tv_tensor, canvas_size=input_tv_tensor.canvas_size) + + def test_transform(self): + check_transform(transforms.ClampKeyPoints(), make_keypoints()) + + class TestInvert: @pytest.mark.parametrize("dtype", [torch.uint8, torch.int16, torch.float32]) diff --git a/torchvision/transforms/v2/__init__.py b/torchvision/transforms/v2/__init__.py index 2d66917b6ea..82a131d6fbc 100644 --- a/torchvision/transforms/v2/__init__.py +++ b/torchvision/transforms/v2/__init__.py @@ -41,7 +41,7 @@ ScaleJitter, TenCrop, ) -from ._meta import ClampBoundingBoxes, 
ConvertBoundingBoxFormat +from ._meta import ClampBoundingBoxes, ClampKeyPoints, ConvertBoundingBoxFormat from ._misc import ( ConvertImageDtype, GaussianBlur, diff --git a/torchvision/transforms/v2/_meta.py b/torchvision/transforms/v2/_meta.py index a8ca79598b2..e9a0987320d 100644 --- a/torchvision/transforms/v2/_meta.py +++ b/torchvision/transforms/v2/_meta.py @@ -34,3 +34,15 @@ class ClampBoundingBoxes(Transform): def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes: return F.clamp_bounding_boxes(inpt) # type: ignore[return-value] + +class ClampKeyPoints(Transform): + """Clamp keypoints to their corresponding image dimensions. + + The clamping is done according to the keypoints' ``canvas_size`` meta-data. + + """ + + _transformed_types = (tv_tensors.KeyPoints,) + + def transform(self, inpt: tv_tensors.KeyPoints, params: dict[str, Any]) -> tv_tensors.KeyPoints: + return F.clamp_keypoints(inpt) # type: ignore[return-value] diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index e651bbd9257..7792a7366e6 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -6,7 +6,7 @@ clamp_bounding_boxes, clamp_keypoints, convert_bounding_box_format, - convert_bounding_boxes_to_points, + convert_bounding_boxes_to_points, #TODOKP also needs docs get_dimensions_image, get_dimensions_video, get_dimensions, @@ -157,7 +157,6 @@ normalize_image, normalize_video, sanitize_bounding_boxes, - sanitize_keypoints, to_dtype, to_dtype_image, to_dtype_video, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 4e812fcaaf7..987afedc917 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -457,12 +457,13 @@ def clamp_bounding_boxes( def _clamp_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> torch.Tensor: dtype = keypoints.dtype keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() - keypoints[..., 0].clamp_(min=0, max=canvas_size[1]) - keypoints[..., 1].clamp_(min=0, max=canvas_size[0]) + # Note that max is canvas_size[i] - 1 and not can canvas_size[i] like for + # bounding boxes. 
+ keypoints[..., 0].clamp_(min=0, max=canvas_size[1] - 1) + keypoints[..., 1].clamp_(min=0, max=canvas_size[0] - 1) return keypoints.to(dtype=dtype) -# TODOKP there is no corresponding transform and this isn't tested def clamp_keypoints( inpt: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None, @@ -473,7 +474,7 @@ def clamp_keypoints( if torch.jit.is_scripting() or is_pure_tensor(inpt): if canvas_size is None: - raise ValueError("For pure tensor inputs, `canvas_size` have to be passed.") + raise ValueError("For pure tensor inputs, `canvas_size` has to be passed.") return _clamp_keypoints(inpt, canvas_size=canvas_size) elif isinstance(inpt, tv_tensors.KeyPoints): if canvas_size is not None: From 0e1caea58fdaecb6945daf2ffce51c98bede3743 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 5 Jun 2025 11:28:30 +0100 Subject: [PATCH 57/60] Address bbox to keypoint conversion --- docs/source/transforms.rst | 1 + test/test_transforms_v2.py | 67 ++++++++++--------- .../transforms/v2/functional/__init__.py | 2 +- .../transforms/v2/functional/_geometry.py | 4 +- torchvision/transforms/v2/functional/_meta.py | 28 +++----- 5 files changed, 47 insertions(+), 55 deletions(-) diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 99faac5a651..a332da15f70 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -459,6 +459,7 @@ functionals v2.functional.to_pil_image v2.functional.to_dtype v2.functional.convert_bounding_box_format + v2.functional.convert_bounding_boxes_to_keypoints Deprecated diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 10f714311e4..cb923d4c6bf 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6008,7 +6008,9 @@ def test_transform(self, make_input, dtype, device): "will degenerate to that anyway." ) - torch.manual_seed(1) # TODOKP why is this needed now?? + # TODO needed to add seed after KeyPoints PR, not sure why? failure + # wasn't really significant anyway. + torch.manual_seed(1) check_transform( transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25), make_input(dtype=dtype, device=device), @@ -7194,36 +7196,35 @@ def test_no_valid_input(self, query): with pytest.raises(TypeError, match="No image"): query(["blah"]) - # TODOKP this is tested here in TestUtils but defined in meta - @pytest.mark.parametrize( - "boxes", - [ - tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 2.0, 2.0]]), format="XYXY", canvas_size=(4, 4)), - tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0]]), format="XYWH", canvas_size=(4, 4)), - tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0]]), format="CXCYWH", canvas_size=(4, 4)), - tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4)), - tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4)), - tv_tensors.BoundingBoxes( - torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XYXYXYXY", canvas_size=(4, 4) - ), - ], - ) - def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): - kp = F.convert_bounding_boxes_to_points(boxes) - assert kp.shape == (boxes.shape[0], 4, 2) - assert kp.dtype == boxes.dtype - - # We manually convert the kp back into a BoundingBoxes, and convert that - # bbox back into the original `boxes` format to compare against it. 
- if F._meta.is_rotated_bounding_box_format(boxes.format): - reconverted = kp.reshape(-1, 8) - intermediate_format = tv_tensors.BoundingBoxFormat.XYXYXYXY - else: - reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) - intermediate_format = tv_tensors.BoundingBoxFormat.XYXY +@pytest.mark.parametrize( + "boxes", + [ + tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 2.0, 2.0]]), format="XYXY", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0]]), format="XYWH", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0]]), format="CXCYWH", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1.0, 1.0, 45]]), format="CXCYWHR", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes(torch.tensor([[1.0, 1.0, 1.0, 1.0, 45.0]]), format="XYWHR", canvas_size=(4, 4)), + tv_tensors.BoundingBoxes( + torch.tensor([[1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0]]), format="XYXYXYXY", canvas_size=(4, 4) + ), + ], +) +def test_convert_bounding_boxes_to_keypoints(boxes: tv_tensors.BoundingBoxes): + kp = F.convert_bounding_boxes_to_keypoints(boxes) + assert kp.shape == (boxes.shape[0], 4, 2) + assert kp.dtype == boxes.dtype + + # We manually convert the kp back into a BoundingBoxes, and convert that + # bbox back into the original `boxes` format to compare against it. + if F._meta.is_rotated_bounding_box_format(boxes.format): + reconverted = kp.reshape(-1, 8) + intermediate_format = tv_tensors.BoundingBoxFormat.XYXYXYXY + else: + reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) + intermediate_format = tv_tensors.BoundingBoxFormat.XYXY - reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes(reconverted, format=intermediate_format, canvas_size=kp.canvas_size), - new_format=boxes.format, - ) - assert_equal(reconverted_bbox, boxes, atol=1e-5, rtol=0) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes(reconverted, format=intermediate_format, canvas_size=kp.canvas_size), + new_format=boxes.format, + ) + assert_equal(reconverted_bbox, boxes, atol=1e-5, rtol=0) diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index 7792a7366e6..170bbcca827 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -6,7 +6,7 @@ clamp_bounding_boxes, clamp_keypoints, convert_bounding_box_format, - convert_bounding_boxes_to_points, #TODOKP also needs docs + convert_bounding_boxes_to_keypoints, get_dimensions_image, get_dimensions_video, get_dimensions, diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index e38574d4378..e0c3a5a899b 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -70,7 +70,7 @@ def horizontal_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, i shape = keypoints.shape keypoints = keypoints.clone().reshape(-1, 2) keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1]).neg_() - return keypoints.reshape(shape) + return clamp_keypoints(keypoints.reshape(shape), canvas_size=canvas_size) @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @@ -164,7 +164,7 @@ def vertical_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int shape = keypoints.shape keypoints = keypoints.clone().reshape(-1, 2) keypoints[..., 1] = keypoints[..., 1].sub_(canvas_size[0]).neg_() - 
return keypoints.reshape(shape) + return clamp_keypoints(keypoints.reshape(shape), canvas_size=canvas_size) def vertical_flip_bounding_boxes( diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 987afedc917..97def1a4082 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -189,17 +189,10 @@ def _xyxyxyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 3], [4, 5], [6, 7]]] -# TODOKP Should this be in the box ops? Or in utils? rename points->keypoints. -def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: +# Note: this doesn't have a corresponding transforms class. +def convert_bounding_boxes_to_keypoints(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: """Convert a set of bounding boxes to its edge points. - .. note:: - - This handles rotated :class:`tv_tensors.BoundingBoxes` formats - by first converting them to XYXYXYXY format. - - Due to floating-point approximation, this may not be an exact computation. - Args: bounding_boxes (tv_tensors.BoundingBoxes): A set of ``N`` bounding boxes (of shape ``[N, 4]``) @@ -207,22 +200,19 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) - tv_tensors.KeyPoints: The edges, as a polygon of shape ``[N, 4, 2]`` """ if is_rotated_bounding_box_format(bounding_boxes.format): - # We are working on a rotated bounding box - bbox = _convert_bounding_box_format( - bounding_boxes.as_subclass(torch.Tensor), - old_format=bounding_boxes.format, - new_format=BoundingBoxFormat.XYXYXYXY, - inplace=False, - ) - return tv_tensors.KeyPoints(_xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size) + intermediate_format = BoundingBoxFormat.XYXYXYXY + to_keypoints = _xyxyxyxy_to_keypoints + else: + intermediate_format = BoundingBoxFormat.XYXY + to_keypoints = _xyxy_to_keypoints bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, - new_format=BoundingBoxFormat.XYXY, + new_format=intermediate_format, inplace=False, ) - return tv_tensors.KeyPoints(_xyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size) + return tv_tensors.KeyPoints(to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size) def _cxcywhr_to_xywhr(cxcywhr: torch.Tensor, inplace: bool) -> torch.Tensor: From 6c481cf7f1b87fa0ea9fefd108b960a37050f781 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 5 Jun 2025 11:32:31 +0100 Subject: [PATCH 58/60] Lint --- test/test_transforms_v2.py | 3 ++- torchvision/transforms/v2/_meta.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index cb923d4c6bf..77e59165b3b 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -5444,6 +5444,7 @@ def test_errors(self): def test_transform(self): check_transform(transforms.ClampBoundingBoxes(), make_bounding_boxes()) + class TestClampKeyPoints: @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) @pytest.mark.parametrize("device", cpu_and_cuda()) @@ -5472,7 +5473,6 @@ def test_transform(self): check_transform(transforms.ClampKeyPoints(), make_keypoints()) - class TestInvert: @pytest.mark.parametrize("dtype", [torch.uint8, torch.int16, torch.float32]) @pytest.mark.parametrize("device", cpu_and_cuda()) @@ -7196,6 +7196,7 @@ def test_no_valid_input(self, query): with 
pytest.raises(TypeError, match="No image"): query(["blah"]) + @pytest.mark.parametrize( "boxes", [ diff --git a/torchvision/transforms/v2/_meta.py b/torchvision/transforms/v2/_meta.py index e9a0987320d..0d938d98077 100644 --- a/torchvision/transforms/v2/_meta.py +++ b/torchvision/transforms/v2/_meta.py @@ -35,6 +35,7 @@ class ClampBoundingBoxes(Transform): def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes: return F.clamp_bounding_boxes(inpt) # type: ignore[return-value] + class ClampKeyPoints(Transform): """Clamp keypoints to their corresponding image dimensions. From da82e57c76d5545573ddbec0d1f0c33180e7030b Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 6 Jun 2025 11:26:18 +0100 Subject: [PATCH 59/60] Fix flip --- test/test_transforms_v2.py | 4 ++-- torchvision/transforms/v2/functional/_geometry.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 77e59165b3b..6d18fdf4860 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -1307,7 +1307,7 @@ def test_bounding_boxes_correctness(self, format, fn): def _reference_horizontal_flip_keypoints(self, keypoints): affine_matrix = np.array( [ - [-1, 0, keypoints.canvas_size[1]], + [-1, 0, keypoints.canvas_size[1] - 1], [0, 1, 0], ], ) @@ -1907,7 +1907,7 @@ def _reference_vertical_flip_keypoints(self, keypoints): affine_matrix = np.array( [ [1, 0, 0], - [0, -1, keypoints.canvas_size[0]], + [0, -1, keypoints.canvas_size[0] - 1], ], ) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index e0c3a5a899b..2a2a20e93ef 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -69,7 +69,7 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: def horizontal_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]): shape = keypoints.shape keypoints = keypoints.clone().reshape(-1, 2) - keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1]).neg_() + keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1] - 1).neg_() return clamp_keypoints(keypoints.reshape(shape), canvas_size=canvas_size) @@ -163,7 +163,7 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: def vertical_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> torch.Tensor: shape = keypoints.shape keypoints = keypoints.clone().reshape(-1, 2) - keypoints[..., 1] = keypoints[..., 1].sub_(canvas_size[0]).neg_() + keypoints[..., 1] = keypoints[..., 1].sub_(canvas_size[0] - 1).neg_() return clamp_keypoints(keypoints.reshape(shape), canvas_size=canvas_size) From 27886629637ef32b801b3856f1329a72e6e0d588 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 6 Jun 2025 13:11:50 +0100 Subject: [PATCH 60/60] resizedCrop test --- test/test_transforms_v2.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 6d18fdf4860..955a04d57ca 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -4380,6 +4380,18 @@ def _reference_resized_crop_bounding_boxes(self, bounding_boxes, *, top, left, h new_canvas_size=size, ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) + def test_functional_bounding_boxes_correctness(self, format): + bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format) + + actual = 
F.resized_crop(bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE) + expected = self._reference_resized_crop_bounding_boxes( + bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE + ) + + assert_equal(actual, expected) + assert_equal(F.get_size(actual), F.get_size(expected)) + def _reference_resized_crop_keypoints(self, keypoints, *, top, left, height, width, size): new_height, new_width = size @@ -4397,26 +4409,17 @@ def _reference_resized_crop_keypoints(self, keypoints, *, top, left, height, wid [0, 0, 1], ], ) - affine_matrix = (resize_affine_matrix @ crop_affine_matrix)[:2, :] - - return reference_affine_keypoints_helper( + intermediate_keypoints = reference_affine_keypoints_helper( keypoints, - affine_matrix=affine_matrix, - new_canvas_size=size, + affine_matrix=crop_affine_matrix, + new_canvas_size=(height, width), ) - - @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) - def test_functional_bounding_boxes_correctness(self, format): - bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format) - - actual = F.resized_crop(bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE) - expected = self._reference_resized_crop_bounding_boxes( - bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE + return reference_affine_keypoints_helper( + intermediate_keypoints, + affine_matrix=resize_affine_matrix, + new_canvas_size=size, ) - assert_equal(actual, expected) - assert_equal(F.get_size(actual), F.get_size(expected)) - def test_functional_keypoints_correctness(self): keypoints = make_keypoints(self.INPUT_SIZE)