@@ -19,6 +19,8 @@
 )
 from torchvision.transforms.functional_tensor import _pad_symmetric
 
+from torchvision.utils import _log_api_usage_once
+
 from ._meta import convert_format_bounding_box, get_spatial_size_image_pil
 
 
@@ -55,6 +57,9 @@ def horizontal_flip_video(video: torch.Tensor) -> torch.Tensor:
 
 
 def horizontal_flip(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(horizontal_flip)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -103,6 +108,9 @@ def vertical_flip_video(video: torch.Tensor) -> torch.Tensor:
 
 
 def vertical_flip(inpt: datapoints.InputTypeJIT) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(vertical_flip)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -231,6 +239,8 @@ def resize(
     max_size: Optional[int] = None,
     antialias: Optional[bool] = None,
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(resize)
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -730,6 +740,9 @@ def affine(
     fill: datapoints.FillTypeJIT = None,
     center: Optional[List[float]] = None,
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(affine)
+
     # TODO: consider deprecating integers from angle and shear on the future
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
@@ -913,6 +926,9 @@ def rotate(
     center: Optional[List[float]] = None,
     fill: datapoints.FillTypeJIT = None,
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(rotate)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1120,6 +1136,9 @@ def pad(
     fill: datapoints.FillTypeJIT = None,
     padding_mode: str = "constant",
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(pad)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1197,6 +1216,9 @@ def crop_video(video: torch.Tensor, top: int, left: int, height: int, width: int) -> torch.Tensor:
 
 
 def crop(inpt: datapoints.InputTypeJIT, top: int, left: int, height: int, width: int) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(crop)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1452,6 +1474,8 @@ def perspective(
     fill: datapoints.FillTypeJIT = None,
     coefficients: Optional[List[float]] = None,
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(perspective)
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1612,6 +1636,9 @@ def elastic(
     interpolation: InterpolationMode = InterpolationMode.BILINEAR,
     fill: datapoints.FillTypeJIT = None,
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(elastic)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1724,6 +1751,9 @@ def center_crop_video(video: torch.Tensor, output_size: List[int]) -> torch.Tensor:
 
 
 def center_crop(inpt: datapoints.InputTypeJIT, output_size: List[int]) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(center_crop)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1817,6 +1847,9 @@ def resized_crop(
     interpolation: InterpolationMode = InterpolationMode.BILINEAR,
     antialias: Optional[bool] = None,
 ) -> datapoints.InputTypeJIT:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(resized_crop)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, datapoints._datapoint.Datapoint)
     ):
@@ -1897,6 +1930,9 @@ def five_crop_video(
 def five_crop(
     inpt: ImageOrVideoTypeJIT, size: List[int]
 ) -> Tuple[ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT]:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(five_crop)
+
     # TODO: consider breaking BC here to return List[datapoints.ImageTypeJIT/VideoTypeJIT] to align this op with
     # `ten_crop`
     if isinstance(inpt, torch.Tensor) and (
@@ -1952,6 +1988,9 @@ def ten_crop_video(video: torch.Tensor, size: List[int], vertical_flip: bool = False
 def ten_crop(
     inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT], size: List[int], vertical_flip: bool = False
 ) -> Union[List[datapoints.ImageTypeJIT], List[datapoints.VideoTypeJIT]]:
+    if not torch.jit.is_scripting():
+        _log_api_usage_once(ten_crop)
+
     if isinstance(inpt, torch.Tensor) and (
         torch.jit.is_scripting() or not isinstance(inpt, (datapoints.Image, datapoints.Video))
     ):
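
Every dispatcher touched by this diff gains the same guard. A minimal sketch of the pattern, with `my_flip` as a hypothetical stand-in for any of the functions above and a placeholder body in place of the real per-type kernel dispatch:

import torch
from torchvision.utils import _log_api_usage_once


def my_flip(inpt: torch.Tensor) -> torch.Tensor:
    # Record one usage event for this API, but only in eager mode:
    # torch.jit.is_scripting() returns True inside TorchScript, and
    # _log_api_usage_once is not scriptable, so the branch must be skipped.
    if not torch.jit.is_scripting():
        _log_api_usage_once(my_flip)
    # Placeholder body; the real dispatchers route to per-type kernels.
    return inpt.flip([-1])

Because `torch.jit.is_scripting()` is resolved to True at compile time, the TorchScript compiler eliminates the logging branch entirely, so `torch.jit.script(my_flip)` still compiles even though the helper itself cannot be scripted.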