
Commit b45969a

[prototype] Clean up features area (#6834)
* Clean-ups in the `features` area
* Remove unnecessary imports
1 parent 7de68b0 · commit b45969a

File tree

7 files changed: +11 / -46 lines

torchvision/prototype/features/__init__.py
torchvision/prototype/features/_bounding_box.py
torchvision/prototype/features/_image.py
torchvision/prototype/features/_mask.py
torchvision/prototype/features/_video.py
torchvision/prototype/transforms/functional/_deprecated.py
torchvision/prototype/transforms/functional/_misc.py
torchvision/prototype/features/__init__.py

Lines changed: 2 additions & 19 deletions

@@ -1,24 +1,7 @@
 from ._bounding_box import BoundingBox, BoundingBoxFormat
 from ._encoded import EncodedData, EncodedImage
 from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
-from ._image import (
-    ColorSpace,
-    Image,
-    ImageType,
-    ImageTypeJIT,
-    LegacyImageType,
-    LegacyImageTypeJIT,
-    TensorImageType,
-    TensorImageTypeJIT,
-)
+from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
 from ._label import Label, OneHotLabel
 from ._mask import Mask
-from ._video import (
-    LegacyVideoType,
-    LegacyVideoTypeJIT,
-    TensorVideoType,
-    TensorVideoTypeJIT,
-    Video,
-    VideoType,
-    VideoTypeJIT,
-)
+from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
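
The flattening above is purely cosmetic except for the dropped Legacy* aliases. A minimal sketch, assuming it is run against a checkout of this commit, of what stays importable and what goes away:

from torchvision.prototype import features

# Still re-exported by the flattened one-line imports:
assert hasattr(features, "ImageTypeJIT") and hasattr(features, "VideoType")

# Gone after this commit: the Legacy* aliases were removed from _image.py
# and _video.py and are therefore no longer re-exported here.
assert not hasattr(features, "LegacyImageType")
assert not hasattr(features, "LegacyVideoTypeJIT")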

torchvision/prototype/features/_bounding_box.py

Lines changed: 0 additions & 12 deletions

@@ -61,18 +61,6 @@ def wrap_like(
     def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(format=self.format, spatial_size=self.spatial_size)

-    def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
-        if isinstance(format, str):
-            format = BoundingBoxFormat.from_str(format.upper())
-
-        return BoundingBox.wrap_like(
-            self,
-            self._F.convert_format_bounding_box(
-                self.as_subclass(torch.Tensor), old_format=self.format, new_format=format
-            ),
-            format=format,
-        )
-
     def horizontal_flip(self) -> BoundingBox:
         output = self._F.horizontal_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
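
With BoundingBox.to_format gone, format conversion goes through the functional op that the deleted method wrapped. A hedged sketch of the replacement call, using only names visible in this diff (the exact BoundingBox constructor signature is an assumption):

import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

bbox = features.BoundingBox(
    torch.tensor([[10.0, 10.0, 20.0, 20.0]]),
    format=features.BoundingBoxFormat.XYXY,
    spatial_size=(32, 32),
)

# Equivalent of the removed bbox.to_format("xywh"): call the functional op
# on the plain tensor view and re-wrap, as the deleted method did internally.
converted = features.BoundingBox.wrap_like(
    bbox,
    F.convert_format_bounding_box(
        bbox.as_subclass(torch.Tensor),
        old_format=bbox.format,
        new_format=features.BoundingBoxFormat.XYWH,
    ),
    format=features.BoundingBoxFormat.XYWH,
)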

torchvision/prototype/features/_image.py

Lines changed: 2 additions & 4 deletions

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union

 import PIL.Image
 import torch
@@ -104,7 +104,7 @@ def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]

     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]

     @property
     def num_channels(self) -> int:
@@ -285,7 +285,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image:

 ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
 ImageTypeJIT = torch.Tensor
-LegacyImageType = Union[torch.Tensor, PIL.Image.Image]
-LegacyImageTypeJIT = torch.Tensor
 TensorImageType = Union[torch.Tensor, Image]
 TensorImageTypeJIT = torch.Tensor
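
The cast-to-ignore swap in spatial_size (the same one-line change recurs in _mask.py and _video.py below) drops the runtime import and per-call indirection of typing.cast: mypy infers Tuple[int, ...] for tuple(self.shape[-2:]), which does not match the declared Tuple[int, int] return type, so either mechanism is needed to silence it. A standalone sketch of the pattern:

from typing import Tuple

def spatial_size(shape: Tuple[int, ...]) -> Tuple[int, int]:
    # mypy sees Tuple[int, ...] here; the ignore narrows it without the
    # runtime no-op call that typing.cast would add.
    return tuple(shape[-2:])  # type: ignore[return-value]

print(spatial_size((3, 32, 48)))  # (32, 48)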

torchvision/prototype/features/_mask.py

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union

 import torch
 from torchvision.transforms import InterpolationMode
@@ -34,7 +34,7 @@ def wrap_like(

     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]

     def horizontal_flip(self) -> Mask:
         output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))

torchvision/prototype/features/_video.py

Lines changed: 2 additions & 4 deletions

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union

 import torch
 from torchvision.transforms.functional import InterpolationMode
@@ -56,7 +56,7 @@ def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]

     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]

     @property
     def num_channels(self) -> int:
@@ -237,7 +237,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:

 VideoType = Union[torch.Tensor, Video]
 VideoTypeJIT = torch.Tensor
-LegacyVideoType = torch.Tensor
-LegacyVideoTypeJIT = torch.Tensor
 TensorVideoType = Union[torch.Tensor, Video]
 TensorVideoTypeJIT = torch.Tensor

torchvision/prototype/transforms/functional/_deprecated.py

Lines changed: 2 additions & 2 deletions

@@ -23,8 +23,8 @@ def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:


 def rgb_to_grayscale(
-    inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
-) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
+    inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
+) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
     if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
         inpt = inpt.as_subclass(torch.Tensor)
         old_color_space = None
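
Since ImageTypeJIT, VideoTypeJIT, and the removed Legacy* JIT aliases all alias torch.Tensor, the annotation swap is behavior-preserving under torch.jit.script. A hedged usage sketch (the function lives in _deprecated.py, so expect a deprecation warning on use):

import torch
from torchvision.prototype.transforms import functional as F

rgb = torch.rand(3, 16, 16)
gray = F.rgb_to_grayscale(rgb, num_output_channels=1)
print(gray.shape)  # torch.Size([1, 16, 16])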

torchvision/prototype/transforms/functional/_misc.py

Lines changed: 1 addition & 3 deletions

@@ -16,9 +16,7 @@ def normalize_image_tensor(
         raise TypeError(f"Input tensor should be a float tensor. Got {image.dtype}.")

     if image.ndim < 3:
-        raise ValueError(
-            f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {image.size()}"
-        )
+        raise ValueError(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")

     if isinstance(std, (tuple, list)):
         divzero = not all(std)
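
A quick sketch of the tightened error path, assuming normalize_image_tensor is importable from the functional namespace at this commit:

import torch
from torchvision.prototype.transforms.functional import normalize_image_tensor

try:
    normalize_image_tensor(torch.rand(8, 8), mean=[0.5], std=[0.5])
except ValueError as exc:
    # The new message reports the offending shape directly, e.g.:
    # Expected tensor to be a tensor image of size (..., C, H, W). Got torch.Size([8, 8]).
    print(exc)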
