@@ -11,9 +11,7 @@
 import torch
 import torchvision.transforms.v2 as v2_transforms
 from common_utils import assert_close, assert_equal, set_rng_seed
-from torch import nn
 from torchvision import transforms as legacy_transforms, tv_tensors
-from torchvision._utils import sequence_to_str
 
 from torchvision.transforms import functional as legacy_F
 from torchvision.transforms.v2 import functional as prototype_F
@@ -71,63 +69,7 @@ def __init__(
 LINEAR_TRANSFORMATION_MEAN = torch.rand(36)
 LINEAR_TRANSFORMATION_MATRIX = torch.rand([LINEAR_TRANSFORMATION_MEAN.numel()] * 2)
 
-CONSISTENCY_CONFIGS = [
-    ConsistencyConfig(
-        v2_transforms.Compose,
-        legacy_transforms.Compose,
-    ),
-    ConsistencyConfig(
-        v2_transforms.RandomApply,
-        legacy_transforms.RandomApply,
-    ),
-    ConsistencyConfig(
-        v2_transforms.RandomChoice,
-        legacy_transforms.RandomChoice,
-    ),
-    ConsistencyConfig(
-        v2_transforms.RandomOrder,
-        legacy_transforms.RandomOrder,
-    ),
-]
-
-
-@pytest.mark.parametrize("config", CONSISTENCY_CONFIGS, ids=lambda config: config.legacy_cls.__name__)
-def test_signature_consistency(config):
-    legacy_params = dict(inspect.signature(config.legacy_cls).parameters)
-    prototype_params = dict(inspect.signature(config.prototype_cls).parameters)
-
-    for param in config.removed_params:
-        legacy_params.pop(param, None)
-
-    missing = legacy_params.keys() - prototype_params.keys()
-    if missing:
-        raise AssertionError(
-            f"The prototype transform does not support the parameters "
-            f"{sequence_to_str(sorted(missing), separate_last='and ')}, but the legacy transform does. "
-            f"If that is intentional, e.g. pending deprecation, please add the parameters to the `removed_params` on "
-            f"the `ConsistencyConfig`."
-        )
-
-    extra = prototype_params.keys() - legacy_params.keys()
-    extra_without_default = {
-        param
-        for param in extra
-        if prototype_params[param].default is inspect.Parameter.empty
-        and prototype_params[param].kind not in {inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD}
-    }
-    if extra_without_default:
-        raise AssertionError(
-            f"The prototype transform requires the parameters "
-            f"{sequence_to_str(sorted(extra_without_default), separate_last='and ')}, but the legacy transform does "
-            f"not. Please add a default value."
-        )
-
-    legacy_signature = list(legacy_params.keys())
-    # Since we made sure that we don't have any extra parameters without default above, we clamp the prototype signature
-    # to the same number of parameters as the legacy one
-    prototype_signature = list(prototype_params.keys())[: len(legacy_signature)]
-
-    assert prototype_signature == legacy_signature
+CONSISTENCY_CONFIGS = []
 
 
 def check_call_consistency(
@@ -288,84 +230,6 @@ def test_jit_consistency(config, args_kwargs):
     assert_close(output_prototype_scripted, output_legacy_scripted, **config.closeness_kwargs)
 
 
-class TestContainerTransforms:
-    """
-    Since we are testing containers here, we also need some transforms to wrap. Thus, testing a container transform for
-    consistency automatically tests the wrapped transforms consistency.
-
-    Instead of complicated mocking or creating custom transforms just for these tests, here we use deterministic ones
-    that were already tested for consistency above.
-    """
-
-    def test_compose(self):
-        prototype_transform = v2_transforms.Compose(
-            [
-                v2_transforms.Resize(256),
-                v2_transforms.CenterCrop(224),
-            ]
-        )
-        legacy_transform = legacy_transforms.Compose(
-            [
-                legacy_transforms.Resize(256),
-                legacy_transforms.CenterCrop(224),
-            ]
-        )
-
-        # atol=1 due to Resize v2 is using native uint8 interpolate path for bilinear and nearest modes
-        check_call_consistency(prototype_transform, legacy_transform, closeness_kwargs=dict(rtol=0, atol=1))
-
-    @pytest.mark.parametrize("p", [0, 0.1, 0.5, 0.9, 1])
-    @pytest.mark.parametrize("sequence_type", [list, nn.ModuleList])
-    def test_random_apply(self, p, sequence_type):
-        prototype_transform = v2_transforms.RandomApply(
-            sequence_type(
-                [
-                    v2_transforms.Resize(256),
-                    v2_transforms.CenterCrop(224),
-                ]
-            ),
-            p=p,
-        )
-        legacy_transform = legacy_transforms.RandomApply(
-            sequence_type(
-                [
-                    legacy_transforms.Resize(256),
-                    legacy_transforms.CenterCrop(224),
-                ]
-            ),
-            p=p,
-        )
-
-        # atol=1 due to Resize v2 is using native uint8 interpolate path for bilinear and nearest modes
-        check_call_consistency(prototype_transform, legacy_transform, closeness_kwargs=dict(rtol=0, atol=1))
-
-        if sequence_type is nn.ModuleList:
-            # quick and dirty test that it is jit-scriptable
-            scripted = torch.jit.script(prototype_transform)
-            scripted(torch.rand(1, 3, 300, 300))
-
-    # We can't test other values for `p` since the random parameter generation is different
-    @pytest.mark.parametrize("probabilities", [(0, 1), (1, 0)])
-    def test_random_choice(self, probabilities):
-        prototype_transform = v2_transforms.RandomChoice(
-            [
-                v2_transforms.Resize(256),
-                legacy_transforms.CenterCrop(224),
-            ],
-            p=probabilities,
-        )
-        legacy_transform = legacy_transforms.RandomChoice(
-            [
-                legacy_transforms.Resize(256),
-                legacy_transforms.CenterCrop(224),
-            ],
-            p=probabilities,
-        )
-
-        # atol=1 due to Resize v2 is using native uint8 interpolate path for bilinear and nearest modes
-        check_call_consistency(prototype_transform, legacy_transform, closeness_kwargs=dict(rtol=0, atol=1))
-
-
 class TestToTensorTransforms:
     def test_pil_to_tensor(self):
         prototype_transform = v2_transforms.PILToTensor()