@@ -3110,3 +3110,136 @@ def test_errors(self):
31103110 F .convert_bounding_box_format (
31113111 input_tv_tensor , old_format = input_tv_tensor .format , new_format = input_tv_tensor .format
31123112 )
3113+
3114+
class TestResizedCrop:
    """Tests for ``F.resized_crop`` and ``transforms.RandomResizedCrop``.

    Covers kernels for every tv_tensor kind, the functional/kernel signature
    match, transform round-trips, correctness against PIL (images) and an
    affine-matrix reference (bounding boxes), and constructor error handling.
    """

    # Small, non-square input so height/width mix-ups would be caught.
    INPUT_SIZE = (17, 11)
    CROP_KWARGS = dict(top=2, left=2, height=5, width=7)
    # Output deliberately differs from the crop size so resizing actually happens.
    OUTPUT_SIZE = (19, 32)

    @pytest.mark.parametrize(
        ("kernel", "make_input"),
        [
            (F.resized_crop_image, make_image),
            (F.resized_crop_bounding_boxes, make_bounding_boxes),
            (F.resized_crop_mask, make_segmentation_mask),
            (F.resized_crop_mask, make_detection_mask),
            (F.resized_crop_video, make_video),
        ],
    )
    def test_kernel(self, kernel, make_input):
        input = make_input(self.INPUT_SIZE)
        # Kernels take type-specific extras: bounding boxes need their format,
        # masks take neither format nor antialias, images/videos get antialias.
        if isinstance(input, tv_tensors.BoundingBoxes):
            extra_kwargs = dict(format=input.format)
        elif isinstance(input, tv_tensors.Mask):
            extra_kwargs = dict()
        else:
            extra_kwargs = dict(antialias=True)

        check_kernel(kernel, input, **self.CROP_KWARGS, size=self.OUTPUT_SIZE, **extra_kwargs)

    @pytest.mark.parametrize(
        "make_input",
        [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video],
    )
    def test_functional(self, make_input):
        check_functional(
            F.resized_crop, make_input(self.INPUT_SIZE), **self.CROP_KWARGS, size=self.OUTPUT_SIZE, antialias=True
        )

    @pytest.mark.parametrize(
        ("kernel", "input_type"),
        [
            (F.resized_crop_image, torch.Tensor),
            (F._resized_crop_image_pil, PIL.Image.Image),
            (F.resized_crop_image, tv_tensors.Image),
            (F.resized_crop_bounding_boxes, tv_tensors.BoundingBoxes),
            (F.resized_crop_mask, tv_tensors.Mask),
            (F.resized_crop_video, tv_tensors.Video),
        ],
    )
    def test_functional_signature(self, kernel, input_type):
        check_functional_kernel_signature_match(F.resized_crop, kernel=kernel, input_type=input_type)

    @param_value_parametrization(
        scale=[(0.1, 0.2), [0.0, 1.0]],
        ratio=[(0.3, 0.7), [0.1, 5.0]],
    )
    @pytest.mark.parametrize(
        "make_input",
        [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video],
    )
    def test_transform(self, param, value, make_input):
        check_transform(
            transforms.RandomResizedCrop(size=self.OUTPUT_SIZE, **{param: value}, antialias=True),
            make_input(self.INPUT_SIZE),
            check_v1_compatibility=dict(rtol=0, atol=1),
        )

    # `InterpolationMode.NEAREST` is modeled after the buggy `INTER_NEAREST` interpolation of CV2.
    # The PIL equivalent of `InterpolationMode.NEAREST` is `InterpolationMode.NEAREST_EXACT`
    @pytest.mark.parametrize("interpolation", set(INTERPOLATION_MODES) - {transforms.InterpolationMode.NEAREST})
    def test_functional_image_correctness(self, interpolation):
        image = make_image(self.INPUT_SIZE, dtype=torch.uint8)

        actual = F.resized_crop(
            image, **self.CROP_KWARGS, size=self.OUTPUT_SIZE, interpolation=interpolation, antialias=True
        )
        expected = F.to_image(
            F.resized_crop(
                F.to_pil_image(image), **self.CROP_KWARGS, size=self.OUTPUT_SIZE, interpolation=interpolation
            )
        )

        torch.testing.assert_close(actual, expected, atol=1, rtol=0)

    def _reference_resized_crop_bounding_boxes(self, bounding_boxes, *, top, left, height, width, size):
        """Compute the expected boxes as crop (translate) followed by resize (scale)."""
        new_height, new_width = size

        # Cropping translates coordinates by the top-left corner of the crop window.
        crop_affine_matrix = np.array(
            [
                [1, 0, -left],
                [0, 1, -top],
                [0, 0, 1],
            ],
        )
        # Resizing scales each axis by new_size / crop_size.
        resize_affine_matrix = np.array(
            [
                [new_width / width, 0, 0],
                [0, new_height / height, 0],
                [0, 0, 1],
            ],
        )
        # Compose (crop first, then resize) and drop the homogeneous row.
        affine_matrix = (resize_affine_matrix @ crop_affine_matrix)[:2, :]

        return reference_affine_bounding_boxes_helper(
            bounding_boxes,
            affine_matrix=affine_matrix,
            new_canvas_size=size,
        )

    @pytest.mark.parametrize("format", list(tv_tensors.BoundingBoxFormat))
    def test_functional_bounding_boxes_correctness(self, format):
        bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format)

        actual = F.resized_crop(bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE)
        expected = self._reference_resized_crop_bounding_boxes(
            bounding_boxes, **self.CROP_KWARGS, size=self.OUTPUT_SIZE
        )

        assert_equal(actual, expected)
        assert_equal(F.get_size(actual), F.get_size(expected))

    def test_transform_errors_warnings(self):
        with pytest.raises(ValueError, match="provide only two dimensions"):
            transforms.RandomResizedCrop(size=(1, 2, 3))

        with pytest.raises(TypeError, match="Scale should be a sequence"):
            transforms.RandomResizedCrop(size=self.INPUT_SIZE, scale=123)

        with pytest.raises(TypeError, match="Ratio should be a sequence"):
            transforms.RandomResizedCrop(size=self.INPUT_SIZE, ratio=123)

        # Descending bounds are accepted but should warn for both parameters.
        for param in ["scale", "ratio"]:
            with pytest.warns(match="Scale and ratio should be of kind"):
                transforms.RandomResizedCrop(size=self.INPUT_SIZE, **{param: [1, 0]})