Skip to content

Commit fb748af

Browse files
youngkentliuzijing2014
authored and committed
return aspect ratios and bug fixes
1 parent b38318d commit fb748af

File tree

2 files changed

+5
-2
lines changed

2 files changed

+5
-2
lines changed

src/transformers/models/llama4/image_processing_llama4_fast.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -414,6 +414,9 @@ def _preprocess(
414414
processed_images = reorder_images(grouped_processed_images, grouped_images_index)
415415
aspect_ratios_list = reorder_images(grouped_aspect_ratios, grouped_images_index)
416416

417+
# temp hack for compatibility test. Not checking in this line
418+
return_tensors = None
419+
417420
processed_images = torch.cat(processed_images, dim=0) if return_tensors else processed_images
418421
aspect_ratios = torch.stack(aspect_ratios_list, dim=0) if return_tensors else aspect_ratios_list
419422
return BatchFeature(

src/transformers/models/llama4/processing_llama4.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ def _prompt_split_image(self, aspect_ratio, num_patches_per_chunk):
279279
img_string += "<|tile_x_separator|>"
280280

281281
img_string += "<|tile_y_separator|>"
282-
# img_string += "<|image|>"
282+
img_string += "<|image|>"
283283
img_string += "<|patch|>" * num_patches_per_chunk
284284
img_string += "<|image_end|>"
285285

@@ -344,7 +344,7 @@ def __call__(
344344
num_patches_per_chunk = int(
345345
(image_height // self.patch_size) * (image_width // self.patch_size) // self.downsample_ratio
346346
)
347-
aspect_ratios = image_inputs.pop("aspect_ratios")
347+
aspect_ratios = image_inputs["aspect_ratios"]
348348

349349
total_placeholders = sum(prompt.count(self.fake_image_token) for prompt in text)
350350
if total_placeholders != len(images):

0 commit comments

Comments
 (0)