Skip to content

Commit ca7d46b

Browse files
DarkLight1337 and DefTruth
authored and committed
[Bugfix] Explicitly disable Phi-4-multimodal in V1 (vllm-project#14889)
Signed-off-by: DarkLight1337 <[email protected]>
Signed-off-by: DefTruth <[email protected]>
1 parent 40e7f44 commit ca7d46b

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

vllm/model_executor/models/phi4mm.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
from vllm.transformers_utils.tokenizer import cached_tokenizer_from_config
3434

3535
from .idefics2_vision_model import Idefics2VisionTransformer
36-
from .interfaces import SupportsLoRA, SupportsMultiModal
36+
from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsV0Only
3737
from .phi4mm_audio import AudioEmbedding
3838
from .utils import AutoWeightsLoader, WeightsMapper, maybe_prefix
3939

@@ -1433,7 +1433,8 @@ def cat_with_pad(tensors, dim, padding_value=0):
14331433
"image", get_max_phi4mm_image_tokens)
14341434
@INPUT_REGISTRY.register_dummy_data(dummy_data_for_phi4mm)
14351435
@INPUT_REGISTRY.register_input_processor(input_processor_for_phi4mm)
1436-
class Phi4MMForCausalLM(nn.Module, SupportsLoRA, SupportsMultiModal):
1436+
class Phi4MMForCausalLM(nn.Module, SupportsLoRA, SupportsMultiModal,
1437+
SupportsV0Only):
14371438
"""
14381439
Implements the Phi-4-multimodal-instruct model in vLLM.
14391440
"""

0 commit comments

Comments (0)