Commit 98e3f0c

vladislavkruglikov authored and Alvant committed
[Bugfix] Correct adapter usage for cohere and jamba (vllm-project#8292)
Signed-off-by: Alvant <[email protected]>
1 parent 4524f2b commit 98e3f0c
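
For context, the fix works by adding the SupportsLoRA base class (imported from .interfaces in the diffs below) to each model's class declaration. A minimal sketch of this marker-interface pattern, using a simplified stand-in for the real interface in vllm/model_executor/models/interfaces.py (the actual interface carries more attributes):

from typing import ClassVar

import torch.nn as nn


class SupportsLoRA:
    """Simplified stand-in: inheriting from this marker declares
    that a model class can load LoRA adapters."""

    # Maps a fused module name to the original projections it packs,
    # so adapter weights can be routed to the right slices.
    packed_modules_mapping: ClassVar[dict] = {}


class CohereForCausalLM(nn.Module, SupportsLoRA):
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    }


# The marker makes the capability discoverable at runtime:
print(issubclass(CohereForCausalLM, SupportsLoRA))  # True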

File tree

vllm/model_executor/models/commandr.py
vllm/model_executor/models/jamba.py

2 files changed: +6 −3 lines changed

vllm/model_executor/models/commandr.py
Lines changed: 3 additions & 2 deletions

@@ -47,6 +47,8 @@
 from vllm.model_executor.utils import set_weight_attrs
 from vllm.sequence import IntermediateTensors
 
+from .interfaces import SupportsLoRA
+
 
 @torch.compile
 def layer_norm_func(hidden_states, weight, variance_epsilon):
@@ -292,8 +294,7 @@ def forward(
         return hidden_states
 
 
-class CohereForCausalLM(nn.Module):
-
+class CohereForCausalLM(nn.Module, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",
vllm/model_executor/models/jamba.py
Lines changed: 3 additions & 1 deletion

@@ -38,6 +38,8 @@
 from vllm.worker.model_runner import (_BATCH_SIZES_TO_CAPTURE,
                                       _get_graph_batch_size)
 
+from .interfaces import SupportsLoRA
+
 KVCache = Tuple[torch.Tensor, torch.Tensor]
 
 
@@ -539,7 +541,7 @@ def forward(
         return hidden_states
 
 
-class JambaForCausalLM(nn.Module, HasInnerState):
+class JambaForCausalLM(nn.Module, HasInnerState, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",
