2 files changed: +6 -3, both under vllm/model_executor/models.

File 1 (the CohereForCausalLM model):

@@ -47,6 +47,8 @@
 from vllm.model_executor.utils import set_weight_attrs
 from vllm.sequence import IntermediateTensors
 
+from .interfaces import SupportsLoRA
+
 
 @torch.compile
 def layer_norm_func(hidden_states, weight, variance_epsilon):
@@ -292,8 +294,7 @@ def forward(
         return hidden_states
 
 
-class CohereForCausalLM(nn.Module):
-
+class CohereForCausalLM(nn.Module, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",
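For context, here is a minimal, self-contained sketch of the marker-interface pattern this diff relies on. The names SupportsLoRA and packed_modules_mapping come straight from the diff; the exact fields and helper below are assumptions, and vLLM's actual .interfaces module may declare more (or different) members:

# Sketch only: illustrates the marker-protocol idea, not vLLM's exact code.
from typing import ClassVar, Dict, List, Literal, Protocol, runtime_checkable


@runtime_checkable
class SupportsLoRA(Protocol):
    """Marker protocol: model classes inheriting it advertise LoRA support."""

    # Static flag the engine can read without instantiating the model.
    supports_lora: ClassVar[Literal[True]] = True

    # Maps a fused module (e.g. "qkv_proj") to the separate projections it
    # packs, so per-projection LoRA weights can be sliced into the fused layer.
    packed_modules_mapping: ClassVar[Dict[str, List[str]]]


def supports_lora(model_cls: type) -> bool:
    # Class-level check; hypothetical helper, not vLLM API.
    return getattr(model_cls, "supports_lora", False)

Because the check is a class attribute rather than an isinstance test on a constructed model, the engine can reject an unsupported model before allocating any weights.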
File 2 (the JambaForCausalLM model):

@@ -38,6 +38,8 @@
 from vllm.worker.model_runner import (_BATCH_SIZES_TO_CAPTURE,
                                       _get_graph_batch_size)
 
+from .interfaces import SupportsLoRA
+
 KVCache = Tuple[torch.Tensor, torch.Tensor]
 
 
@@ -539,7 +541,7 @@ def forward(
         return hidden_states
 
 
-class JambaForCausalLM(nn.Module, HasInnerState):
+class JambaForCausalLM(nn.Module, HasInnerState, SupportsLoRA):
     packed_modules_mapping = {
         "qkv_proj": [
             "q_proj",
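Both models declare the same packed_modules_mapping. A hedged sketch of why that mapping matters for LoRA: when q_proj, k_proj, and v_proj are fused into a single qkv_proj layer, the loader must know which sub-projections a fused name packs so that adapter weights trained per projection land in the right slice. The expand_packed_names helper and the mapping literal below are illustrative, not vLLM's API:

# Illustrative only: shows the role of packed_modules_mapping when matching
# LoRA adapter weight names against a model with fused projection layers.
from typing import Dict, List

packed_modules_mapping: Dict[str, List[str]] = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"],  # as declared in the diff
}


def expand_packed_names(target_modules: List[str]) -> List[str]:
    """Replace each fused module name with the projections it packs."""
    expanded: List[str] = []
    for name in target_modules:
        expanded.extend(packed_modules_mapping.get(name, [name]))
    return expanded


# A LoRA config targeting the fused layer resolves to the per-projection
# names that adapter checkpoints actually use:
assert expand_packed_names(["qkv_proj", "o_proj"]) == [
    "q_proj", "k_proj", "v_proj", "o_proj",
]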