
Commit f22cb28

refactor: remove unused layer_idx parameter from DeepseekV2Attention
Signed-off-by: Travis Johnson <[email protected]>
1 parent aef64ee commit f22cb28

File tree

1 file changed: +0 -3 lines changed


vllm/model_executor/models/deepseek_v2.py

Lines changed: 0 additions & 3 deletions
@@ -182,11 +182,9 @@ def __init__(
         max_position_embeddings: int = 8192,
         cache_config: Optional[CacheConfig] = None,
         quant_config: Optional[QuantizationConfig] = None,
-        layer_idx=None,
         prefix: str = "",
     ) -> None:
         super().__init__()
-        self.layer_idx = layer_idx
         self.hidden_size = hidden_size
         self.qk_nope_head_dim = qk_nope_head_dim
         self.qk_rope_head_dim = qk_rope_head_dim
@@ -353,7 +351,6 @@ def __init__(
             max_position_embeddings=max_position_embeddings,
             cache_config=cache_config,
             quant_config=quant_config,
-            layer_idx=layer_idx,
             prefix=f"{prefix}.self_attn",
         )
         if (config.n_routed_experts is not None
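
For context, a minimal sketch of what this commit leaves behind: the constructor signature without layer_idx and a call site that identifies the sub-module only through the dotted prefix. The class name, the parameter names, and the prefix=f"{prefix}.self_attn" convention are taken from the diff above; the stub config classes, the constructor body, and the numeric values in the usage example are illustrative assumptions, not the actual vLLM source.

from typing import Optional


# Stand-in stubs so the sketch is self-contained; the real CacheConfig and
# QuantizationConfig live in vLLM's config modules.
class CacheConfig: ...
class QuantizationConfig: ...


class DeepseekV2Attention:
    def __init__(
        self,
        hidden_size: int,
        qk_nope_head_dim: int,
        qk_rope_head_dim: int,
        max_position_embeddings: int = 8192,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        # layer_idx is gone: per the commit message it was unused (it was
        # stored but never read), so dropping it changes no behavior.
        prefix: str = "",
    ) -> None:
        self.hidden_size = hidden_size
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.prefix = prefix


# Call-site shape after the change (values are placeholders): the decoder
# layer no longer forwards layer_idx, only the prefix naming this sub-module.
attn = DeepseekV2Attention(
    hidden_size=5120,
    qk_nope_head_dim=128,
    qk_rope_head_dim=64,
    prefix="model.layers.0.self_attn",
)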
