
Commit 747e90b

fix linter
1 parent a2a2663 commit 747e90b

File tree: 1 file changed, +7 −6 lines


vllm/model_executor/models/mixtral.py

Lines changed: 7 additions & 6 deletions
@@ -25,6 +25,7 @@
 from typing import Iterable, List, Optional, Tuple

 import torch
+import torch.nn.functional as F
 from torch import nn
 from transformers import MixtralConfig

@@ -183,12 +184,12 @@ def process_weights_after_loading(self):
         # Fp8 is the only case where we need to process after loading.
         if not self.use_fp8:
             if os.getenv("VLLM_MOE_PADDING", "1") == "1":
-                self.w13_weight = nn.Parameter(
-                    torch.nn.functional.pad(self.w13_weight.data, (0, 128),
-                                            "constant", 0))
-                self.w2_weight = nn.Parameter(
-                    torch.nn.functional.pad(self.w2_weight.data, (0, 128),
-                                            "constant", 0))
+                self.w13_weight = nn.Parameter(F.pad(self.w13_weight.data,
+                                                     (0, 128), "constant", 0),
+                                               requires_grad=False)
+                self.w2_weight = nn.Parameter(F.pad(self.w2_weight.data,
+                                                    (0, 128), "constant", 0),
+                                              requires_grad=False)
             return

         # If checkpoint is fp16, quantize here.
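
For context, a minimal standalone sketch (not part of the commit; the tensor shape is illustrative, not vLLM's actual expert-weight shape) of what the padded-weights path above does: F.pad(t, (0, 128)) appends 128 zeros along the last dimension, and nn.Parameter(..., requires_grad=False) registers the result on the module without gradient tracking. Aliasing torch.nn.functional as F also shortens the lines, consistent with the "fix linter" commit message.

    import torch
    import torch.nn.functional as F
    from torch import nn

    # Hypothetical expert weight; shape is illustrative only.
    w13 = torch.randn(8, 512, 256)

    # (0, 128) pads only the last dimension: 0 zeros on the left,
    # 128 zeros on the right.
    padded = F.pad(w13, (0, 128), "constant", 0)
    print(padded.shape)  # torch.Size([8, 512, 384])

    # nn.Parameter defaults to requires_grad=True; passing
    # requires_grad=False keeps the padded weight frozen for inference.
    w13_param = nn.Parameter(padded, requires_grad=False)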
