We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 9a3883c · commit e7a9260 — Copy full SHA for e7a9260
vllm/model_executor/layers/layernorm.py
@@ -12,7 +12,6 @@
     rms_norm_batch_invariant,
     vllm_is_batch_invariant,
 )
-from vllm.model_executor.layers.fla.ops.layernorm_guard import rmsnorm_fn
 from vllm.platforms import current_platform
 from vllm.utils.torch_utils import direct_register_custom_op
@@ -460,6 +459,8 @@ def forward_native(
     def forward_cuda(
         self, x: torch.Tensor, z: torch.Tensor | None = None
     ) -> torch.Tensor:
+        from vllm.model_executor.layers.fla.ops.layernorm_guard import rmsnorm_fn
+
         return rmsnorm_fn(
             x,
             self.weight,
0 commit comments