
Commit 745a3ba

xyang16 and jeejeelee authored
[LoRA] Support FusedMoE LoRA Triton kernel for mxfp4 (vllm-project#28971)
Signed-off-by: Xin Yang <[email protected]>
Co-authored-by: Jee Jee Li <[email protected]>
1 parent 35657bc commit 745a3ba
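
For orientation before the diffs: the modular MoE path composes a prepare/finalize stage with an "experts" GEMM implementation, and this change lets the LoRA fused-MoE wrapper reuse that composition for mxfp4 (w4a16) weights via the OAI Triton kernels. Below is a minimal sketch of the composition, mirroring the new test further down; the tensor arguments are assumed to be prepared as in that test's make_weights, and this is not the LoRA wrapper's own code.

import torch

from vllm.model_executor.layers.fused_moe.config import mxfp4_w4a16_moe_quant_config
from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (
    UnfusedOAITritonExperts,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel
from vllm.model_executor.layers.fused_moe.prepare_finalize import (
    MoEPrepareAndFinalizeNoEP,
)


def run_mxfp4_moe(
    x, w1, w2, w1_scale, w2_scale, w1_bias, w2_bias,
    topk_weights, topk_ids, num_experts: int,
) -> torch.Tensor:
    # The quant config carries the mxfp4 scales and biases; the experts class runs the GEMMs.
    quant_config = mxfp4_w4a16_moe_quant_config(
        w1_bias=w1_bias, w2_bias=w2_bias, w1_scale=w1_scale, w2_scale=w2_scale
    )
    kernel = FusedMoEModularKernel(
        MoEPrepareAndFinalizeNoEP(), UnfusedOAITritonExperts(quant_config)
    )
    return kernel.forward(
        hidden_states=x, w1=w1, w2=w2,
        topk_weights=topk_weights, topk_ids=topk_ids,
        inplace=True, activation="swigluoai",
        global_num_experts=num_experts, expert_map=None,
        apply_router_weight_on_input=False,
    )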

File tree

4 files changed, +440 -11 lines
Lines changed: 250 additions & 0 deletions
@@ -0,0 +1,250 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Test modular OAI Triton MoE
"""

import pytest
import torch

from vllm.utils.import_utils import has_triton_kernels

if not has_triton_kernels():
    pytest.skip(
        "triton_kernels not found, skipping all related tests",
        allow_module_level=True,
    )

from triton_kernels.matmul_ogs import FlexCtx, PrecisionConfig
from triton_kernels.numerics import InFlexData
from triton_kernels.numerics_details.mxfp import downcast_to_mxfp, upcast_from_mxfp
from triton_kernels.tensor import FP4, convert_layout, wrap_torch_tensor
from triton_kernels.tensor_details import layout
from triton_kernels.testing import assert_close

from vllm.config import VllmConfig, set_current_vllm_config
from vllm.model_executor.layers.fused_moe.config import mxfp4_w4a16_moe_quant_config
from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (
    OAITritonExperts,
    UnfusedOAITritonExperts,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel
from vllm.model_executor.layers.fused_moe.prepare_finalize import (
    MoEPrepareAndFinalizeNoEP,
)
from vllm.model_executor.layers.utils import shuffle_weight
from vllm.platforms import current_platform

MNK = [
    (1, 512, 384),
    (1, 2880, 2880),
    (2, 512, 384),
    (2, 2880, 2880),
    (32, 2880, 2880),
    (64, 2880, 2880),
]


def unshuffle_weight(w: torch.Tensor):
    first = w[..., ::2]
    second = w[..., 1::2]
    return torch.concat((first, second), dim=-1)


def make_weights(dtype, k, n, e):
    w1 = torch.randn((e, k, 2 * n), dtype=dtype, device="cuda")
    w1_bias = torch.randn((e, 2 * n), dtype=dtype, device="cuda")

    w2 = torch.randn((e, n, k), dtype=dtype, device="cuda")
    w2_bias = torch.randn((e, k), dtype=dtype, device="cuda")

    w1_tri = w1.clone()
    w2_tri = w2.clone()

    w1_bias_tri = w1_bias.clone()
    w2_bias_tri = w2_bias.clone()
    w1_bias_tri = w1_bias_tri.to(torch.float32)
    w2_bias_tri = w2_bias_tri.to(torch.float32)

    # shuffle weights
    w1_tri = shuffle_weight(w1_tri)
    w1_bias_tri = shuffle_weight(w1_bias_tri)

    # quant triton_weights
    w1_tri, w1_scale_tri = downcast_to_mxfp(w1_tri, torch.uint8, axis=1)
    w1 = upcast_from_mxfp(w1_tri, w1_scale_tri, dtype, axis=1)
    w1 = unshuffle_weight(w1)

    w2_tri, w2_scale_tri = downcast_to_mxfp(w2_tri, torch.uint8, axis=1)
    w2 = upcast_from_mxfp(w2_tri, w2_scale_tri, dtype, axis=1)

    num_warps = 8
    w_layout, w_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1)
    w_scale_layout, w_scale_layout_opts = (
        layout.make_default_matmul_mxfp4_w_scale_layout(mx_axis=1, num_warps=num_warps)
    )

    w1_tri = convert_layout(wrap_torch_tensor(w1_tri, FP4), w_layout, **w_layout_opts)
    w1_scale_tri = convert_layout(
        wrap_torch_tensor(w1_scale_tri),
        w_scale_layout,
        **w_scale_layout_opts,
    )

    w2_tri = convert_layout(wrap_torch_tensor(w2_tri, FP4), w_layout, **w_layout_opts)
    w2_scale_tri = convert_layout(
        wrap_torch_tensor(w2_scale_tri),
        w_scale_layout,
        **w_scale_layout_opts,
    )

    w1_precision_config = PrecisionConfig(
        weight_scale=w1_scale_tri, flex_ctx=FlexCtx(rhs_data=InFlexData())
    )
    w2_precision_config = PrecisionConfig(
        weight_scale=w2_scale_tri, flex_ctx=FlexCtx(rhs_data=InFlexData())
    )

    return (
        w1,
        w2,
        w1_bias,
        w2_bias,
        w1_tri,
        w2_tri,
        w1_bias_tri,
        w2_bias_tri,
        w1_precision_config,
        w2_precision_config,
    )


def swiglu(x, alpha: float = 1.702, limit: float = 1.0):
    # Note we add an extra bias of 1 to the linear layer
    x_glu, x_linear = torch.chunk(x, 2, dim=-1)
    if limit is not None:
        x_glu = x_glu.clamp(max=limit)
    out_glu = x_glu * torch.sigmoid(alpha * x_glu)
    if limit is not None:
        x_linear = x_linear.clamp(min=-limit, max=limit)
    return out_glu * (x_linear + 1)


def torch_moe_impl(
    hidden_states: torch.Tensor,  # (M, K)
    w1: torch.Tensor,  # (E, K, 2N)
    w2: torch.Tensor,  # (E, N, K)
    w1_bias: torch.Tensor,  # (E, 2N)
    w2_bias: torch.Tensor,  # (E, K)
    topk_weights: torch.Tensor,  # (M, topk)
    topk_ids: torch.Tensor,  # (M, topk)
):
    w1 = w1[topk_ids, ...]
    w1_bias = w1_bias[topk_ids, ...]
    hidden_states = torch.einsum("bekc,bk->bec", w1, hidden_states) + w1_bias
    hidden_states = swiglu(hidden_states, limit=7)

    w2 = w2[topk_ids, ...]
    w2_bias = w2_bias[topk_ids, ...]
    hidden_states = torch.einsum("bekc,bek->bec", w2, hidden_states) + w2_bias

    # Weighted sum of experts
    hidden_states = torch.einsum("bec,be->bc", hidden_states, topk_weights)
    return hidden_states


def oai_triton_moe_impl(
    x: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    w1_scale: "PrecisionConfig",
    w2_scale: "PrecisionConfig",
    w1_bias: torch.Tensor | None,
    w2_bias: torch.Tensor | None,
    num_experts: int,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    unfused: bool = False,
) -> torch.Tensor:
    quant_config = mxfp4_w4a16_moe_quant_config(
        w1_bias=w1_bias,
        w2_bias=w2_bias,
        w1_scale=w1_scale,
        w2_scale=w2_scale,
    )

    if unfused:
        fused_experts = UnfusedOAITritonExperts(quant_config)
    else:
        fused_experts = OAITritonExperts(quant_config)

    mk = FusedMoEModularKernel(MoEPrepareAndFinalizeNoEP(), fused_experts)

    return mk.forward(
        hidden_states=x,
        w1=w1,
        w2=w2,
        topk_weights=topk_weights,
        topk_ids=topk_ids,
        inplace=True,
        activation="swigluoai",
        global_num_experts=num_experts,
        expert_map=None,
        apply_router_weight_on_input=False,
    )


@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="This test is skipped on non-CUDA platform."
)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("m,n,k", MNK)
@pytest.mark.parametrize("num_experts", [32, 128])
@pytest.mark.parametrize("topk", [4])
@pytest.mark.parametrize("unfused", [True, False])
def test_oai_triton_moe(
    dtype: torch.dtype,
    m: int,
    n: int,
    k: int,
    num_experts: int,
    topk: int,
    unfused: bool,
):
    current_platform.seed_everything(0)
    (
        w1,
        w2,
        w1_bias,
        w2_bias,
        w1_tri,
        w2_tri,
        w1_bias_tri,
        w2_bias_tri,
        w1_precision_config,
        w2_precision_config,
    ) = make_weights(dtype, k, n, num_experts)

    x = torch.randn((m, k), dtype=dtype, device="cuda")
    router_logits = torch.randn(m, num_experts, device="cuda", dtype=dtype)
    topk_weights, topk_ids = torch.topk(router_logits, k=topk, dim=-1, sorted=True)
    topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1)

    with set_current_vllm_config(VllmConfig()):
        out_ref = torch_moe_impl(x, w1, w2, w1_bias, w2_bias, topk_weights, topk_ids)

        out = oai_triton_moe_impl(
            x,
            w1_tri,
            w2_tri,
            w1_precision_config,
            w2_precision_config,
            w1_bias_tri,
            w2_bias_tri,
            num_experts,
            topk_weights,
            topk_ids,
            unfused,
        )

    assert_close(ref=out_ref, tri=out, maxtol=0.025, rmstol=0.005)
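
Aside (not part of the diff): the mxfp4 helpers the test leans on are easiest to see in isolation. A minimal round-trip sketch using the same triton_kernels calls as make_weights above; the shape and the printed error are illustrative only:

import torch

from triton_kernels.numerics_details.mxfp import downcast_to_mxfp, upcast_from_mxfp

w = torch.randn((8, 256, 512), dtype=torch.bfloat16, device="cuda")
# Quantize along axis=1 into packed FP4 values plus block scales, then dequantize.
w_q, w_scale = downcast_to_mxfp(w, torch.uint8, axis=1)
w_ref = upcast_from_mxfp(w_q, w_scale, torch.bfloat16, axis=1)
# mxfp4 is lossy, so the round-trip only agrees coarsely with the original weights;
# the test compensates by using the dequantized tensor itself as the reference.
print((w - w_ref).abs().max())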

vllm/lora/layers/fused_moe.py

Lines changed: 26 additions & 9 deletions
@@ -20,15 +20,24 @@
     _get_config_dtype_str,
 )
 from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
-    modular_marlin_fused_moe,
+    MarlinExperts,
 )
 from vllm.model_executor.layers.fused_moe.fused_moe import (
-    modular_triton_fused_moe,
+    TritonExperts,
     try_get_optimal_moe_config,
 )
 from vllm.model_executor.layers.fused_moe.fused_moe_modular_method import (
     FusedMoEModularMethod,
 )
+from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (
+    UnfusedOAITritonExperts,
+)
+from vllm.model_executor.layers.fused_moe.modular_kernel import (
+    FusedMoEModularKernel,
+)
+from vllm.model_executor.layers.fused_moe.prepare_finalize import (
+    MoEPrepareAndFinalizeNoEP,
+)
 
 from .utils import _get_lora_device
 
@@ -114,15 +123,23 @@ def _inject_lora_into_fused_moe(self):
         self.base_layer.ensure_moe_quant_config_init()
         quant_config = self.base_layer.quant_method.moe_quant_config
 
-        m_fused_moe_fn = (
-            modular_triton_fused_moe(
-                quant_config, shared_experts=self.base_layer.shared_experts
+        prepare_finalize = MoEPrepareAndFinalizeNoEP()
+        m_fused_moe_fn = FusedMoEModularKernel(
+            prepare_finalize,
+            self.base_layer.quant_method.select_gemm_impl(
+                prepare_finalize, self.base_layer
+            ),
+            self.base_layer.shared_experts,
+            getattr(self.base_layer, "shared_experts_stream", None),
+        )
+        if quant_config.use_mxfp4_w4a16:
+            assert isinstance(
+                m_fused_moe_fn.fused_experts, (MarlinExperts, UnfusedOAITritonExperts)
             )
-            if not quant_config.use_mxfp4_w4a16
-            else modular_marlin_fused_moe(
-                quant_config, shared_experts=self.base_layer.shared_experts
+        else:
+            assert isinstance(
+                m_fused_moe_fn.fused_experts, (MarlinExperts, TritonExperts)
             )
-        )
 
         def fwd_decorator(layer, func):
             def wrapper(*args, **kwargs):
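
Usage note (a sketch, not code from this commit): the dispatch the new assertions encode can be written as a standalone check, assuming a modular kernel built as above and a quant config object exposing use_mxfp4_w4a16:

from vllm.model_executor.layers.fused_moe.fused_marlin_moe import MarlinExperts
from vllm.model_executor.layers.fused_moe.fused_moe import TritonExperts
from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (
    UnfusedOAITritonExperts,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import FusedMoEModularKernel


def check_selected_experts(kernel: FusedMoEModularKernel, quant_config) -> None:
    # mxfp4 w4a16 checkpoints may resolve to Marlin or the unfused OAI Triton
    # experts; other quantization paths resolve to Marlin or TritonExperts.
    experts = kernel.fused_experts
    if quant_config.use_mxfp4_w4a16:
        assert isinstance(experts, (MarlinExperts, UnfusedOAITritonExperts))
    else:
        assert isinstance(experts, (MarlinExperts, TritonExperts))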

0 commit comments
