2 changes: 0 additions & 2 deletions .github/workflows/_e2e_test.yaml
@@ -91,10 +91,8 @@ jobs:
pytest -sv tests/e2e/singlecard/test_completion_with_prompt_embeds.py
pytest -sv tests/e2e/singlecard/test_aclgraph.py
pytest -sv tests/e2e/singlecard/test_aclgraph_mem.py
pytest -sv tests/e2e/singlecard/test_ascend_scheduler.py
pytest -sv tests/e2e/singlecard/test_bge_model.py
pytest -sv tests/e2e/singlecard/test_camem.py
pytest -sv tests/e2e/singlecard/test_chunked.py
pytest -sv tests/e2e/singlecard/test_embedding.py
# pytest -sv tests/e2e/singlecard/test_embedding_aclgraph.py
pytest -sv tests/e2e/singlecard/test_guided_decoding.py
10 changes: 5 additions & 5 deletions docs/source/tutorials/DeepSeek-V3.2-Exp.md
@@ -108,7 +108,7 @@ vllm serve vllm-ascend/DeepSeek-V3.2-Exp-W8A8 \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.92 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
--additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
```

### Multi-node Deployment
@@ -160,7 +160,7 @@ vllm serve /root/.cache/Modelers_Park/DeepSeek-V3.2-Exp \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.9 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
--additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
```

**Node 1**
@@ -204,7 +204,7 @@ vllm serve /root/.cache/Modelers_Park/DeepSeek-V3.2-Exp \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.92 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
--additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
```

::::
@@ -252,7 +252,7 @@ vllm serve vllm-ascend/DeepSeek-V3.2-Exp-W8A8 \
--quantization ascend \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.9 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
--additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
```

**Node 1**
@@ -299,7 +299,7 @@ vllm serve vllm-ascend/DeepSeek-V3.2-Exp-W8A8 \
--quantization ascend \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.92 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
--additional-config '{"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
```

::::
4 changes: 2 additions & 2 deletions docs/source/tutorials/multi_node.md
@@ -137,7 +137,7 @@ vllm serve vllm-ascend/DeepSeek-V3.1-W8A8 \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.9 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true}}'
--additional-config '{"torchair_graph_config":{"enabled":true}}'
```

**Node 1**
@@ -182,7 +182,7 @@ vllm serve vllm-ascend/DeepSeek-V3.1-W8A8 \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.92 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true}}'
--additional-config '{"torchair_graph_config":{"enabled":true}}'
```

The deployment view looks like:
4 changes: 2 additions & 2 deletions docs/source/tutorials/multi_node_kimi.md
@@ -93,7 +93,7 @@ vllm serve /home/cache/weights/Kimi-K2-Instruct-W8A8 \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.9 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true}}'
--additional-config '{"torchair_graph_config":{"enabled":true}}'
```

**Node 1**
@@ -137,7 +137,7 @@ vllm serve /home/cache/weights/Kimi-K2-Instruct-W8A8 \
--trust-remote-code \
--no-enable-prefix-caching \
--gpu-memory-utilization 0.92 \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true}}'
--additional-config '{"torchair_graph_config":{"enabled":true}}'
```

The deployment view looks like:
5 changes: 0 additions & 5 deletions docs/source/tutorials/multi_npu_moge.md
@@ -158,11 +158,6 @@ if __name__ == "__main__":
'torchair_graph_config': {
'enabled': True,
},
'ascend_scheduler_config':{
'enabled': True,
'enable_chunked_prefill' : False,
'chunked_prefill_enabled': False
},
})

outputs = llm.generate(prompts, sampling_params)
19 changes: 0 additions & 19 deletions docs/source/user_guide/configuration/additional_config.md
@@ -27,7 +27,6 @@ The following table lists additional configuration options available in vLLM Ascend
| Name | Type | Default | Description |
|-------------------------------------|------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------|
| `torchair_graph_config` | dict | `{}` | Configuration options for torchair graph mode |
| `ascend_scheduler_config` | dict | `{}` | Configuration options for ascend scheduler |
| `weight_prefetch_config` | dict | `{}` | Configuration options for weight prefetch |
| `refresh` | bool | `false` | Whether to refresh global Ascend configuration content. This is usually used by rlhf or ut/e2e test case. |
| `expert_map_path` | str | `None` | When using expert load balancing for an MoE model, an expert map path needs to be passed in. |
@@ -61,18 +60,6 @@ The details of each configuration option are as follows:
| `enable_kv_nz`| bool | `False` | Whether to enable KV Cache NZ layout. This option only takes effect on models using MLA (for example, DeepSeek). |
| `enable_super_kernel` | bool | `False` | Whether to enable super kernel to fuse operators in deepseek moe layers. This option only takes effects on moe models using dynamic w8a8 quantization.|

**ascend_scheduler_config**

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| `enabled` | bool | `False` | Whether to enable ascend scheduler for V1 engine.|
| `enable_pd_transfer` | bool | `False` | Whether to enable P-D transfer. When it is enabled, decode is started only when prefill of all requests is done. This option only takes effect on offline inference. |
| `decode_max_num_seqs` | int | `0` | Whether to change max_num_seqs of decode phase when P-D transfer is enabled. This option only takes effect when enable_pd_transfer is True. |
| `max_long_partial_prefills` | Union[int, float] | `float('inf')` | The maximum number of prompts longer than long_prefill_token_threshold that will be prefilled concurrently. |
| `long_prefill_token_threshold` | Union[int, float] | `float('inf')` | a request is considered long if the prompt is longer than this number of tokens. |

ascend_scheduler_config also supports the options from [vllm scheduler config](https://docs.vllm.ai/en/stable/api/vllm/config.html#vllm.config.SchedulerConfig). For example, you can add `enable_chunked_prefill: True` to ascend_scheduler_config as well.

**weight_prefetch_config**

| Name | Type | Default | Description |
@@ -93,12 +80,6 @@ An example of additional configuration is as follows:
"graph_batch_sizes_init": False,
"enable_kv_nz": False
},
"ascend_scheduler_config": {
"enabled": True,
"enable_chunked_prefill": True,
"max_long_partial_prefills": 1,
"long_prefill_token_threshold": 4096,
},
"weight_prefetch_config": {
"enabled": True,
"prefetch_ratio": {
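For orientation, a minimal sketch of what `additional_config` looks like once `ascend_scheduler_config` is dropped. The keys are the ones that remain in the tables above; the model path and the specific values are illustrative placeholders, not recommendations:

```python
from vllm import LLM

# Sketch only: ascend_scheduler_config is no longer accepted; the remaining
# vLLM Ascend options are still passed through additional_config.
llm = LLM(
    model="path/to/your-model",  # placeholder model path
    additional_config={
        "torchair_graph_config": {
            "enabled": True,
            "use_cached_graph": True,
            "graph_batch_sizes": [16],
            "graph_batch_sizes_init": False,
            "enable_kv_nz": False,
        },
        "weight_prefetch_config": {
            "enabled": True,
        },
        "refresh": False,
    },
)
outputs = llm.generate("Hello, how are you?")
```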
4 changes: 2 additions & 2 deletions docs/source/user_guide/feature_guide/graph_mode.md
@@ -45,14 +45,14 @@ import os
from vllm import LLM

# TorchAirGraph only works without chunked-prefill now
model = LLM(model="path/to/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True},"ascend_scheduler_config": {"enabled": True}})
model = LLM(model="path/to/DeepSeek-R1-0528", additional_config={"torchair_graph_config": {"enabled": True}})
outputs = model.generate("Hello, how are you?")
```

Online example:

```shell
vllm serve path/to/DeepSeek-R1-0528 --additional-config='{"torchair_graph_config": {"enabled": true},"ascend_scheduler_config": {"enabled": true}}'
vllm serve path/to/DeepSeek-R1-0528 --additional-config='{"torchair_graph_config": {"enabled": true}}'
```

You can find more details about additional configuration [here](../configuration/additional_config.md).
1 change: 0 additions & 1 deletion examples/offline_inference_npu_long_seq.py
@@ -42,7 +42,6 @@
enable_chunked_prefill=False,
max_num_batched_tokens=2048,
max_model_len=1024,
additional_config={"ascend_scheduler_config": {"enabled": False}},
max_num_seqs=1,
block_size=128,
gpu_memory_utilization=0.9
2 changes: 1 addition & 1 deletion examples/run_dp_server.sh
@@ -28,4 +28,4 @@ vllm serve Qwen/Qwen1.5-MoE-A2.7B \
--gpu-memory-utilization 0.9 \
--trust-remote-code \
--enforce-eager \
--additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":false, "use_cached_graph":false}}'
--additional-config '{"torchair_graph_config":{"enabled":false, "use_cached_graph":false}}'
5 changes: 1 addition & 4 deletions tests/e2e/310p/test_offline_inference_parallel_310p.py
@@ -24,15 +24,12 @@
MODELS = [
"IntervitensInc/pangu-pro-moe-model",
]
# set additional config for ascend scheduler and torchair graph
# set additional config for torchair graph
ADDITIONAL_CONFIG = [{
"additional_config": {
"torchair_graph_config": {
"enabled": True
},
"ascend_scheduler_config": {
"enabled": True,
}
}
}]

21 changes: 6 additions & 15 deletions tests/e2e/multicard/test_expert_parallel.py
@@ -15,23 +15,14 @@ def test_e2e_ep_correctness(model_name):
max_tokens = 5

# FIXME: Really strange that chunked prefill might lead to different results, investigate further
with VllmRunner(
model_name,
tensor_parallel_size=2,
additional_config={"ascend_scheduler_config": {
"enabled": True
}},
enforce_eager=False) as vllm_model:
with VllmRunner(model_name, tensor_parallel_size=2,
enforce_eager=False) as vllm_model:
tp_output = vllm_model.generate_greedy(example_prompts, max_tokens)

with VllmRunner(
model_name,
tensor_parallel_size=2,
enable_expert_parallel=True,
additional_config={"ascend_scheduler_config": {
"enabled": True
}},
enforce_eager=False) as vllm_model:
with VllmRunner(model_name,
tensor_parallel_size=2,
enable_expert_parallel=True,
enforce_eager=False) as vllm_model:
ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)

check_outputs_equal(
16 changes: 2 additions & 14 deletions tests/e2e/multicard/test_fused_moe_allgather_ep.py
@@ -49,13 +49,7 @@ def test_generate_with_allgather():
tensor_parallel_size=2,
max_model_len=1024,
dtype="auto",
enable_expert_parallel=True,
additional_config={
"ascend_scheduler_config": {
"enabled": True,
"chunked_prefill_enabled": False,
},
}) as vllm_model:
enable_expert_parallel=True) as vllm_model:
vllm_model.generate(example_prompts, sampling_params)


@@ -76,11 +70,5 @@ def test_generate_with_alltoall():
tensor_parallel_size=2,
max_model_len=1024,
dtype="auto",
enable_expert_parallel=True,
additional_config={
"ascend_scheduler_config": {
"enabled": True,
"chunked_prefill_enabled": False,
},
}) as vllm_model:
enable_expert_parallel=True) as vllm_model:
vllm_model.generate(example_prompts, sampling_params)
14 changes: 3 additions & 11 deletions tests/e2e/multicard/test_offline_inference_distributed.py
@@ -82,9 +82,6 @@ def test_models_distributed_DeepSeek_multistream_moe():
"enabled": True,
},
"enable_multistream_moe": True,
"ascend_scheduler_config": {
"enabled": True,
},
"refresh": True,
},
) as vllm_model:
@@ -154,14 +151,9 @@ def test_models_distributed_DeepSeek_W4A8DYNAMIC(model):
quantization="ascend",
enforce_eager=True,
enable_expert_parallel=True,
additional_config={
"torchair_graph_config": {
"enabled": False,
},
"ascend_scheduler_config": {
"enabled": True,
}
},
additional_config={"torchair_graph_config": {
"enabled": False,
}},
) as vllm_model:
vllm_model.generate_greedy(prompts, max_tokens)

66 changes: 1 addition & 65 deletions tests/e2e/multicard/test_prefix_caching.py
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Compare the with and without prefix caching on V1 scheduler or AscendScheduler."""
"""Compare the with and without prefix caching on V1 scheduler."""

import pytest

@@ -84,67 +84,3 @@ def test_prefix_cache_with_v1_scheduler(model: str, max_tokens: int) -> None:
name_0="vllm_output",
name_1="prefix_cache_output",
)


@pytest.mark.skip(reason="Fix me, the accuracy is not correct")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [50])
def test_prefix_cache_with_ascend_scheduler(model: str,
max_tokens: int) -> None:

with VllmRunner(model,
additional_config={
'ascend_scheduler_config': {
'enabled': True,
},
},
enforce_eager=False,
max_model_len=2048,
tensor_parallel_size=2,
gpu_memory_utilization=0.7) as vllm_model:
vllm_output = vllm_model.generate_greedy(INPUT_PROMPTS, max_tokens)

with VllmRunner(model,
additional_config={
'ascend_scheduler_config': {
'enabled': True,
'enable_prefix_caching': True,
},
},
enforce_eager=False,
max_model_len=2048,
tensor_parallel_size=2,
gpu_memory_utilization=0.7) as vllm_model:
prefix_cache_output = vllm_model.generate_greedy(
INPUT_PROMPTS, max_tokens)

# TODO: enable apc and chunked prefill with ascend scheduler will lead accuracy problem.
# Disable it now. Fix it or drop the ascend scheduler in the future.
# with VllmRunner(model,
# additional_config={
# 'ascend_scheduler_config': {
# 'enabled': True,
# 'enable_prefix_caching': True,
# "enable_chunked_prefill": True,
# },
# },
# enforce_eager=True,
# max_model_len=2048,
# tensor_parallel_size=2,
# gpu_memory_utilization=0.7) as vllm_model:
# chunk_prefill_prefix_cache_output = vllm_model.generate_greedy(
# INPUT_PROMPTS, max_tokens)

check_outputs_equal(
outputs_0_lst=vllm_output,
outputs_1_lst=prefix_cache_output,
name_0="vllm_output",
name_1="prefix_cache_output",
)

# check_outputs_equal(
# outputs_0_lst=chunk_prefill_prefix_cache_output,
# outputs_1_lst=prefix_cache_output,
# name_0="chunk_prefill_prefix_cache_output",
# name_1="prefix_cache_output",
# )
9 changes: 3 additions & 6 deletions tests/e2e/multicard/test_qwen3_next.py
@@ -24,6 +24,7 @@
import os
from unittest.mock import patch

import pytest
from modelscope import snapshot_download # type: ignore

from tests.e2e.conftest import VllmRunner
@@ -63,6 +64,8 @@ def test_models_distributed_Qwen3_NEXT_TP4_FULL_DECODE_ONLY():
del vllm_model


@pytest.mark.skip(
reason="Qwen3-Next + MTP doesn't work with chunked prefill. Fix Me")
def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
example_prompts = [
"Hello, my name is",
@@ -89,12 +92,6 @@ def test_models_distributed_Qwen3_NEXT_MTP_TP4_SIMILARITY():
gpu_memory_utilization=0.8,
distributed_executor_backend="mp",
enforce_eager=True,
additional_config={
"ascend_scheduler_config": {
"enabled": True,
"enable_chunked_prefill": False
}
},
speculative_config={
"method": "qwen3_next_mtp",
"num_speculative_tokens": 1