Commit a24ea54

[Deprecation] Advance deprecation status (#29617)
Signed-off-by: DarkLight1337 <[email protected]>
1 parent ea228b4 commit a24ea54

4 files changed: 3 additions & 84 deletions
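All of the removed shims follow the `typing_extensions.deprecated` pattern (PEP 702). A minimal, self-contained sketch of that pattern is shown below; the names are illustrative placeholders, not vLLM identifiers.

# Sketch of the deprecation pattern used by the removed shims.
# `get_old_group` / `get_new_group` are made-up names for illustration only.
from typing_extensions import deprecated


def get_new_group() -> str:
    return "new group"


@deprecated("`get_old_group` has been replaced with `get_new_group`.")
def get_old_group() -> str:
    # Forward to the replacement so old call sites keep working until removal.
    return get_new_group()


if __name__ == "__main__":
    # Emits a DeprecationWarning at runtime; type checkers also flag the call.
    print(get_old_group())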

vllm/config/scheduler.py

Lines changed: 1 addition & 14 deletions
@@ -7,7 +7,7 @@
 
 from pydantic import Field, field_validator
 from pydantic.dataclasses import dataclass
-from typing_extensions import Self, deprecated
+from typing_extensions import Self
 
 from vllm.config.utils import config
 from vllm.logger import init_logger
@@ -224,19 +224,6 @@ def __post_init__(self, max_model_len: int, is_encoder_decoder: bool) -> None:
 
         self.verify_max_model_len(max_model_len)
 
-    @property
-    @deprecated(
-        "`SchedulerConfig.chunked_prefill_enabled` has been renamed to "
-        "`SchedulerConfig.enable_chunked_prefill`. "
-        "The old name will be removed in v0.12."
-    )
-    def chunked_prefill_enabled(self) -> bool:
-        return self.enable_chunked_prefill
-
-    @chunked_prefill_enabled.setter
-    def chunked_prefill_enabled(self, value: bool):
-        self.enable_chunked_prefill = value
-
     def verify_max_model_len(self, max_model_len: int) -> Self:
         if (
             self.max_num_batched_tokens < max_model_len
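With the `chunked_prefill_enabled` alias gone, downstream code has to read and write `enable_chunked_prefill` directly. A minimal migration sketch, assuming `SchedulerConfig` is importable from `vllm.config` and can be constructed with defaults:

# Migration sketch; assumes SchedulerConfig() is constructible with defaults.
from vllm.config import SchedulerConfig

cfg = SchedulerConfig()

# Removed in this commit:
#     if cfg.chunked_prefill_enabled: ...
#     cfg.chunked_prefill_enabled = True

# Use the canonical field instead:
cfg.enable_chunked_prefill = True
if cfg.enable_chunked_prefill:
    print("chunked prefill is on")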

vllm/distributed/parallel_state.py

Lines changed: 0 additions & 19 deletions
@@ -41,7 +41,6 @@
 import torch.distributed._functional_collectives as funcol
 import torch.distributed._symmetric_memory
 from torch.distributed import Backend, ProcessGroup
-from typing_extensions import deprecated
 
 import vllm.envs as envs
 from vllm.distributed.device_communicators.base_device_communicator import (
@@ -1078,15 +1077,6 @@ def get_tp_group() -> GroupCoordinator:
     return _TP
 
 
-@deprecated(
-    "`get_tensor_model_parallel_group` has been replaced with "
-    "`get_tp_group` and may be removed after v0.12. Please use "
-    "`get_tp_group` instead."
-)
-def get_tensor_model_parallel_group():
-    return get_tp_group()
-
-
 _DCP: GroupCoordinator | None = None
 
 
@@ -1130,15 +1120,6 @@ def get_pcp_group() -> GroupCoordinator:
     return _PCP
 
 
-@deprecated(
-    "`get_pipeline_model_parallel_group` has been replaced with "
-    "`get_pp_group` and may be removed in v0.12. Please use "
-    "`get_pp_group` instead."
-)
-def get_pipeline_model_parallel_group():
-    return get_pp_group()
-
-
 @contextmanager
 def graph_capture(device: torch.device):
     """

vllm/model_executor/models/utils.py

Lines changed: 0 additions & 49 deletions
@@ -10,7 +10,6 @@
 import torch.nn as nn
 from torch.func import functional_call
 from transformers import PretrainedConfig
-from typing_extensions import deprecated
 
 from vllm.config import VllmConfig
 from vllm.distributed import (
@@ -481,54 +480,6 @@ def _merge_multimodal_embeddings(
     return inputs_embeds
 
 
-@deprecated(
-    "`merge_multimodal_embeddings` has been replaced with "
-    "`SupportsMultiModal.embed_input_ids` and will be "
-    "removed in v0.12."
-)
-def merge_multimodal_embeddings(
-    input_ids: torch.Tensor,
-    inputs_embeds: torch.Tensor,
-    multimodal_embeddings: NestedTensors,
-    placeholder_token_id: int | list[int],
-) -> torch.Tensor:
-    """
-    Merge `multimodal_embeddings` into `inputs_embeds` by overwriting the
-    positions in `inputs_embeds` corresponding to placeholder tokens in
-    `input_ids`.
-
-    `placeholder_token_id` can be a list of token ids (e.g, token ids
-    of img_start, img_break, and img_end tokens) when needed: This means
-    the order of these tokens in the `input_ids` MUST MATCH the order of
-    their embeddings in `multimodal_embeddings` since we need to
-    slice-merge instead of individually scattering.
-
-    For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where
-    - T is text token
-    - S is image start token
-    - I is image embedding token
-    - B is image break token
-    - E is image end token.
-
-    Then the image embeddings (that correspond to I's) from vision encoder
-    must be padded with embeddings of S, B, and E in the same order of
-    input_ids for a correct embedding merge.
-
-    Note:
-        This updates `inputs_embeds` in place.
-    """
-    if isinstance(placeholder_token_id, list):
-        is_multimodal = isin_list(input_ids, placeholder_token_id)
-    else:
-        is_multimodal = input_ids == placeholder_token_id
-
-    return _merge_multimodal_embeddings(
-        inputs_embeds,
-        multimodal_embeddings=multimodal_embeddings,
-        is_multimodal=is_multimodal,
-    )
-
-
 def isin_list(
     elements: torch.Tensor,
     test_elements_list: list[int],
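The removed helper's docstring describes the operation it performed: overwrite the rows of `inputs_embeds` at placeholder positions with the multimodal embeddings, in order. Below is a toy, self-contained re-implementation of that masked overwrite; the token ids, shapes, and values are made up for illustration and are not vLLM internals. New code should instead go through `SupportsMultiModal.embed_input_ids`, as the deprecation message stated.

# Toy illustration of the merge the removed helper performed.
import torch

placeholder_ids = [32000, 32001]  # e.g. image start / image patch tokens (illustrative)

input_ids = torch.tensor([1, 5, 32000, 32001, 32001, 7])
inputs_embeds = torch.zeros(6, 4)     # (seq_len, hidden_size) text embeddings
multimodal_embeds = torch.ones(3, 4)  # one row per placeholder, same order as input_ids

is_multimodal = torch.isin(input_ids, torch.tensor(placeholder_ids))
inputs_embeds[is_multimodal] = multimodal_embeds  # in-place masked overwrite

print(is_multimodal.tolist())  # [False, False, True, True, True, False]
print(inputs_embeds[2:5])      # rows 2-4 now hold the multimodal embeddings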

vllm/v1/core/sched/output.py

Lines changed: 2 additions & 2 deletions
@@ -126,12 +126,12 @@ def num_reqs(self) -> int:
         return len(self.req_ids)
 
     @cached_property
-    @deprecated("use resumed_req_ids field")
+    @deprecated("This will be removed in v0.14, use `resumed_req_ids` instead.")
     def resumed_from_preemption(self) -> list[bool]:
         return [req_id in self.resumed_req_ids for req_id in self.req_ids]
 
     @cached_property
-    @deprecated("use all_token_ids field")
+    @deprecated("This will be removed in v0.14, use `all_token_ids` instead.")
     def resumed_req_token_ids(self) -> list[list[int] | None]:
         return [
             self.all_token_ids[req_id] if req_id in self.resumed_req_ids else None
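For consumers of `SchedulerOutput`, the updated messages point at the replacement fields. A consumer-side migration sketch; the field shapes are inferred from the property bodies above (`resumed_req_ids` as a container of request ids, `all_token_ids` as a mapping from request id to token ids) and should be treated as assumptions:

# Consumer-side migration sketch; field types are inferred from the
# properties shown in the diff above, not from a documented API.

def resumed_request_ids(scheduler_output) -> list[str]:
    # Before: zip req_ids with the deprecated resumed_from_preemption bool list.
    # After:  membership test against the resumed_req_ids field.
    return [r for r in scheduler_output.req_ids if r in scheduler_output.resumed_req_ids]


def resumed_token_ids(scheduler_output) -> dict[str, list[int]]:
    # Before: the deprecated resumed_req_token_ids list aligned with req_ids.
    # After:  index the all_token_ids mapping for resumed requests only.
    return {
        r: scheduler_output.all_token_ids[r]
        for r in scheduler_output.req_ids
        if r in scheduler_output.resumed_req_ids
    }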
