
Commit 668b56a

WoosukKwon authored and richardsliu committed
[V1][Minor] Minor enhancements on scheduler (vllm-project#14732)
Signed-off-by: Woosuk Kwon <[email protected]>
Signed-off-by: Richard Liu <[email protected]>
1 parent 4a5cebd commit 668b56a

File tree

1 file changed (+3, -5 lines)


vllm/v1/core/scheduler.py

Lines changed: 3 additions & 5 deletions
@@ -587,9 +587,6 @@ def update_from_output(
             if spec_token_ids is not None:
                 request.spec_token_ids = spec_token_ids[req_index]
 
-            # Get prompt logprobs for this request.
-            prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id)
-
             stopped = False
             new_logprobs = None
             new_token_ids: list[int] = []
@@ -622,6 +619,8 @@ def update_from_output(
                     new_token_ids,
                 )
 
+            # Get prompt logprobs for this request.
+            prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id)
             # Transmit partial if chunked prefill & prompt logprobs is enabled
             if new_token_ids or prompt_logprobs_tensors is not None:
                 # Add EngineCoreOutput for this Request.
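
The first two hunks only move the prompt-logprobs lookup from the top of the per-request loop body down to the point where its result is consumed. As a rough illustration (plain standalone Python, not vLLM code; the function and variable names below are stand-ins mirroring the diff), the move is behavior-preserving because dict.get returns None for a missing key, so the downstream "is not None" check sees the same value no matter where the lookup happens, as long as it runs before the check:

# Rough standalone sketch (not vLLM code): dict.get() yields None for a
# missing key, so deferring the lookup to just before the check below does
# not change which requests produce an output.
prompt_logprobs_dict = {"req-0": [-0.1, -2.3]}  # toy stand-in for tensors

def should_emit_output(req_id: str, new_token_ids: list[int]) -> bool:
    # Lookup done at the point of use, as after this commit.
    prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id)
    # Transmit if there are new tokens or prompt logprobs for the request.
    return bool(new_token_ids) or prompt_logprobs_tensors is not None

print(should_emit_output("req-0", []))    # True: prompt logprobs are present
print(should_emit_output("req-1", []))    # False: get() returned None
print(should_emit_output("req-1", [42]))  # True: new tokens are present
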
@@ -693,8 +692,7 @@ def finish_requests(
 
             if request.status == RequestStatus.RUNNING:
                 self.running.remove(request)
-                if request.request_id in self.scheduled_req_ids:
-                    self.scheduled_req_ids.remove(request.request_id)
+                self.scheduled_req_ids.discard(request.request_id)
             else:
                 self.waiting.remove(request)
             request.status = finished_status