Skip to content

Commit 3aafe0d

Browse files
hmellor and Yuqi Zhang
authored and committed
Replace {func} with mkdocs style links (vllm-project#18610)
Signed-off-by: Harry Mellor <[email protected]> Signed-off-by: Yuqi Zhang <[email protected]>
1 parent 1eb9dfe commit 3aafe0d

File tree

5 files changed

+6
-6
lines changed

5 files changed

+6
-6
lines changed

vllm/model_executor/models/llava_next.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -540,7 +540,7 @@ def forward(
540540
Unlike in LLaVA-1.5, the number of image tokens inputted to the language
541541
model depends on the original size of the input image. Including the
542542
original image token in the input, the required number of image tokens
543-
is given by {func}`get_llava_next_image_feature_size`.
543+
is given by [get_llava_next_image_feature_size][].
544544
545545
This way, the `positions` and `attn_metadata` are consistent
546546
with the `input_ids`.

vllm/multimodal/processing.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -387,7 +387,7 @@ def modality(self) -> str:
387387

388388

389389
def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]:
390-
"""Convenience function to apply {func}`full_groupby` based on modality."""
390+
"""Convenience function to apply [full_groupby][] based on modality."""
391391
return full_groupby(values, key=lambda x: x.modality)
392392

393393

vllm/platforms/interface.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -157,7 +157,7 @@ def is_out_of_tree(self) -> bool:
157157
return self._enum == PlatformEnum.OOT
158158

159159
def is_cuda_alike(self) -> bool:
160-
"""Stateless version of {func}`torch.cuda.is_available`."""
160+
"""Stateless version of [torch.cuda.is_available][]."""
161161
return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM)
162162

163163
def is_sleep_mode_available(self) -> bool:
@@ -194,7 +194,7 @@ def get_device_capability(
194194
cls,
195195
device_id: int = 0,
196196
) -> Optional[DeviceCapability]:
197-
"""Stateless version of {func}`torch.cuda.get_device_capability`."""
197+
"""Stateless version of [torch.cuda.get_device_capability][]."""
198198
return None
199199

200200
@classmethod

vllm/sequence.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -27,7 +27,7 @@
2727

2828

2929
def array_full(token_id: int, count: int):
30-
"""{class}`array` equivalent of {func}`numpy.full`."""
30+
"""{class}`array` equivalent of [numpy.full][]."""
3131
return array(VLLM_TOKEN_ID_ARRAY_TYPE, [token_id]) * count
3232

3333

vllm/v1/worker/utils.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -66,7 +66,7 @@ def gather_mm_placeholders(
6666
"""
6767
Reconstructs the embeddings from the placeholder tokens.
6868
69-
This is the operation of {func}`scatter_mm_placeholders`.
69+
This is the operation of [scatter_mm_placeholders][].
7070
"""
7171
if is_embed is None:
7272
return placeholders

0 commit comments

Comments
 (0)