Skip to content

Commit 5a871d7

Browse files
committed
docs: class migration from sphinx to mkdocs (entrypoints)
Signed-off-by: Zerohertz <[email protected]>
1 parent b8edae1 commit 5a871d7

File tree

2 files changed

+7
-5
lines changed

2 files changed

+7
-5
lines changed

vllm/entrypoints/llm.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -129,8 +129,7 @@ class LLM:
129129
compilation_config: Either an integer or a dictionary. If it is an
130130
integer, it is used as the level of compilation optimization. If it
131131
is a dictionary, it can specify the full compilation configuration.
132-
**kwargs: Arguments for [EngineArgs][vllm.EngineArgs]. (See
133-
[engine-args][])
132+
**kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].
134133
135134
Note:
136135
This class is intended to be used for offline inference. For online
@@ -494,7 +493,8 @@ def collective_rpc(self,
494493
`self` argument, in addition to the arguments passed in `args`
495494
and `kwargs`. The `self` argument will be the worker object.
496495
timeout: Maximum time in seconds to wait for execution. Raises a
497-
{exc}`TimeoutError` on timeout. `None` means wait indefinitely.
496+
[`TimeoutError`][TimeoutError] on timeout. `None` means wait
497+
indefinitely.
498498
args: Positional arguments to pass to the worker method.
499499
kwargs: Keyword arguments to pass to the worker method.
500500

vllm/entrypoints/openai/serving_engine.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -582,7 +582,8 @@ def _tokenize_prompt_input(
582582
add_special_tokens: bool = True,
583583
) -> TextTokensPrompt:
584584
"""
585-
A simpler implementation of {meth}`_tokenize_prompt_input_or_inputs`
585+
A simpler implementation of
586+
[`_tokenize_prompt_input_or_inputs`][vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs]
586587
that assumes single input.
587588
"""
588589
return next(
@@ -603,7 +604,8 @@ def _tokenize_prompt_inputs(
603604
add_special_tokens: bool = True,
604605
) -> Iterator[TextTokensPrompt]:
605606
"""
606-
A simpler implementation of {meth}`_tokenize_prompt_input_or_inputs`
607+
A simpler implementation of
608+
[`_tokenize_prompt_input_or_inputs`][vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs]
607609
that assumes multiple inputs.
608610
"""
609611
for text in prompt_inputs:

0 commit comments

Comments (0)