@@ -80,22 +80,24 @@ class EmbedsPrompt(TypedDict):
8080"""
8181Set of possible schemas for a single prompt:
8282
83- - A text prompt ({class} `str` or {class} `TextPrompt`)
84- - A tokenized prompt ({class} `TokensPrompt`)
85- - An embeddings prompt ({class} `EmbedsPrompt`)
83+ - A text prompt ([`str`][] or [`TextPrompt`][vllm.inputs.data.TextPrompt])
84+ - A tokenized prompt ([`TokensPrompt`][vllm.inputs.data.TokensPrompt])
85+ - An embeddings prompt ([`EmbedsPrompt`][vllm.inputs.data.EmbedsPrompt])
8686
8787Note that "singleton" is as opposed to a data structure
8888which encapsulates multiple prompts, i.e. of the sort
8989which may be utilized for encoder/decoder models when
9090the user desires to express both the encoder & decoder
91- prompts explicitly, i.e. {class}`ExplicitEncoderDecoderPrompt`
91+ prompts explicitly, i.e.
92+ [`ExplicitEncoderDecoderPrompt`][vllm.inputs.data.ExplicitEncoderDecoderPrompt]
9293
93- A prompt of type {class} `SingletonPrompt` may be employed
94- as (1) input to a decoder-only model, (2) input to
94+ A prompt of type [`SingletonPrompt`][vllm.inputs.data.SingletonPrompt] may be
95+ employed as (1) input to a decoder-only model, (2) input to
9596the encoder of an encoder/decoder model, in the scenario
9697where the decoder-prompt is not specified explicitly, or
9798(3) as a member of a larger data structure encapsulating
98- more than one prompt, i.e. {class}`ExplicitEncoderDecoderPrompt`
99+ more than one prompt, i.e.
100+ [`ExplicitEncoderDecoderPrompt`][vllm.inputs.data.ExplicitEncoderDecoderPrompt]
99101"""
100102
101103
@@ -126,18 +128,20 @@ class ExplicitEncoderDecoderPrompt(TypedDict, Generic[_T1_co, _T2_co]):
126128 comprising an explicit encoder prompt and a decoder prompt.
127129
128130 The encoder and decoder prompts, respectively, may be formatted
129- according to any of the {class}`SingletonPrompt` schemas,
131+ according to any of the
132+ [`SingletonPrompt`][vllm.inputs.data.SingletonPrompt] schemas,
130133 and are not required to have the same schema.
131134
132135 Only the encoder prompt may have multi-modal data. mm_processor_kwargs
133136 should be at the top-level, and should not be set in the encoder/decoder
134137 prompts, since they are agnostic to the encoder/decoder.
135138
136- Note that an {class}`ExplicitEncoderDecoderPrompt` may not
137- be used as an input to a decoder-only model,
139+ Note that an
140+ [`ExplicitEncoderDecoderPrompt`][vllm.inputs.data.ExplicitEncoderDecoderPrompt]
141+ may not be used as an input to a decoder-only model,
138142 and that the `encoder_prompt` and `decoder_prompt`
139143 fields of this data structure themselves must be
140- {class} `SingletonPrompt` instances.
144+ [`SingletonPrompt`][vllm.inputs.data.SingletonPrompt] instances.
141145 """
142146
143147 encoder_prompt : _T1_co
@@ -152,11 +156,11 @@ class ExplicitEncoderDecoderPrompt(TypedDict, Generic[_T1_co, _T2_co]):
152156Set of possible schemas for an LLM input, including
153157both decoder-only and encoder/decoder input types:
154158
155- - A text prompt ({class} `str` or {class} `TextPrompt`)
156- - A tokenized prompt ({class} `TokensPrompt`)
157- - An embeddings prompt ({class} `EmbedsPrompt`)
159+ - A text prompt ([`str`][] or [`TextPrompt`][vllm.inputs.data.TextPrompt])
160+ - A tokenized prompt ([`TokensPrompt`][vllm.inputs.data.TokensPrompt])
161+ - An embeddings prompt ([`EmbedsPrompt`][vllm.inputs.data.EmbedsPrompt])
158162- A single data structure containing both an encoder and a decoder prompt
159- ({class} `ExplicitEncoderDecoderPrompt`)
163+ ([`ExplicitEncoderDecoderPrompt`][vllm.inputs.data.ExplicitEncoderDecoderPrompt])
160164"""
161165
162166
@@ -189,7 +193,8 @@ def token_inputs(
189193 prompt : Optional [str ] = None ,
190194 cache_salt : Optional [str ] = None ,
191195) -> TokenInputs :
192- """Construct {class}`TokenInputs` from optional values."""
196+ """Construct [`TokenInputs`][vllm.inputs.data.TokenInputs] from optional
197+ values."""
193198 inputs = TokenInputs (type = "token" , prompt_token_ids = prompt_token_ids )
194199
195200 if prompt is not None :
@@ -221,7 +226,8 @@ def embeds_inputs(
221226 prompt_embeds : torch .Tensor ,
222227 cache_salt : Optional [str ] = None ,
223228) -> EmbedsInputs :
224- """Construct :class:`EmbedsInputs` from optional values."""
229+ """Construct [`EmbedsInputs`][vllm.inputs.data.EmbedsInputs] from optional
230+ values."""
225231 inputs = EmbedsInputs (type = "embeds" , prompt_embeds = prompt_embeds )
226232
227233 if cache_salt is not None :
@@ -232,19 +238,20 @@ def embeds_inputs(
232238
233239DecoderOnlyInputs = Union [TokenInputs , EmbedsInputs , "MultiModalInputs" ]
234240"""
235- The inputs in {class}`~ vllm.LLMEngine` before they are
241+ The inputs in [`LLMEngine`][ vllm.engine.llm_engine. LLMEngine] before they are
236242passed to the model executor.
237243This specifies the data required for decoder-only models.
238244"""
239245
240246
241247class EncoderDecoderInputs (TypedDict ):
242248 """
243- The inputs in {class}`~ vllm.LLMEngine` before they are
244- passed to the model executor.
249+ The inputs in [`LLMEngine`][ vllm.engine.llm_engine. LLMEngine] before they
250+ are passed to the model executor.
245251
246252 This specifies the required data for encoder-decoder models.
247253 """
254+
248255 encoder : Union [TokenInputs , "MultiModalInputs" ]
249256 """The inputs for the encoder portion."""
250257
@@ -254,13 +261,13 @@ class EncoderDecoderInputs(TypedDict):
254261
255262SingletonInputs = Union [TokenInputs , EmbedsInputs , "MultiModalInputs" ]
256263"""
257- A processed {class} `SingletonPrompt` which can be passed to
258- {class} `vllm.sequence.Sequence`.
264+ A processed [`SingletonPrompt`][vllm.inputs.data.SingletonPrompt] which can be
265+ passed to [`vllm.sequence.Sequence`][].
259266"""
260267
261268ProcessorInputs = Union [DecoderOnlyInputs , EncoderDecoderInputs ]
262269"""
263- The inputs to {data} `vllm.inputs.InputProcessor` .
270+ The outputs from [`vllm.inputs.preprocess.InputPreprocessor`][].
264271"""
265272
266273_T1 = TypeVar ("_T1" , bound = SingletonPrompt , default = SingletonPrompt )
@@ -277,7 +284,8 @@ def build_explicit_enc_dec_prompt(
277284 return ExplicitEncoderDecoderPrompt (
278285 encoder_prompt = encoder_prompt ,
279286 decoder_prompt = decoder_prompt ,
280- mm_processor_kwargs = mm_processor_kwargs )
287+ mm_processor_kwargs = mm_processor_kwargs ,
288+ )
281289
282290
283291def zip_enc_dec_prompts (
@@ -288,7 +296,8 @@ def zip_enc_dec_prompts(
288296) -> list [ExplicitEncoderDecoderPrompt [_T1 , _T2 ]]:
289297 """
290298 Zip encoder and decoder prompts together into a list of
291- {class}`ExplicitEncoderDecoderPrompt` instances.
299+ [`ExplicitEncoderDecoderPrompt`][vllm.inputs.data.ExplicitEncoderDecoderPrompt]
300+ instances.
292301
293302 ``mm_processor_kwargs`` may also be provided; if a dict is passed, the same
294303 dictionary will be used for every encoder/decoder prompt. If an iterable is
@@ -299,10 +308,11 @@ def zip_enc_dec_prompts(
299308 if isinstance (mm_processor_kwargs , dict ):
300309 return [
301310 build_explicit_enc_dec_prompt (
302- encoder_prompt , decoder_prompt ,
303- cast (dict [str , Any ], mm_processor_kwargs ))
304- for (encoder_prompt ,
305- decoder_prompt ) in zip (enc_prompts , dec_prompts )
311+ encoder_prompt ,
312+ decoder_prompt ,
313+ cast (dict [str , Any ], mm_processor_kwargs ),
314+ ) for (encoder_prompt ,
315+ decoder_prompt ) in zip (enc_prompts , dec_prompts )
306316 ]
307317 return [
308318 build_explicit_enc_dec_prompt (encoder_prompt , decoder_prompt ,
0 commit comments