vllm.entrypoints.pooling.embed.protocol

EmbeddingRequest module-attribute

EmbeddingBytesResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingBytesResponse(OpenAIBaseModel):
    content: list[bytes]
    headers: dict[str, str] | None = None
    media_type: str = "application/octet-stream"

content instance-attribute

content: list[bytes]

headers class-attribute instance-attribute

headers: dict[str, str] | None = None

media_type class-attribute instance-attribute

media_type: str = 'application/octet-stream'
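
Example: a minimal construction sketch, assuming EmbeddingBytesResponse is importable from the module path shown in the source listing above.

import struct

from vllm.entrypoints.pooling.embed.protocol import EmbeddingBytesResponse

# Pack one embedding vector as little-endian float32 bytes.
vector = [0.1, -0.2, 0.3]
payload = struct.pack(f"<{len(vector)}f", *vector)

response = EmbeddingBytesResponse(content=[payload])
print(response.media_type)       # application/octet-stream
print(len(response.content[0]))  # 12 bytes (3 values x 4 bytes each)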

EmbeddingChatRequest

Bases: PoolingBasicRequestMixin, ChatRequestMixin

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingChatRequest(PoolingBasicRequestMixin, ChatRequestMixin):
    encoding_format: EncodingFormat = "float"
    dimensions: int | None = None

    # --8<-- [start:chat-embedding-extra-params]
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:chat-embedding-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            use_activation=self.normalize,
        )

dimensions class-attribute instance-attribute

dimensions: int | None = None

embed_dtype class-attribute instance-attribute

embed_dtype: EmbedDType = Field(
    default="float32",
    description="What dtype to use for encoding. Default to using float32 for base64 encoding to match the OpenAI python client behavior. This parameter will affect base64 and binary_response.",
)

encoding_format class-attribute instance-attribute

encoding_format: EncodingFormat = 'float'

endianness class-attribute instance-attribute

endianness: Endianness = Field(
    default="native",
    description="What endianness to use for encoding. Default to using native for base64 encoding to match the OpenAI python client behavior.This parameter will affect base64 and binary_response.",
)

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: dict[str, Any] | None = Field(
    default=None,
    description="Additional kwargs to pass to the HF processor.",
)

normalize class-attribute instance-attribute

normalize: bool | None = Field(
    default=None,
    description="Whether to normalize the embeddings outputs. Default is True.",
)

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/embed/protocol.py
def to_pooling_params(self):
    return PoolingParams(
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        dimensions=self.dimensions,
        use_activation=self.normalize,
    )
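
Example: a minimal sketch of building the request and converting it to pooling parameters. It assumes ChatRequestMixin contributes the usual model and messages fields (they are not shown in this listing) and uses a placeholder model name.

from vllm.entrypoints.pooling.embed.protocol import EmbeddingChatRequest

request = EmbeddingChatRequest(
    model="intfloat/e5-mistral-7b-instruct",          # assumed mixin field, placeholder name
    messages=[{"role": "user", "content": "Hello"}],  # assumed mixin field
    dimensions=256,
    normalize=False,
)

params = request.to_pooling_params()
print(params.dimensions, params.use_activation)  # 256 False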

EmbeddingCompletionRequest

Bases: PoolingBasicRequestMixin, CompletionRequestMixin

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingCompletionRequest(PoolingBasicRequestMixin, CompletionRequestMixin):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/embeddings

    encoding_format: EncodingFormat = "float"
    dimensions: int | None = None

    # --8<-- [start:embedding-extra-params]
    normalize: bool | None = Field(
        default=None,
        description="Whether to normalize the embeddings outputs. Default is True.",
    )
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior."
            "This parameter will affect base64 and binary_response."
        ),
    )
    # --8<-- [end:embedding-extra-params]

    def to_pooling_params(self):
        return PoolingParams(
            dimensions=self.dimensions,
            use_activation=self.normalize,
            truncate_prompt_tokens=self.truncate_prompt_tokens,
        )

dimensions class-attribute instance-attribute

dimensions: int | None = None

embed_dtype class-attribute instance-attribute

embed_dtype: EmbedDType = Field(
    default="float32",
    description="What dtype to use for encoding. Default to using float32 for base64 encoding to match the OpenAI python client behavior. This parameter will affect base64 and binary_response.",
)

encoding_format class-attribute instance-attribute

encoding_format: EncodingFormat = 'float'

endianness class-attribute instance-attribute

endianness: Endianness = Field(
    default="native",
    description="What endianness to use for encoding. Default to using native for base64 encoding to match the OpenAI python client behavior.This parameter will affect base64 and binary_response.",
)

normalize class-attribute instance-attribute

normalize: bool | None = Field(
    default=None,
    description="Whether to normalize the embeddings outputs. Default is True.",
)

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/embed/protocol.py
def to_pooling_params(self):
    return PoolingParams(
        dimensions=self.dimensions,
        use_activation=self.normalize,
        truncate_prompt_tokens=self.truncate_prompt_tokens,
    )
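
Example: a usage sketch with the OpenAI Python client against a running vLLM server; the base URL and model name are placeholders. The extra parameters above are not part of the OpenAI API, so they travel in extra_body.

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.embeddings.create(
    model="intfloat/e5-mistral-7b-instruct",
    input=["vLLM is fast", "Embeddings are vectors"],
    encoding_format="float",
    # Extra vLLM parameters: normalize comes from the block above;
    # truncate_prompt_tokens is assumed to come from PoolingBasicRequestMixin.
    extra_body={"normalize": False, "truncate_prompt_tokens": 128},
)
print(len(response.data), len(response.data[0].embedding))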

EmbeddingResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
    object: str = "list"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    data: list[EmbeddingResponseData]
    usage: UsageInfo

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

data instance-attribute

data: list[EmbeddingResponseData]

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"embd-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'list'

usage instance-attribute

usage: UsageInfo

EmbeddingResponseData

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/embed/protocol.py
class EmbeddingResponseData(OpenAIBaseModel):
    index: int
    object: str = "embedding"
    embedding: list[float] | str

embedding instance-attribute

embedding: list[float] | str

index instance-attribute

index: int

object class-attribute instance-attribute

object: str = 'embedding'
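
Example: a minimal sketch of assembling a response from its parts. The import path and field names of UsageInfo are assumptions; the base64 string and model name are placeholders.

from vllm.entrypoints.openai.protocol import UsageInfo  # assumed location
from vllm.entrypoints.pooling.embed.protocol import (
    EmbeddingResponse,
    EmbeddingResponseData,
)

data = [
    # embedding is a list[float] when encoding_format="float" ...
    EmbeddingResponseData(index=0, embedding=[0.1, 0.2, 0.3]),
    # ... or a base64-encoded string when encoding_format="base64".
    EmbeddingResponseData(index=1, embedding="zczMPc3MTL6amZk+"),
]

response = EmbeddingResponse(
    model="intfloat/e5-mistral-7b-instruct",  # placeholder
    data=data,
    usage=UsageInfo(prompt_tokens=8, total_tokens=8),
)
print(response.object, response.data[0].object)  # list embedding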