"""Source code for mb.entities.inputs."""

from typing import Self

from pydantic import BaseModel, Field, JsonValue


class ChatMessage(BaseModel):
    """An OpenAI-style message in the conversation."""

    role: str
    content: str

    @classmethod
    def user(cls, content: str) -> Self:
        """Construct a message carrying the ``user`` role."""
        return cls(role="user", content=content)

    @classmethod
    def assistant(cls, content: str) -> Self:
        """Construct a message carrying the ``assistant`` role."""
        return cls(role="assistant", content=content)
Resource = JsonValue
"""A resource used for turn generation.

Currently, any JSON-serializable value is acceptable.
"""
class TurnMetadata(BaseModel):
    """Metadata associated with a single conversation turn."""

    categories: dict[str, str]
    """Categories associated with one turn in the conversation.

    For example, whether the query is open-ended or factoid, or whether it is
    concise or verbose.
    """

    resources: list[Resource]
    """Resources used for generation, such as documents, API calls, etc."""
class InputMetadata(BaseModel):
    """Metadata associated with an entire agent input."""

    turns: list[TurnMetadata] | None = None
    """Metadata associated with each conversation turn.

    Each pair of user-assistant messages forms a turn, except for the last
    turn, which has only a user message. This list contains the metadata for
    each turn.
    """

    categories: dict[str, str] | None = None
    """Categories associated with the entire input; they remain the same for
    all turns. For example, user persona attributes.
    """
class Inputs(BaseModel, extra="allow"):
    """Agent input description."""

    messages: list[ChatMessage] = Field(..., min_length=1)
    r"""The chat messages to be processed by the agent, in OpenAI-style format.

    The last message must have a ``user`` role, and represents the user's
    request for the agent.

    Example:

    .. code-block:: json

        {
            "messages": [
                {"role": "user", "content": "Who is the King of England?"},
                {"role": "assistant", "content": "The King of England is King Charles III."},
                {"role": "user", "content": "When was he born?"}
            ]
        }
    """

    metadata: InputMetadata | None = None
    """Additional metadata about the input, such as categories and resources
    used for generation."""

    tools: list[str] | None = None
    """A subset of the tools available to the agent. If not provided, the
    agent will use all available tools."""

    def message_dicts(self) -> list[dict[str, str]]:
        """Return messages as OpenAI-style dicts.

        This is a convenience for integrations that expect messages in the
        shape `[{"role": "...", "content": "..."}, ...]` (for example, agent
        runners).
        """
        dumped: list[dict[str, str]] = []
        for message in self.messages:
            dumped.append(message.model_dump())
        return dumped