From 93e89cf972611f4e44cbb06d7a4aaf774539219f Mon Sep 17 00:00:00 2001
From: Mason Daugherty
Date: Fri, 15 Aug 2025 13:58:11 -0400
Subject: [PATCH] .

---
 docs/docs/how_to/index.mdx                    |  4 ++--
 .../language_models/chat_models.py            |  3 ++-
 libs/core/langchain_core/messages/ai.py       |  3 ---
 .../langchain_core/outputs/chat_generation.py | 22 ++++++++++---------
 libs/core/langchain_core/prompt_values.py     |  8 +++++--
 5 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/docs/docs/how_to/index.mdx b/docs/docs/how_to/index.mdx
index dad7eab3603..5706bf002ec 100644
--- a/docs/docs/how_to/index.mdx
+++ b/docs/docs/how_to/index.mdx
@@ -72,7 +72,7 @@ See [supported integrations](/docs/integrations/chat/) for details on getting st
 
 ### Example selectors
 
-[Example Selectors](/docs/concepts/example_selectors) are responsible for selecting the correct few shot examples to pass to the prompt.
+[Example Selectors](/docs/concepts/example_selectors) are responsible for selecting the correct few-shot examples to pass to the prompt.
 
 - [How to: use example selectors](/docs/how_to/example_selectors)
 - [How to: select examples by length](/docs/how_to/example_selectors_length_based)
@@ -168,7 +168,7 @@ See [supported integrations](/docs/integrations/vectorstores/) for details on ge
 
 Indexing is the process of keeping your vectorstore in-sync with the underlying data source.
 
-- [How to: reindex data to keep your vectorstore in sync with the underlying data source](/docs/how_to/indexing)
+- [How to: reindex data to keep your vectorstore in-sync with the underlying data source](/docs/how_to/indexing)
 
 ### Tools
 
diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 10c7de268c0..db486eb2c59 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -508,7 +508,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         **kwargs: Any,
     ) -> Iterator[AIMessageChunk]:
         if not self._should_stream(async_api=False, **{**kwargs, "stream": True}):
-            # model doesn't implement streaming, so use default implementation
+            # Model doesn't implement streaming, so use default implementation
             yield cast(
                 "AIMessageChunk",
                 self.invoke(input, config=config, stop=stop, **kwargs),
@@ -1284,6 +1284,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
+        # We expect that subclasses implement this method if they support streaming.
         raise NotImplementedError
 
     async def _astream(
diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py
index cc0e532f7c8..74e726b4e1e 100644
--- a/libs/core/langchain_core/messages/ai.py
+++ b/libs/core/langchain_core/messages/ai.py
@@ -39,7 +39,6 @@ class InputTokenDetails(TypedDict, total=False):
     Does *not* need to sum to full input token count. Does *not* need to have all keys.
 
     Example:
-
         .. code-block:: python
 
             {
@@ -75,7 +74,6 @@ class OutputTokenDetails(TypedDict, total=False):
     Does *not* need to sum to full output token count. Does *not* need to have all keys.
 
     Example:
-
         .. code-block:: python
 
             {
@@ -103,7 +101,6 @@ class UsageMetadata(TypedDict):
     This is a standard representation of token usage that is consistent across models.
 
     Example:
-
         .. code-block:: python
 
             {
diff --git a/libs/core/langchain_core/outputs/chat_generation.py b/libs/core/langchain_core/outputs/chat_generation.py
index d42f2038d34..156544db05c 100644
--- a/libs/core/langchain_core/outputs/chat_generation.py
+++ b/libs/core/langchain_core/outputs/chat_generation.py
@@ -15,14 +15,14 @@ from langchain_core.utils._merge import merge_dicts
 
 class ChatGeneration(Generation):
     """A single chat generation output.
 
-    A subclass of Generation that represents the response from a chat model
+    A subclass of ``Generation`` that represents the response from a chat model
     that generates chat messages.
 
-    The `message` attribute is a structured representation of the chat message.
-    Most of the time, the message will be of type `AIMessage`.
+    The ``message`` attribute is a structured representation of the chat message.
+    Most of the time, the message will be of type ``AIMessage``.
 
     Users working with chat models will usually access information via either
-    `AIMessage` (returned from runnable interfaces) or `LLMResult` (available
+    ``AIMessage`` (returned from runnable interfaces) or ``LLMResult`` (available
     via callbacks).
     """
@@ -31,6 +31,7 @@ class ChatGeneration(Generation):
 
     .. warning::
         SHOULD NOT BE SET DIRECTLY!
+
     """
     message: BaseMessage
     """The message output by the chat model."""
@@ -50,6 +51,7 @@ class ChatGeneration(Generation):
 
         Raises:
             ValueError: If the message is not a string or a list.
+
         """
         text = ""
         if isinstance(self.message.content, str):
@@ -69,9 +71,9 @@ class ChatGeneration(Generation):
 
 
 class ChatGenerationChunk(ChatGeneration):
-    """ChatGeneration chunk.
+    """``ChatGeneration`` chunk.
 
-    ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
+    ``ChatGeneration`` chunks can be concatenated with other ``ChatGeneration`` chunks.
     """
 
     message: BaseMessageChunk
@@ -83,11 +85,11 @@ class ChatGenerationChunk(ChatGeneration):
     def __add__(
         self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
     ) -> ChatGenerationChunk:
-        """Concatenate two ChatGenerationChunks.
+        """Concatenate two ``ChatGenerationChunks``.
 
         Args:
-            other: The other ChatGenerationChunk or list of ChatGenerationChunks to
-                concatenate.
+            other: The other ``ChatGenerationChunk`` or list of ``ChatGenerationChunk``s
+                to concatenate.
         """
         if isinstance(other, ChatGenerationChunk):
             generation_info = merge_dicts(
@@ -116,7 +118,7 @@ class ChatGenerationChunk(ChatGeneration):
 def merge_chat_generation_chunks(
     chunks: list[ChatGenerationChunk],
 ) -> Union[ChatGenerationChunk, None]:
-    """Merge a list of ChatGenerationChunks into a single ChatGenerationChunk."""
+    """Merge list of ``ChatGenerationChunk``s into a single ``ChatGenerationChunk``."""
     if not chunks:
         return None
 
diff --git a/libs/core/langchain_core/prompt_values.py b/libs/core/langchain_core/prompt_values.py
index 5f5dd7eb6b2..01827efcd8f 100644
--- a/libs/core/langchain_core/prompt_values.py
+++ b/libs/core/langchain_core/prompt_values.py
@@ -107,8 +107,12 @@ class ImageURL(TypedDict, total=False):
     """Image URL."""
 
     detail: Literal["auto", "low", "high"]
-    """Specifies the detail level of the image. Defaults to "auto".
-    Can be "auto", "low", or "high"."""
+    """Specifies the detail level of the image. Defaults to ``'auto'``.
+    Can be ``'auto'``, ``'low'``, or ``'high'``.
+
+    This follows OpenAI's Chat Completion API's image URL format.
+
+    """
 
     url: str
     """Either a URL of the image or the base64 encoded image data."""
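
Reviewer note, not part of the patch: a minimal sketch of the ``ChatGenerationChunk`` concatenation behavior the updated docstrings describe, assuming a current ``langchain-core`` install (the example values are illustrative only):

    from langchain_core.messages import AIMessageChunk
    from langchain_core.outputs import ChatGenerationChunk
    from langchain_core.outputs.chat_generation import merge_chat_generation_chunks

    # Adding two chunks merges their message content and generation_info.
    left = ChatGenerationChunk(message=AIMessageChunk(content="Hello, "))
    right = ChatGenerationChunk(message=AIMessageChunk(content="world!"))
    combined = left + right
    print(combined.text)  # -> "Hello, world!"

    # merge_chat_generation_chunks folds a whole list the same way,
    # returning None for an empty list.
    merged = merge_chat_generation_chunks([left, right])
    assert merged is not None and merged.text == "Hello, world!"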