diff --git a/libs/core/README.md b/libs/core/README.md
index a58084d1548..623e9adbfd6 100644
--- a/libs/core/README.md
+++ b/libs/core/README.md
@@ -7,8 +7,8 @@
 
 Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
 
-To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
-[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+To help you ship LangChain apps to production faster, check out [LangSmith](https://www.langchain.com/langsmith).
+[LangSmith](https://www.langchain.com/langsmith) is a unified developer platform for building, testing, and monitoring LLM applications.
 
 ## Quick Install
diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 4a1c3867745..7e11c491d6f 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -214,7 +214,7 @@ async def agenerate_from_stream(
     """Async generate from a stream.
 
     Args:
-        stream: Iterator of `ChatGenerationChunk`.
+        stream: AsyncIterator of `ChatGenerationChunk`.
 
     Returns:
         Chat result.
@@ -310,7 +310,7 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     - If `False` (Default), will always use streaming case if available.
 
     The main reason for this flag is that code might be written using `stream` and
-    a user may want to swap out a given model for another model whose the implementation
+    a user may want to swap out a given model for another model whose implementation
     does not properly support streaming.
     """
diff --git a/libs/core/langchain_core/language_models/llms.py b/libs/core/langchain_core/language_models/llms.py
index 04e5600bd27..5aa287ada8e 100644
--- a/libs/core/langchain_core/language_models/llms.py
+++ b/libs/core/langchain_core/language_models/llms.py
@@ -311,7 +311,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     @property
     @override
     def OutputType(self) -> type[str]:
-        """Get the input type for this `Runnable`."""
+        """Get the output type for this `Runnable`."""
         return str
 
     def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py
index 192c817197e..312d416799d 100644
--- a/libs/core/langchain_core/tracers/log_stream.py
+++ b/libs/core/langchain_core/tracers/log_stream.py
@@ -264,8 +264,9 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
             - `'original'` is the format used by all current tracers.
               This format is slightly inconsistent with respect to inputs and outputs.
             - 'streaming_events' is used for supporting streaming events, for
-              internal usage. It will likely change in the future, or deprecated
-              entirely in favor of a dedicated async tracer for streaming events.
+              internal usage. It will likely change in the future,
+              or be deprecated entirely in favor of a dedicated async
+              tracer for streaming events.
 
         Raises:
             ValueError: If an invalid schema format is provided (internal use only).
@@ -356,7 +357,7 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
             yield chunk
 
     def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
-        """Tap an output async iterator to stream its values to the log.
+        """Tap an output iterator to stream its values to the log.
 
         Args:
             run_id: The ID of the run.
@@ -673,7 +674,7 @@ async def _astream_log_implementation(
     """Implementation of astream_log for a given runnable.
 
     The implementation has been factored out (at least temporarily) as both
-    `astream_log` and `astream_events` relies on it.
+    `astream_log` and `astream_events` rely on it.
 
     Args:
         runnable: The runnable to run in streaming mode.
diff --git a/libs/langchain/README.md b/libs/langchain/README.md
index d5f9b82452c..c6d8dc6c246 100644
--- a/libs/langchain/README.md
+++ b/libs/langchain/README.md
@@ -7,8 +7,8 @@
 
 Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
 
-To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
-[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+To help you ship LangChain apps to production faster, check out [LangSmith](https://www.langchain.com/langsmith).
+[LangSmith](https://www.langchain.com/langsmith) is a unified developer platform for building, testing, and monitoring LLM applications.
 
 ## Quick Install
diff --git a/libs/langchain_v1/README.md b/libs/langchain_v1/README.md
index 2796a2d40b5..581187e58da 100644
--- a/libs/langchain_v1/README.md
+++ b/libs/langchain_v1/README.md
@@ -7,8 +7,8 @@
 
 Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
 
-To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
-[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+To help you ship LangChain apps to production faster, check out [LangSmith](https://www.langchain.com/langsmith).
+[LangSmith](https://www.langchain.com/langsmith) is a unified developer platform for building, testing, and monitoring LLM applications.
 
 ## Quick Install
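As a point of reference for the `disable_streaming` wording touched above: it is a field on `BaseChatModel`, so any chat model subclass accepts it at construction time. Below is a minimal sketch of the swap-out scenario the docstring describes, assuming `langchain-openai` is installed and `gpt-4o-mini` stands in for a model whose streaming implementation is unreliable:

```python
from langchain_openai import ChatOpenAI  # assumed available; any BaseChatModel subclass works

# Code written against .stream() keeps working after the swap: with
# disable_streaming=True, stream()/astream() defer to invoke()/ainvoke()
# instead of hitting the model's (possibly broken) streaming path.
llm = ChatOpenAI(model="gpt-4o-mini", disable_streaming=True)

for chunk in llm.stream("Summarize this PR in one sentence."):
    # Streaming is bypassed, so the loop typically yields a single chunk
    # carrying the full response.
    print(chunk.content, end="", flush=True)
```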