Merge branch 'master' into pprados/pdf-router

This commit is contained in:
Philippe PRADOS 2025-04-17 09:06:27 +02:00 committed by Philippe Prados
commit dfda5a0355
33 changed files with 811 additions and 454 deletions

View File

@@ -275,3 +275,7 @@ if os.environ.get("READTHEDOCS", "") == "True":
    html_context["READTHEDOCS"] = True

master_doc = "index"

# If a signature's length in characters exceeds 60,
# each parameter within the signature will be displayed on an individual logical line
maximum_signature_line_length = 60

View File

@@ -19,9 +19,15 @@
"\n",
"This doc will help you get started with AWS Bedrock [chat models](/docs/concepts/chat_models). Amazon Bedrock is a fully managed service that offers a choice of high-performing foundation models (FMs) from leading AI companies like AI21 Labs, Anthropic, Cohere, Meta, Stability AI, and Amazon via a single API, along with a broad set of capabilities you need to build generative AI applications with security, privacy, and responsible AI. Using Amazon Bedrock, you can easily experiment with and evaluate top FMs for your use case, privately customize them with your data using techniques such as fine-tuning and Retrieval Augmented Generation (RAG), and build agents that execute tasks using your enterprise systems and data sources. Since Amazon Bedrock is serverless, you don't have to manage any infrastructure, and you can securely integrate and deploy generative AI capabilities into your applications using the AWS services you are already familiar with.\n",
"\n",
"AWS Bedrock maintains a [Converse API](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html) which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html).\n",
"\n",
":::info\n",
"\n",
"We recommend the Converse API for users who do not need to use custom models. It can be accessed using [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html).\n",
"\n",
":::\n",
"\n",
"For detailed documentation of all Bedrock features and configurations head to the [API reference](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html).\n",
"\n", "\n",
"## Overview\n", "## Overview\n",
"### Integration details\n", "### Integration details\n",
@ -29,8 +35,12 @@
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n", "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/bedrock) | Package downloads | Package latest |\n",
"| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
"| [ChatBedrock](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-aws?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-aws?style=flat-square&label=%20) |\n", "| [ChatBedrock](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock.ChatBedrock.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-aws?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-aws?style=flat-square&label=%20) |\n",
"| [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) | [langchain-aws](https://python.langchain.com/api_reference/aws/index.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-aws?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-aws?style=flat-square&label=%20) |\n",
"\n", "\n",
"### Model features\n", "### Model features\n",
"\n",
"The below apply to both `ChatBedrock` and `ChatBedrockConverse`.\n",
"\n",
"| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
"| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
"| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ |\n", "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ❌ | ✅ | ❌ |\n",
@@ -48,7 +58,9 @@
"cell_type": "markdown",
"id": "72ee0c4b-9764-423a-9dbf-95129e185210",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
@@ -98,11 +110,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_aws import ChatBedrockConverse\n",
"\n",
"llm = ChatBedrockConverse(\n",
"    model_id=\"anthropic.claude-3-5-sonnet-20240620-v1:0\",\n",
"    # temperature=...,\n",
"    # max_tokens=...,\n",
"    # other params...\n",
")"
]
@@ -117,19 +130,17 @@
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fcd8de52-4a1b-4875-b463-d41b031e06a1",
"metadata": {},
"tags": []
},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 29, 'completion_tokens': 21, 'total_tokens': 50}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-fdb07dc3-ff72-430d-b22b-e7824b15c766-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})" "AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': 'b07d1630-06f2-44b1-82bf-e82538dd2215', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 16 Apr 2025 19:35:34 GMT', 'content-type': 'application/json', 'content-length': '206', 'connection': 'keep-alive', 'x-amzn-requestid': 'b07d1630-06f2-44b1-82bf-e82538dd2215'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [488]}, 'model_name': 'anthropic.claude-3-5-sonnet-20240620-v1:0'}, id='run-d09ed928-146a-4336-b1fd-b63c9e623494-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})"
] ]
}, },
"execution_count": 5, "execution_count": 2,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@@ -148,7 +159,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
"metadata": {},
"outputs": [
@@ -156,9 +167,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"J'adore la programmation.\n"
"\n",
"J'aime la programmation.\n"
]
}
],
@@ -168,7 +177,146 @@
},
{
"cell_type": "markdown",
"id": "4da16f3e-e80b-48c0-8036-c1cc5f7c8c05",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"Note that `ChatBedrockConverse` emits content blocks while streaming:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "605e04fa-1a76-47ac-8c92-fe128659663e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content=[] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
"content=[{'type': 'text', 'text': 'J', 'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
"content=[{'type': 'text', 'text': \"'adore la\", 'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
"content=[{'type': 'text', 'text': ' programmation.', 'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
"content=[{'index': 0}] additional_kwargs={} response_metadata={} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
"content=[] additional_kwargs={} response_metadata={'stopReason': 'end_turn'} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd'\n",
"content=[] additional_kwargs={} response_metadata={'metrics': {'latencyMs': 600}, 'model_name': 'anthropic.claude-3-5-sonnet-20240620-v1:0'} id='run-d0e0836e-7146-4c3d-97c7-ad23dac6febd' usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}}\n"
]
}
],
"source": [
"for chunk in llm.stream(messages):\n",
" print(chunk)"
]
},
{
"cell_type": "markdown",
"id": "0ef05abb-9c04-4dc3-995e-f857779644d5",
"metadata": {},
"source": [
"You can filter to text using the [.text()](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.ai.AIMessage.html#langchain_core.messages.ai.AIMessage.text) method on the output:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2a4e743f-ea7d-4e5a-9b12-f9992362de8b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"|J|'adore la| programmation.||||"
]
}
],
"source": [
"for chunk in llm.stream(messages):\n",
" print(chunk.text(), end=\"|\")"
]
},
{
"cell_type": "markdown",
"id": "a77519e5-897d-41a0-a9bb-55300fa79efc",
"metadata": {},
"source": [
"## Prompt caching\n",
"\n",
"Bedrock supports [caching](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html) of elements of your prompts, including messages and tools. This allows you to re-use large documents, instructions, [few-shot documents](/docs/concepts/few_shot_prompting/), and other data to reduce latency and costs.\n",
"\n",
":::note\n",
"\n",
"Not all models support prompt caching. See supported models [here](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-caching.html#prompt-caching-models).\n",
"\n",
":::\n",
"\n",
"To enable caching on an element of a prompt, mark its associated content block using the `cachePoint` key. See example below:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "d5f63d01-85e8-4797-a2be-0fea747a6049",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"First invocation:\n",
"{'cache_creation': 1528, 'cache_read': 0}\n",
"\n",
"Second:\n",
"{'cache_creation': 0, 'cache_read': 1528}\n"
]
}
],
"source": [
"import requests\n",
"from langchain_aws import ChatBedrockConverse\n",
"\n",
"llm = ChatBedrockConverse(model=\"us.anthropic.claude-3-7-sonnet-20250219-v1:0\")\n",
"\n",
"# Pull LangChain readme\n",
"get_response = requests.get(\n",
" \"https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md\"\n",
")\n",
"readme = get_response.text\n",
"\n",
"messages = [\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": \"What's LangChain, according to its README?\",\n",
" },\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": f\"{readme}\",\n",
" },\n",
" {\n",
" \"cachePoint\": {\"type\": \"default\"},\n",
" },\n",
" ],\n",
" },\n",
"]\n",
"\n",
"response_1 = llm.invoke(messages)\n",
"response_2 = llm.invoke(messages)\n",
"\n",
"usage_1 = response_1.usage_metadata[\"input_token_details\"]\n",
"usage_2 = response_2.usage_metadata[\"input_token_details\"]\n",
"\n",
"print(f\"First invocation:\\n{usage_1}\")\n",
"print(f\"\\nSecond:\\n{usage_2}\")"
]
},
{
"cell_type": "markdown",
"id": "1b550667-af5b-4557-b84f-c8f865dad6cb",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Chaining\n", "## Chaining\n",
@@ -179,13 +327,13 @@
{
"cell_type": "code",
"execution_count": 7,
"id": "6033f3fa-0e96-46e3-abb3-1530928fea88",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Here's the German translation:\\n\\nIch liebe das Programmieren.\", additional_kwargs={}, response_metadata={'ResponseMetadata': {'RequestId': '1de3d7c0-8062-4f7e-bb8a-8f725b97a8b0', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 16 Apr 2025 19:32:51 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '1de3d7c0-8062-4f7e-bb8a-8f725b97a8b0'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': [719]}, 'model_name': 'anthropic.claude-3-5-sonnet-20240620-v1:0'}, id='run-7021fcd7-704e-496b-a92e-210139614402-0', usage_metadata={'input_tokens': 23, 'output_tokens': 19, 'total_tokens': 42, 'input_token_details': {'cache_creation': 0, 'cache_read': 0}})"
]
},
"execution_count": 7,
@@ -216,131 +364,6 @@
")"
]
},
{
"cell_type": "markdown",
"id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
"metadata": {},
"source": [
"## Bedrock Converse API\n",
"\n",
"AWS has recently released the Bedrock Converse API which provides a unified conversational interface for Bedrock models. This API does not yet support custom models. You can see a list of all [models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html). To improve reliability the ChatBedrock integration will switch to using the Bedrock Converse API as soon as it has feature parity with the existing Bedrock API. Until then a separate [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html) integration has been released.\n",
"\n",
"We recommend using `ChatBedrockConverse` for users who do not need to use custom models.\n",
"\n",
"You can use it like so:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ae728e59-94d4-40cf-9d24-25ad8723fc59",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", response_metadata={'ResponseMetadata': {'RequestId': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa', 'HTTPStatusCode': 200, 'HTTPHeaders': {'date': 'Wed, 21 Aug 2024 17:23:49 GMT', 'content-type': 'application/json', 'content-length': '243', 'connection': 'keep-alive', 'x-amzn-requestid': '4fcbfbe9-f916-4df2-b0bd-ea1147b550aa'}, 'RetryAttempts': 0}, 'stopReason': 'end_turn', 'metrics': {'latencyMs': 672}}, id='run-77ee9810-e32b-45dc-9ccb-6692253b1f45-0', usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50})"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_aws import ChatBedrockConverse\n",
"\n",
"llm = ChatBedrockConverse(\n",
" model=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" # other params...\n",
")\n",
"\n",
"llm.invoke(messages)"
]
},
{
"cell_type": "markdown",
"id": "4da16f3e-e80b-48c0-8036-c1cc5f7c8c05",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"Note that `ChatBedrockConverse` emits content blocks while streaming:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7794b32e-d8de-4973-bf0f-39807dc745f0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content=[] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'Vo', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'ici', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' tra', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'duction', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' en', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' français', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' :', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': '\\n\\nJ', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': \"'\", 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'a', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'ime', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' la', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': ' programm', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': 'ation', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'type': 'text', 'text': '.', 'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[{'index': 0}] id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[] response_metadata={'stopReason': 'end_turn'} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8'\n",
"content=[] response_metadata={'metrics': {'latencyMs': 713}} id='run-2c92c5af-d771-4cc2-98d9-c11bbd30a1d8' usage_metadata={'input_tokens': 29, 'output_tokens': 21, 'total_tokens': 50}\n"
]
}
],
"source": [
"for chunk in llm.stream(messages):\n",
" print(chunk)"
]
},
{
"cell_type": "markdown",
"id": "0ef05abb-9c04-4dc3-995e-f857779644d5",
"metadata": {},
"source": [
"An output parser can be used to filter to text, if desired:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "2a4e743f-ea7d-4e5a-9b12-f9992362de8b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"|Vo|ici| la| tra|duction| en| français| :|\n",
"\n",
"J|'|a|ime| la| programm|ation|.||||"
]
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"chain = llm | StrOutputParser()\n",
"\n",
"for chunk in chain.stream(messages):\n",
" print(chunk, end=\"|\")"
]
},
{
"cell_type": "markdown",
"id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",

View File

@@ -408,7 +408,7 @@
"\n",
":::\n",
"\n",
"OpenAI supports a [Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions) API that is oriented toward building [agentic](/docs/concepts/agents/) applications. It includes a suite of [built-in tools](https://platform.openai.com/docs/guides/tools?api-mode=responses), including web and file search. It also supports management of [conversation state](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses), allowing you to continue a conversational thread without explicitly passing in previous messages, as well as the output from [reasoning processes](https://platform.openai.com/docs/guides/reasoning?api-mode=responses).\n",
"\n",
"`ChatOpenAI` will route to the Responses API if one of these features is used. You can also specify `use_responses_api=True` when instantiating `ChatOpenAI`.\n",
"\n",
@@ -1056,6 +1056,77 @@
"print(second_response.text())"
]
},
{
"cell_type": "markdown",
"id": "67bf5bd2-0935-40a0-b1cd-c6662b681d4b",
"metadata": {},
"source": [
"### Reasoning output\n",
"\n",
"Some OpenAI models will generate separate text content illustrating their reasoning process. See OpenAI's [reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses) for details.\n",
"\n",
"OpenAI can return a summary of the model's reasoning (although it doesn't expose the raw reasoning tokens). To configure `ChatOpenAI` to return this summary, specify the `reasoning` parameter:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "8d322f3a-0732-45ab-ac95-dfd4596e0d85",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'3^3 = 3 × 3 × 3 = 27.'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"reasoning = {\n",
" \"effort\": \"medium\", # 'low', 'medium', or 'high'\n",
" \"summary\": \"auto\", # 'detailed', 'auto', or None\n",
"}\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"o4-mini\",\n",
" use_responses_api=True,\n",
" model_kwargs={\"reasoning\": reasoning},\n",
")\n",
"response = llm.invoke(\"What is 3^3?\")\n",
"\n",
"# Output\n",
"response.text()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d7dcc082-b7c8-41b7-a5e2-441b9679e41b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"**Calculating power of three**\n",
"\n",
"The user is asking for the result of 3 to the power of 3, which I know is 27. It's a straightforward question, so Ill keep my answer concise: 27. I could explain that this is the same as multiplying 3 by itself twice: 3 × 3 × 3 equals 27. However, since the user likely just needs the answer, Ill simply respond with 27.\n"
]
}
],
"source": [
"# Reasoning\n",
"reasoning = response.additional_kwargs[\"reasoning\"]\n",
"for block in reasoning[\"summary\"]:\n",
" print(block[\"text\"])"
]
},
{
"cell_type": "markdown",
"id": "57e27714",

View File

@@ -35,9 +35,18 @@ from langchain_aws import ChatBedrock
```
### Bedrock Converse
AWS Bedrock maintains a [Converse API](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_Converse.html)
that provides a unified conversational interface for Bedrock models. This API does not
yet support custom models. You can see a list of all
[models that are supported here](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html).
:::info
We recommend the Converse API for users who do not need to use custom models. It can be accessed using [ChatBedrockConverse](https://python.langchain.com/api_reference/aws/chat_models/langchain_aws.chat_models.bedrock_converse.ChatBedrockConverse.html).
:::
See a [usage example](/docs/integrations/chat/bedrock).
```python
from langchain_aws import ChatBedrockConverse
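# A minimal sketch (the model ID is illustrative; assumes AWS credentials are
# configured in your environment):
llm = ChatBedrockConverse(model="anthropic.claude-3-5-sonnet-20240620-v1:0")
llm.invoke("Sing a ballad of LangChain.")
```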

View File

@ -0,0 +1,32 @@
# Smabbler
> Smabbler's graph-powered platform boosts AI development by transforming data into a structured knowledge foundation.
# Galaxia
> Galaxia Knowledge Base is an integrated knowledge base and retrieval mechanism for RAG. In contrast to standard solutions, it is based on Knowledge Graphs built using symbolic NLP and Knowledge Representation techniques. Provided texts are analysed and transformed into graphs containing textual, linguistic, and semantic information. This rich structure allows for retrieval based on semantic information, not on vector similarity/distance.

Implementing RAG with Galaxia involves first uploading your files to [Galaxia](https://beta.cloud.smabbler.com/home) and analyzing them there to build a model (knowledge graph). Once the model is built, you can use `GalaxiaRetriever` to connect to the API and start retrieving.
More information: [docs](https://smabbler.gitbook.io/smabbler)
## Installation
```
pip install langchain-galaxia-retriever
```
## Usage
```
from langchain_galaxia_retriever.retriever import GalaxiaRetriever
gr = GalaxiaRetriever(
    api_url="beta.api.smabbler.com",
    api_key="<key>",
    knowledge_base_id="<knowledge_base_id>",
    n_retries=10,
    wait_time=5,
)
result = gr.invoke('<test question>')
print(result)
```
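
The retriever drops into a standard LangChain RAG chain like any other retriever. A minimal sketch, mirroring the chain in the Galaxia retriever notebook (the prompt wording and the choice of `ChatOpenAI` are illustrative):

```
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template(
    "Answer the question based only on the context provided.\n\n"
    "Context: {context}\n\nQuestion: {question}"
)
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)


def format_docs(docs):
    # Concatenate retrieved documents into a single context string
    return "\n\n".join(doc.page_content for doc in docs)


chain = (
    {"context": gr | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
chain.invoke("<test question>")
```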

View File

@ -0,0 +1,213 @@
{
"cells": [
{
"cell_type": "raw",
"id": "2af1fec5-4ca6-4167-8ee1-13314aac3258",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: Galaxia\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "1d7d6cbc-4373-4fb5-94dd-acd610165452",
"metadata": {},
"source": [
"# Galaxia Retriever\n",
"\n",
"Galaxia is GraphRAG solution, which automates document processing, knowledge base (Graph Language Model) creation and retrieval:\n",
"[galaxia-rag](https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag)\n",
"\n",
"To use Galaxia first upload your texts and create a Graph Language Model here: [smabbler-cloud](https://beta.cloud.smabbler.com)\n",
"\n",
"After the model is built and activated, you will be able to use this integration to retrieve what you need.\n",
"\n",
"The module repository is located here: [github](https://github.com/rrozanski-smabbler/galaxia-langchain)\n",
"\n",
"### Integration details\n",
"| Retriever | Self-host | Cloud offering | Package |\n",
"| :--- | :--- | :---: | :---: |\n",
"| [Galaxia Retriever](https://github.com/rrozanski-smabbler/galaxia-langchain) | ❌ | ✅ | __langchain-galaxia-retriever__ |"
]
},
{
"cell_type": "markdown",
"id": "82fa1c05-c205-4429-a74c-e6c81c4e8611",
"metadata": {},
"source": [
"## Setup\n",
"Before you can retrieve anything you need to create your Graph Language Model here: [smabbler-cloud](https://beta.cloud.smabbler.com)\n",
"\n",
"following these 3 simple steps: [rag-instruction](https://smabbler.gitbook.io/smabbler/api-rag/build-rag-model-in-3-steps)\n",
"\n",
"Don't forget to activate the model after building it!"
]
},
{
"cell_type": "markdown",
"id": "91897867-eb39-4c3b-8df8-5427043ecdcd",
"metadata": {},
"source": [
"### Installation\n",
"The retriever is implemented in the following package: [pypi](https://pypi.org/project/langchain-galaxia-retriever/)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ceca36f2-013c-4b28-81fe-8808d0cf6419",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-galaxia-retriever"
]
},
{
"cell_type": "markdown",
"id": "019e0e50-5e66-440b-9cf1-d21b4009bf13",
"metadata": {},
"source": [
"## Instantiation"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c7188217-4b26-4201-b15a-b7a5f263f815",
"metadata": {},
"outputs": [],
"source": [
"from langchain_galaxia_retriever.retriever import GalaxiaRetriever\n",
"\n",
"gr = GalaxiaRetriever(\n",
" api_url=\"beta.api.smabbler.com\",\n",
" api_key=\"<key>\", # you can find it here: https://beta.cloud.smabbler.com/user/account\n",
" knowledge_base_id=\"<knowledge_base_id>\", # you can find it in https://beta.cloud.smabbler.com , in the model table\n",
" n_retries=10,\n",
" wait_time=5,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "02d288a5-4f76-472e-9a60-eea8e6b8dc7a",
"metadata": {},
"source": [
"## Usage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5f79e03f-77a6-4eb6-b41d-f3da2f897654",
"metadata": {},
"outputs": [],
"source": [
"result = gr.invoke(\"<test question>\")\n",
"print(result)"
]
},
{
"cell_type": "markdown",
"id": "ffb2a595-a901-477a-a374-efd091bc1c9a",
"metadata": {},
"source": [
"## Use within a chain"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c2e2394-ca33-47be-a851-551b4216daea",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ed8699d6-d65d-40ea-8c58-8d809cc512cf",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\n",
" \"\"\"Answer the question based only on the context provided.\n",
"\n",
"Context: {context}\n",
"\n",
"Question: {question}\"\"\"\n",
")\n",
"\n",
"\n",
"def format_docs(docs):\n",
" return \"\\n\\n\".join(doc.page_content for doc in docs)\n",
"\n",
"\n",
"chain = (\n",
" {\"context\": gr | format_docs, \"question\": RunnablePassthrough()}\n",
" | prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9b944d7-8800-4926-b1ce-fcdc52ecda1c",
"metadata": {},
"outputs": [],
"source": [
"chain.invoke(\"<test question>\")"
]
},
{
"cell_type": "markdown",
"id": "11b5c9a5-0a66-415f-98f8-f12080cad30a",
"metadata": {},
"source": [
"## API reference\n",
"\n",
"For more information about Galaxia Retriever check its implementation on github [github](https://github.com/rrozanski-smabbler/galaxia-langchain)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1696,7 +1696,7 @@ class PDFRouterParser(BaseBlobParser):
    ("Xdvipdfmx", {"producer": "xdvipdfmx.*", "page1": "Hello"}, PDFPlumberParser()),
    ("default", {}, PyPDFium2Parser())
]
loader = PDFRouterLoader(filename, routes=routes)
loader.load()
```
"""

View File

@@ -276,9 +276,6 @@ class AIMessage(BaseMessage):
        return (base.strip() + "\n" + "\n".join(lines)).strip()

AIMessage.model_rebuild()

class AIMessageChunk(AIMessage, BaseMessageChunk):
    """Message chunk from an AI."""

View File

@@ -22,9 +22,6 @@ class ChatMessage(BaseMessage):
    """The type of the message (used during serialization). Defaults to "chat"."""

ChatMessage.model_rebuild()

class ChatMessageChunk(ChatMessage, BaseMessageChunk):
    """Chat Message chunk."""

View File

@@ -30,9 +30,6 @@ class FunctionMessage(BaseMessage):
    """The type of the message (used for serialization). Defaults to "function"."""

FunctionMessage.model_rebuild()

class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
    """Function Message chunk."""

View File

@@ -52,9 +52,6 @@ class HumanMessage(BaseMessage):
        super().__init__(content=content, **kwargs)

HumanMessage.model_rebuild()

class HumanMessageChunk(HumanMessage, BaseMessageChunk):
    """Human Message chunk."""

View File

@@ -26,6 +26,3 @@ class RemoveMessage(BaseMessage):
            raise ValueError(msg)
        super().__init__("", id=id, **kwargs)

RemoveMessage.model_rebuild()

View File

@@ -46,9 +46,6 @@ class SystemMessage(BaseMessage):
        super().__init__(content=content, **kwargs)

SystemMessage.model_rebuild()

class SystemMessageChunk(SystemMessage, BaseMessageChunk):
    """System Message chunk."""

View File

@@ -146,9 +146,6 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
        super().__init__(content=content, **kwargs)

ToolMessage.model_rebuild()

class ToolMessageChunk(ToolMessage, BaseMessageChunk):
    """Tool Message chunk."""

View File

@@ -133,9 +133,6 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]):
            yield [part]

ListOutputParser.model_rebuild()

class CommaSeparatedListOutputParser(ListOutputParser):
    """Parse the output of an LLM call to a comma-separated list."""

View File

@@ -114,9 +114,6 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
        return self.pydantic_object

PydanticOutputParser.model_rebuild()

_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.

As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}

View File

@@ -31,6 +31,3 @@ class StrOutputParser(BaseTransformOutputParser[str]):
    def parse(self, text: str) -> str:
        """Returns the input text with no changes."""
        return text

StrOutputParser.model_rebuild()

View File

@@ -132,6 +132,3 @@ class PipelinePromptTemplate(BasePromptTemplate):
    @property
    def _prompt_type(self) -> str:
        raise ValueError

PipelinePromptTemplate.model_rebuild()

View File

@@ -5650,9 +5650,6 @@ class RunnableBindingBase(RunnableSerializable[Input, Output]):
            yield item

RunnableBindingBase.model_rebuild()

class RunnableBinding(RunnableBindingBase[Input, Output]):
    """Wrap a Runnable with additional functionality.

View File

@@ -8,7 +8,6 @@ from abc import abstractmethod
from collections.abc import (
    AsyncIterator,
    Iterator,
    Mapping,  # noqa: F401 Needed by pydantic
    Sequence,
)
from functools import wraps
@@ -464,9 +463,6 @@ class RunnableConfigurableFields(DynamicRunnable[Input, Output]):
        return (self.default, config)

RunnableConfigurableFields.model_rebuild()

# Before Python 3.11 native StrEnum is not available
class StrEnum(str, enum.Enum):
    """String enum."""

View File

@@ -6,6 +6,7 @@ import ast
import asyncio
import inspect
import textwrap
from collections.abc import Mapping, Sequence
from contextvars import Context
from functools import lru_cache
from inspect import signature
@@ -33,8 +34,6 @@ if TYPE_CHECKING:
        Awaitable,
        Coroutine,
        Iterable,
        Mapping,
        Sequence,
    )

    from langchain_core.runnables.schema import StreamEvent

View File

@@ -176,6 +176,3 @@ class Tool(BaseTool):
            args_schema=args_schema,
            **kwargs,
        )

Tool.model_rebuild()

View File

@@ -227,9 +227,6 @@ class SerializableModel(GenericFakeChatModel):
        return True

SerializableModel.model_rebuild()

def test_serialization_with_rate_limiter() -> None:
    """Test model serialization with rate limiter."""
    from langchain_core.load import dumps

View File

@@ -45,8 +45,6 @@ def test_base_generation_parser() -> None:
            assert isinstance(content, str)
            return content.swapcase()

    StrInvertCase.model_rebuild()

    model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
    chain = model | StrInvertCase()
    assert chain.invoke("") == "HeLLO"

View File

@@ -35,9 +35,6 @@ class FakeStructuredChatModel(FakeListChatModel):
        return "fake-messages-list-chat-model"

FakeStructuredChatModel.model_rebuild()

def test_structured_prompt_pydantic() -> None:
    class OutputSchema(BaseModel):
        name: str

View File

@@ -1188,9 +1188,6 @@ class HardCodedRetriever(BaseRetriever):
        return self.documents

HardCodedRetriever.model_rebuild()

async def test_event_stream_with_retriever() -> None:
    """Test the event stream with a retriever."""
    retriever = HardCodedRetriever(

View File

@@ -0,0 +1,20 @@
import importlib
from pathlib import Path

from pydantic import BaseModel


def test_all_models_built() -> None:
    # Import every top-level langchain_core module and check that each exported
    # pydantic model has been fully built (no unresolved forward references).
    for path in Path("../core/langchain_core/").glob("*"):
        module_name = path.stem
        if not module_name.startswith(".") and path.suffix != ".typed":
            module = importlib.import_module("langchain_core." + module_name)
            all_ = getattr(module, "__all__", [])
            for attr_name in all_:
                attr = getattr(module, attr_name)
                try:
                    if issubclass(attr, BaseModel):
                        assert attr.__pydantic_complete__ is True
                except TypeError:
                    # This is expected for non-class attributes
                    pass
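
For context, a minimal sketch (not part of this commit) of the pydantic behavior this test guards: a model with an unresolved forward reference stays incomplete until `model_rebuild()` is called, which is what the removed `model_rebuild()` calls above used to paper over.

```python
from pydantic import BaseModel


class Node(BaseModel):
    child: "Tree"  # forward reference; "Tree" is not defined yet


# The model cannot be fully built until "Tree" exists
assert Node.__pydantic_complete__ is False


class Tree(BaseModel):
    value: int


Node.model_rebuild()  # now resolvable, so the model completes
assert Node.__pydantic_complete__ is True
```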

View File

@@ -1091,9 +1091,6 @@ class FooBase(BaseTool):
        return assert_bar(bar, bar_config)

FooBase.model_rebuild()

class AFooBase(FooBase):
    async def _arun(self, bar: Any, bar_config: RunnableConfig, **kwargs: Any) -> Any:
        return assert_bar(bar, bar_config)

View File

@@ -606,3 +606,7 @@ packages:
  - name: langchain-ydb
    path: .
    repo: ydb-platform/langchain-ydb
  - name: langchain-galaxia-retriever
    provider_page: galaxia
    path: .
    repo: rrozanski-smabbler/galaxia-langchain

View File

@@ -6,9 +6,9 @@ build-backend = "pdm.backend"
authors = []
license = { text = "MIT" }
requires-python = "<4.0,>=3.9"
dependencies = ["ollama<1,>=0.4.4", "langchain-core<1.0.0,>=0.3.52"]
name = "langchain-ollama"
version = "0.3.2"
description = "An integration package connecting Ollama and LangChain"
readme = "README.md"

View File

@ -1,7 +1,8 @@
version = 1 version = 1
requires-python = ">=3.9, <4.0" requires-python = ">=3.9, <4.0"
resolution-markers = [ resolution-markers = [
"python_full_version >= '3.12.4'", "python_full_version >= '3.13'",
"python_full_version >= '3.12.4' and python_full_version < '3.13'",
"python_full_version >= '3.12' and python_full_version < '3.12.4'", "python_full_version >= '3.12' and python_full_version < '3.12.4'",
"python_full_version < '3.12'", "python_full_version < '3.12'",
] ]
@@ -287,7 +288,7 @@ wheels = [
[[package]]
name = "langchain-core"
version = "0.3.52"
source = { editable = "../../core" }
dependencies = [
    { name = "jsonpatch" },
@@ -323,10 +324,12 @@ test = [
    { name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
    { name = "grandalf", specifier = ">=0.8,<1.0" },
    { name = "langchain-tests", directory = "../../standard-tests" },
    { name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.4" },
    { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
    { name = "pytest", specifier = ">=8,<9" },
    { name = "pytest-asyncio", specifier = ">=0.21.1,<1.0.0" },
    { name = "pytest-benchmark" },
    { name = "pytest-codspeed" },
    { name = "pytest-mock", specifier = ">=3.10.0,<4.0.0" },
    { name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" },
    { name = "pytest-watcher", specifier = ">=0.3.4,<1.0.0" },
@@ -337,15 +340,14 @@ test = [
test-integration = []
typing = [
    { name = "langchain-text-splitters", directory = "../../text-splitters" },
    { name = "mypy", specifier = ">=1.15,<1.16" },
    { name = "types-jinja2", specifier = ">=2.11.9,<3.0.0" },
    { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
    { name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" },
]

[[package]]
name = "langchain-ollama"
version = "0.3.2"
source = { editable = "." }
dependencies = [
    { name = "langchain-core" },
@@ -403,7 +405,7 @@ typing = [
[[package]]
name = "langchain-tests"
version = "0.3.18"
source = { editable = "../../standard-tests" }
dependencies = [
    { name = "httpx" },
@@ -420,7 +422,8 @@ dependencies = [
requires-dist = [
    { name = "httpx", specifier = ">=0.25.0,<1" },
    { name = "langchain-core", editable = "../../core" },
    { name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.2" },
    { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
    { name = "pytest", specifier = ">=7,<9" },
    { name = "pytest-asyncio", specifier = ">=0.20,<1" },
    { name = "pytest-socket", specifier = ">=0.6.0,<1" },
@@ -558,7 +561,8 @@ name = "numpy"
version = "2.2.2"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
    "python_full_version >= '3.13'",
    "python_full_version >= '3.12.4' and python_full_version < '3.13'",
    "python_full_version >= '3.12' and python_full_version < '3.12.4'",
]
sdist = { url = "https://files.pythonhosted.org/packages/ec/d0/c12ddfd3a02274be06ffc71f3efc6d0e457b0409c4481596881e748cb264/numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f", size = 20233295 }

View File

@@ -2132,6 +2132,40 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
            "Your name is Bob. How can I help you today, Bob?"

    .. dropdown:: Reasoning output

        OpenAI's Responses API supports `reasoning models <https://platform.openai.com/docs/guides/reasoning?api-mode=responses>`_
        that expose a summary of internal reasoning processes.

        .. code-block:: python

            from langchain_openai import ChatOpenAI

            reasoning = {
                "effort": "medium",  # 'low', 'medium', or 'high'
                "summary": "auto",  # 'detailed', 'auto', or None
            }

            llm = ChatOpenAI(
                model="o4-mini", use_responses_api=True, model_kwargs={"reasoning": reasoning}
            )
            response = llm.invoke("What is 3^3?")

            print(f"Output: {response.text()}")
            print(f"Reasoning: {response.additional_kwargs['reasoning']}")

        .. code-block:: none

            Output: 3^3 = 27.

            Reasoning: {
                'id': 'rs_67fffc44b1c08191b6ca9bead6d832590433145b1786f809',
                'summary': [
                    {'text': 'The user wants to know...', 'type': 'summary_text'}
                ],
                'type': 'reasoning'
            }

    .. dropdown:: Structured output

        .. code-block:: python

View File

@@ -2338,7 +2338,7 @@ dependencies = [
requires-dist = [
    { name = "chromadb", specifier = ">=0.4.0,!=0.5.4,!=0.5.5,!=0.5.7,!=0.5.9,!=0.5.10,!=0.5.11,!=0.5.12,<0.7.0" },
    { name = "langchain-core", editable = "libs/core" },
    { name = "numpy", marker = "python_full_version < '3.13'", specifier = ">=1.26.0" },
    { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
]
@@ -2451,7 +2451,7 @@ typing = [
    { name = "langchain", editable = "libs/langchain" },
    { name = "langchain-core", editable = "libs/core" },
    { name = "langchain-text-splitters", editable = "libs/text-splitters" },
    { name = "mypy", specifier = ">=1.15,<2.0" },
    { name = "mypy-protobuf", specifier = ">=3.0.0,<4.0.0" },
    { name = "types-chardet", specifier = ">=5.0.4.6,<6.0.0.0" },
    { name = "types-pytz", specifier = ">=2023.3.0.0,<2024.0.0.0" },
@@ -2503,6 +2503,8 @@ test = [
    { name = "numpy", marker = "python_full_version >= '3.13'", specifier = ">=2.1.0" },
    { name = "pytest", specifier = ">=8,<9" },
    { name = "pytest-asyncio", specifier = ">=0.21.1,<1.0.0" },
    { name = "pytest-benchmark" },
    { name = "pytest-codspeed" },
    { name = "pytest-mock", specifier = ">=3.10.0,<4.0.0" },
    { name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" },
    { name = "pytest-watcher", specifier = ">=0.3.4,<1.0.0" },
@@ -2513,8 +2515,7 @@ test = [
test-integration = []
typing = [
    { name = "langchain-text-splitters", directory = "libs/text-splitters" },
    { name = "mypy", specifier = ">=1.15,<1.16" },
    { name = "types-jinja2", specifier = ">=2.11.9,<3.0.0" },
    { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
    { name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" },
]