From bbb60e210a09a8c28ca48a9d7bd6005bda771c31 Mon Sep 17 00:00:00 2001
From: ccurme
Date: Fri, 30 May 2025 09:29:36 -0400
Subject: [PATCH 1/3] docs: add example of simultaneous tool-calling +
 structured output for OpenAI (#31433)

---
 docs/docs/integrations/chat/openai.ipynb | 50 ++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/docs/docs/integrations/chat/openai.ipynb b/docs/docs/integrations/chat/openai.ipynb
index 8df8e6d0c1b..58bec4b2f62 100644
--- a/docs/docs/integrations/chat/openai.ipynb
+++ b/docs/docs/integrations/chat/openai.ipynb
@@ -397,6 +397,56 @@
     "For more on binding tools and tool call outputs, head to the [tool calling](/docs/how_to/function_calling) docs."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "f06789fb-61e1-4b35-a2b5-2dea18c1a949",
+   "metadata": {},
+   "source": [
+    "### Structured output and tool calls\n",
+    "\n",
+    "OpenAI's [structured output](https://platform.openai.com/docs/guides/structured-outputs) feature can be used simultaneously with tool-calling. The model will either generate tool calls or a response adhering to a desired schema. See the example below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15d2b6e0-f457-4abd-a4d5-08b210d09c04",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_openai import ChatOpenAI\n",
+    "from pydantic import BaseModel\n",
+    "\n",
+    "\n",
+    "def get_weather(location: str) -> str:\n",
+    "    \"\"\"Get weather at a location.\"\"\"\n",
+    "    return \"It's sunny.\"\n",
+    "\n",
+    "\n",
+    "class OutputSchema(BaseModel):\n",
+    "    \"\"\"Schema for response.\"\"\"\n",
+    "\n",
+    "    answer: str\n",
+    "    justification: str\n",
+    "\n",
+    "\n",
+    "llm = ChatOpenAI(model=\"gpt-4.1\")\n",
+    "\n",
+    "structured_llm = llm.bind_tools(\n",
+    "    [get_weather],\n",
+    "    response_format=OutputSchema,\n",
+    "    strict=True,\n",
+    ")\n",
+    "\n",
+    "# Response contains tool calls:\n",
+    "tool_call_response = structured_llm.invoke(\"What is the weather in SF?\")\n",
+    "\n",
+    "# structured_response.additional_kwargs[\"parsed\"] contains parsed output\n",
+    "structured_response = structured_llm.invoke(\n",
+    "    \"What weighs more, a pound of feathers or a pound of gold?\"\n",
+    ")"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "84833dd0-17e9-4269-82ed-550639d65751",

From 5b9394319b1b83991746cba0dc0477123fc02a6a Mon Sep 17 00:00:00 2001
From: Jorge Piedrahita Ortiz
Date: Fri, 30 May 2025 11:07:04 -0500
Subject: [PATCH 2/3] docs: sambanova doc minor fixes (#31436)

- **Description:** sambanova provider docs minor fixes
---
 docs/docs/integrations/llms/sambanovacloud.ipynb | 4 ++--
 docs/docs/integrations/providers/sambanova.ipynb | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/docs/integrations/llms/sambanovacloud.ipynb b/docs/docs/integrations/llms/sambanovacloud.ipynb
index cffe08553fa..b3cc05a4d55 100644
--- a/docs/docs/integrations/llms/sambanovacloud.ipynb
+++ b/docs/docs/integrations/llms/sambanovacloud.ipynb
@@ -74,14 +74,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_community.llms.sambanova import SambaNovaCloud\n",
     "\n",
     "llm = SambaNovaCloud(\n",
-    "    model=\"Meta-Llama-3.1-70B-Instruct\",\n",
+    "    model=\"Meta-Llama-3.3-70B-Instruct\",\n",
     "    max_tokens_to_generate=1000,\n",
     "    temperature=0.01,\n",
     "    # top_k = 50,\n",
diff --git a/docs/docs/integrations/providers/sambanova.ipynb b/docs/docs/integrations/providers/sambanova.ipynb
index 1027cf6609e..608f5f3e187 100644
--- a/docs/docs/integrations/providers/sambanova.ipynb
+++ b/docs/docs/integrations/providers/sambanova.ipynb
@@ -130,7 +130,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "For a more detailed walkthrough of the SambaStudioEmbeddings component, see [this notebook](https://python.langchain.com/docs/integrations/text_embedding/sambanova/)"
+    "For a more detailed walkthrough of the SambaNovaCloudEmbeddings component, see [this notebook](https://python.langchain.com/docs/integrations/text_embedding/sambanova/)"
    ]
   },
   {

From 5bf89628bf82a5ddf9d0dd4bc0926e61ccc4657d Mon Sep 17 00:00:00 2001
From: ccurme
Date: Fri, 30 May 2025 13:27:12 -0400
Subject: [PATCH 3/3] groq[patch]: update model for integration tests (#31440)

Llama-3.1 started failing consistently with

> groq.BadRequestError: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '{"query": "Hello!"}'}}
---
 .../groq/tests/integration_tests/test_standard.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py
index 04e68fa567f..90b9f16a5e5 100644
--- a/libs/partners/groq/tests/integration_tests/test_standard.py
+++ b/libs/partners/groq/tests/integration_tests/test_standard.py
@@ -29,14 +29,10 @@ class BaseTestGroq(ChatModelIntegrationTests):
         return True
 
 
-class TestGroqLlama(BaseTestGroq):
+class TestGroqGemma(BaseTestGroq):
     @property
     def chat_model_params(self) -> dict:
-        return {
-            "model": "llama-3.1-8b-instant",
-            "temperature": 0,
-            "rate_limiter": rate_limiter,
-        }
+        return {"model": "gemma2-9b-it", "rate_limiter": rate_limiter}
 
     @property
     def supports_json_mode(self) -> bool:
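Addendum (not part of the patches above): a minimal sketch of how the notebook cell added in PATCH 1/3 might be consumed downstream. The if/else routing and print statements are illustrative assumptions; the only behavior taken from the patch itself is that an invocation yields either tool calls (on `message.tool_calls`) or a schema-conforming answer parsed into `message.additional_kwargs["parsed"]`.

    from langchain_openai import ChatOpenAI
    from pydantic import BaseModel


    def get_weather(location: str) -> str:
        """Get weather at a location."""
        return "It's sunny."


    class OutputSchema(BaseModel):
        """Schema for response."""

        answer: str
        justification: str


    llm = ChatOpenAI(model="gpt-4.1")
    structured_llm = llm.bind_tools(
        [get_weather],
        response_format=OutputSchema,
        strict=True,
    )

    message = structured_llm.invoke("What is the weather in SF?")
    if message.tool_calls:
        # The model chose to call a tool; each entry carries "name" and "args".
        for tool_call in message.tool_calls:
            print(f"Tool requested: {tool_call['name']}({tool_call['args']})")
    else:
        # The model answered directly; per the cell above, the parsed Pydantic
        # object is surfaced in additional_kwargs["parsed"].
        parsed = message.additional_kwargs["parsed"]
        print(parsed.answer)
        print(parsed.justification)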