mirror of https://github.com/hwchase17/langchain.git
synced 2025-08-13 14:50:00 +00:00

Merge branch 'master' into eugene/responses_api_2
commit 2358235342
@@ -397,6 +397,56 @@
     "For more on binding tools and tool call outputs, head to the [tool calling](/docs/how_to/function_calling) docs."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "f06789fb-61e1-4b35-a2b5-2dea18c1a949",
+   "metadata": {},
+   "source": [
+    "### Structured output and tool calls\n",
+    "\n",
+    "OpenAI's [structured output](https://platform.openai.com/docs/guides/structured-outputs) feature can be used simultaneously with tool-calling. The model will either generate tool calls or a response adhering to a desired schema. See example below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "15d2b6e0-f457-4abd-a4d5-08b210d09c04",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_openai import ChatOpenAI\n",
+    "from pydantic import BaseModel\n",
+    "\n",
+    "\n",
+    "def get_weather(location: str) -> str:\n",
+    "    \"\"\"Get weather at a location.\"\"\"\n",
+    "    return \"It's sunny.\"\n",
+    "\n",
+    "\n",
+    "class OutputSchema(BaseModel):\n",
+    "    \"\"\"Schema for response.\"\"\"\n",
+    "\n",
+    "    answer: str\n",
+    "    justification: str\n",
+    "\n",
+    "\n",
+    "llm = ChatOpenAI(model=\"gpt-4.1\")\n",
+    "\n",
+    "structured_llm = llm.bind_tools(\n",
+    "    [get_weather],\n",
+    "    response_format=OutputSchema,\n",
+    "    strict=True,\n",
+    ")\n",
+    "\n",
+    "# Response contains tool calls:\n",
+    "tool_call_response = structured_llm.invoke(\"What is the weather in SF?\")\n",
+    "\n",
+    "# structured_response.additional_kwargs[\"parsed\"] contains parsed output\n",
+    "structured_response = structured_llm.invoke(\n",
+    "    \"What weighs more, a pound of feathers or a pound of gold?\"\n",
+    ")"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "84833dd0-17e9-4269-82ed-550639d65751",
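For context (not part of the commit), here is a minimal sketch of how the two responses produced by the new notebook cell might be inspected, assuming the structured_llm object it defines; tool_calls is the standard AIMessage attribute, and additional_kwargs["parsed"] is the field the cell's own comment points at.

# A sketch, not part of the diff: inspecting the two kinds of responses from the
# added cell. Assumes `structured_llm` as defined above.
tool_call_response = structured_llm.invoke("What is the weather in SF?")
for call in tool_call_response.tool_calls:
    # Each entry is a dict carrying the tool name and parsed arguments,
    # e.g. {"name": "get_weather", "args": {"location": "SF"}, ...}
    print(call["name"], call["args"])

structured_response = structured_llm.invoke(
    "What weighs more, a pound of feathers or a pound of gold?"
)
# Per the cell's comment, the schema-conforming output lands here:
print(structured_response.additional_kwargs.get("parsed"))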
@@ -74,14 +74,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "from langchain_community.llms.sambanova import SambaNovaCloud\n",
     "\n",
     "llm = SambaNovaCloud(\n",
-    "    model=\"Meta-Llama-3.1-70B-Instruct\",\n",
+    "    model=\"Meta-Llama-3.3-70B-Instruct\",\n",
     "    max_tokens_to_generate=1000,\n",
     "    temperature=0.01,\n",
     "    # top_k = 50,\n",
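Outside the diff, a minimal usage sketch for the reconfigured model, built only from the lines in this hunk; the prompt string is illustrative, and invoke is the generic LangChain entry point for completion-style LLMs.

# Sketch only (not part of the commit): running the SambaNovaCloud LLM with the
# parameters shown in the hunk above.
from langchain_community.llms.sambanova import SambaNovaCloud

llm = SambaNovaCloud(
    model="Meta-Llama-3.3-70B-Instruct",
    max_tokens_to_generate=1000,
    temperature=0.01,
)

# For LLM-type models, invoke returns a plain string completion.
print(llm.invoke("Why should I use open source models?"))  # prompt is illustrative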
@@ -130,7 +130,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "For a more detailed walkthrough of the SambaStudioEmbeddings component, see [this notebook](https://python.langchain.com/docs/integrations/text_embedding/sambanova/)"
+    "For a more detailed walkthrough of the SambaNovaCloudEmbeddings component, see [this notebook](https://python.langchain.com/docs/integrations/text_embedding/sambanova/)"
    ]
   },
   {
@@ -29,14 +29,10 @@ class BaseTestGroq(ChatModelIntegrationTests):
         return True


-class TestGroqLlama(BaseTestGroq):
+class TestGroqGemma(BaseTestGroq):
     @property
     def chat_model_params(self) -> dict:
-        return {
-            "model": "llama-3.1-8b-instant",
-            "temperature": 0,
-            "rate_limiter": rate_limiter,
-        }
+        return {"model": "gemma2-9b-it", "rate_limiter": rate_limiter}

     @property
     def supports_json_mode(self) -> bool:
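Assembled for readability (a sketch, not the full test file): the renamed Gemma-based integration test as it reads after this hunk. BaseTestGroq and rate_limiter are assumed to come from the surrounding module, and anything past chat_model_params is untouched by the commit, so it is left elided.

# Sketch assembled from the hunk above; not a complete module.
class TestGroqGemma(BaseTestGroq):
    @property
    def chat_model_params(self) -> dict:
        # The Llama 3.1 8B model and pinned temperature are dropped in favor of
        # a single-line Gemma configuration.
        return {"model": "gemma2-9b-it", "rate_limiter": rate_limiter}

    @property
    def supports_json_mode(self) -> bool:
        ...  # body unchanged by this hunk, omitted here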