Merge branch 'langchain-ai:master' into pprados/06-pdfplumber

This commit is contained in:
Philippe PRADOS 2025-02-24 15:07:23 +01:00 committed by GitHub
commit 60e364f3f1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 22 additions and 1 deletions

View File

@@ -22,7 +22,7 @@
"2. LangChain [Runnables](/docs/concepts/runnables);\n",
"3. By sub-classing from [BaseTool](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.base.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.tool.html#langchain_core.tools.tool). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.structured.StructuredTool.html#langchain_core.tools.structured.StructuredTool.from_function) class method.\n",
"Creating tools from functions may be sufficient for most use cases, and can be done via a simple [@tool decorator](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.convert.tool.html). If more configuration is needed-- e.g., specification of both sync and async implementations-- one can also use the [StructuredTool.from_function](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.structured.StructuredTool.html#langchain_core.tools.structured.StructuredTool.from_function) class method.\n",
"\n",
"In this guide we provide an overview of these methods.\n",
"\n",

View File

@@ -6,6 +6,7 @@ import base64
import json
import logging
import os
import re
import sys
import warnings
from io import BytesIO
@@ -2011,6 +2012,12 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
# in September 2024 release
if "max_tokens" in payload:
payload["max_completion_tokens"] = payload.pop("max_tokens")
# Mutate system message role to "developer" for o-series models
if self.model_name and re.match(r"^o\d", self.model_name):
for message in payload.get("messages", []):
if message["role"] == "system":
message["role"] = "developer"
return payload
def _should_stream_usage(

View File

@@ -881,6 +881,20 @@ def test__get_request_payload() -> None:
payload = llm._get_request_payload(messages)
assert payload == expected
# Test we coerce to developer role for o-series models
llm = ChatOpenAI(model="o3-mini")
payload = llm._get_request_payload(messages)
expected = {
"messages": [
{"role": "developer", "content": "hello"},
{"role": "developer", "content": "bye"},
{"role": "user", "content": "how are you"},
],
"model": "o3-mini",
"stream": False,
}
assert payload == expected
def test_init_o1() -> None:
with pytest.warns(None) as record: # type: ignore[call-overload]