openai[patch]: enable Azure structured output, parallel_tool_calls=False, tool_choice=required (#26599)

response_format=json_schema, tool_choice=required, parallel_tool_calls

response_format=json_schema, tool_choice=required, parallel_tool_calls
are all supported for gpt-4o on azure.
This commit is contained in:
Bagatur
2024-09-22 22:25:22 -07:00
committed by GitHub
parent bb40a0fb32
commit e1e4f88b3e
4 changed files with 98 additions and 350 deletions

View File

@@ -454,6 +454,23 @@ class BaseChatOpenAI(BaseChatModel):
making requests to OpenAI compatible APIs, such as vLLM."""
include_response_headers: bool = False
"""Whether to include response headers in the output message response_metadata."""
disabled_params: Optional[Dict[str, Any]] = Field(default=None)
"""Parameters of the OpenAI client or chat.completions endpoint that should be
disabled for the given model.
Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the
parameter and the value is either None, meaning that parameter should never be
used, or it's a list of disabled values for the parameter.
For example, older models may not support the 'parallel_tool_calls' parameter at
all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed
in.
If a parameter is disabled then it will not be used by default in any methods, e.g.
in :meth:`~langchain_openai.chat_models.base.ChatOpenAI.with_structured_output`.
However this does not prevent a user from directly passing in the parameter during
invocation.
"""
model_config = ConfigDict(populate_by_name=True)
@@ -1401,12 +1418,11 @@ class BaseChatOpenAI(BaseChatModel):
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
parallel_tool_calls=False,
strict=strict,
bind_kwargs = self._filter_disabled_params(
tool_choice=tool_name, parallel_tool_calls=False, strict=strict
)
llm = self.bind_tools([schema], **bind_kwargs)
if is_pydantic_schema:
output_parser: Runnable = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
@@ -1456,6 +1472,21 @@ class BaseChatOpenAI(BaseChatModel):
else:
return llm | output_parser
def _filter_disabled_params(self, **kwargs: Any) -> Dict[str, Any]:
if not self.disabled_params:
return kwargs
filtered = {}
for k, v in kwargs.items():
# Skip param
if k in self.disabled_params and (
self.disabled_params[k] is None or v in self.disabled_params[k]
):
continue
# Keep param
else:
filtered[k] = v
return filtered
class ChatOpenAI(BaseChatOpenAI):
"""OpenAI chat model integration.
@@ -2114,7 +2145,7 @@ def _oai_structured_outputs_parser(ai_msg: AIMessage) -> PydanticBaseModel:
else:
raise ValueError(
"Structured Output response does not have a 'parsed' field nor a 'refusal' "
"field."
"field. Received message:\n\n{ai_msg}"
)