Compare commits

...

9 Commits

Author SHA1 Message Date
Chester Curme
d765a91c5c avoid overwriting model param if passed explicitly 2025-09-08 09:57:51 -04:00
Chester Curme
88af494b37 Merge branch 'master' into fix/responses-api 2025-09-08 09:54:46 -04:00
Chester Curme
08c4055347 Merge branch 'master' into fix/responses-api 2025-09-08 09:42:36 -04:00
Chester Curme
45f1b67340 fix for null model name (Azure) 2025-08-22 10:14:34 -04:00
Chester Curme
8e37d39d66 remove model param from Azure integration tests 2025-08-22 10:13:43 -04:00
강준형
be9274054f fix(openai): Update Azure unit tests to use SecretStr for API key
- Changed the API key parameter in AzureChatOpenAI instantiation to use SecretStr for better security.
- Simplified the test documentation by removing redundant phrases while maintaining clarity.
- Ensures unit tests adhere to best practices for handling sensitive information.
2025-08-22 22:26:06 +09:00
강준형
03b9214737 fix(openai): Remove unnecessary assertion in Azure unit tests
- Removed the assertion for 'reasoning' in the Responses API test as it is not applicable.
- Ensures unit tests are streamlined and focused on relevant fields for the Responses API.
2025-08-22 22:23:40 +09:00
강준형
fcebafea9b fix(openai): Update model assertions in Azure unit tests
- Responses API model field updated to use 'your_deployment' instead of custom name
- Chat Completions API model field updated to 'gpt-5' instead of 'gpt-4o'
- Ensures unit tests reflect the correct model names for both APIs
2025-08-22 22:20:28 +09:00
강준형
3efa31d786 fix(openai): Use Azure deployment name for Responses API model field
- Azure Responses API requires deployment name instead of model name
- Maintains backward compatibility for Chat Completions API
- Adds unit tests to verify the fix

Fixes: Azure OpenAI Responses API model field issue
2025-08-22 22:04:37 +09:00
4 changed files with 61 additions and 3 deletions

View File

@@ -754,6 +754,26 @@ class AzureChatOpenAI(BaseChatOpenAI):
return chat_result
def _get_request_payload(
    self,
    input_: LanguageModelInput,
    *,
    stop: Optional[list[str]] = None,
    **kwargs: Any,
) -> dict:
    """Build the request payload, substituting the Azure deployment name.

    Delegates to the parent implementation, then — only when the payload
    targets the Responses API and no model was supplied explicitly — fills
    the ``model`` field with the configured deployment name, which Azure's
    Responses endpoint expects in place of the OpenAI model name.
    """
    payload = super()._get_request_payload(input_, stop=stop, **kwargs)
    # An explicitly provided model always wins; only backfill the
    # deployment name when the field is empty.
    needs_deployment = (
        self._use_responses_api(payload)
        and not payload.get("model")
        and bool(self.deployment_name)
    )
    if needs_deployment:
        payload["model"] = self.deployment_name
    return payload
def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
"""Route to Chat Completions or Responses API."""
if self._use_responses_api({**kwargs, **self.model_kwargs}):

View File

@@ -3541,7 +3541,7 @@ def _construct_responses_api_payload(
payload["reasoning"] = {"effort": payload.pop("reasoning_effort")}
# Remove temperature parameter for models that don't support it in responses API
model = payload.get("model", "")
model = payload.get("model") or ""
if model.startswith("gpt-5") and "chat" not in model: # gpt-5-chat supports
payload.pop("temperature", None)

View File

@@ -21,7 +21,6 @@ class TestAzureOpenAIStandard(ChatModelIntegrationTests):
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
@@ -49,7 +48,6 @@ class TestAzureOpenAIResponses(ChatModelIntegrationTests):
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"use_responses_api": True,

View File

@@ -5,6 +5,7 @@ from unittest import mock
import pytest
from langchain_core.messages import HumanMessage
from pydantic import SecretStr
from typing_extensions import TypedDict
from langchain_openai import AzureChatOpenAI
@@ -99,3 +100,42 @@ def test_max_completion_tokens_in_payload() -> None:
"stream": False,
"max_completion_tokens": 300,
}
def test_responses_api_uses_deployment_name() -> None:
    """Responses API payloads should carry the Azure deployment name as model."""
    chat = AzureChatOpenAI(
        azure_deployment="your_deployment",
        api_version="2025-04-01-preview",
        azure_endpoint="your_endpoint",
        api_key=SecretStr("your_api_key"),
        # Opting into the Responses API routes payload construction there.
        use_responses_api=True,
        output_version="responses/v1",
    )
    payload = chat._get_request_payload([HumanMessage("Hello")])
    # Azure's Responses endpoint expects the deployment name in ``model``.
    assert payload["model"] == "your_deployment"
    # Responses API payloads use ``input`` rather than ``messages``.
    assert "input" in payload
def test_chat_completions_api_uses_model_name() -> None:
    """Chat Completions payloads should keep the configured model name."""
    chat = AzureChatOpenAI(
        azure_deployment="your_deployment",
        model="gpt-5",  # This is the OpenAI model name
        api_version="2025-04-01-preview",
        azure_endpoint="your_endpoint",
        api_key=SecretStr("your_api_key"),
        # With no Responses-only parameters, Chat Completions is selected.
    )
    payload = chat._get_request_payload([HumanMessage("Hello")])
    # Azure routes Chat Completions by deployment in the URL, so the
    # payload's ``model`` field still holds the model name.
    assert payload["model"] == "gpt-5"
    # Chat Completions payloads use ``messages``, never ``input``.
    assert "messages" in payload
    assert "input" not in payload