fix(openai): detect codex models for responses api preference (#35058)

Guofang.Tang
2026-02-09 02:15:48 +08:00
committed by GitHub
parent 4276d31cb5
commit 06a7d079b0
2 changed files with 6 additions and 1 deletion


@@ -477,7 +477,7 @@ def _handle_openai_bad_request(e: openai.BadRequestError) -> None:
 def _model_prefers_responses_api(model_name: str | None) -> bool:
     if not model_name:
         return False
-    return "gpt-5.2-pro" in model_name
+    return "gpt-5.2-pro" in model_name or "codex" in model_name


 _BM = TypeVar("_BM", bound=BaseModel)
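
For reference, a standalone sketch of the updated helper (copied from the hunk above) showing the new behavior: any model name containing "codex" now prefers the Responses API, in addition to the existing "gpt-5.2-pro" match.

def _model_prefers_responses_api(model_name: str | None) -> bool:
    if not model_name:
        return False
    return "gpt-5.2-pro" in model_name or "codex" in model_name

assert _model_prefers_responses_api("gpt-5.1-codex-max")  # any "codex" model name now matches
assert not _model_prefers_responses_api("gpt-5.1")        # non-codex GPT-5.1 is unchanged
assert not _model_prefers_responses_api(None)             # missing model name still returns False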


@@ -3191,7 +3191,12 @@ def test_gpt_5_1_temperature_with_reasoning_effort_none(
 def test_model_prefers_responses_api() -> None:
     assert _model_prefers_responses_api("gpt-5.2-pro")
+    assert _model_prefers_responses_api("gpt-5.2-codex")
+    assert _model_prefers_responses_api("gpt-5.1-codex")
+    assert _model_prefers_responses_api("gpt-5.1-codex-max")
+    assert _model_prefers_responses_api("gpt-5-codex")
     assert not _model_prefers_responses_api("gpt-5.1")
+    assert not _model_prefers_responses_api("gpt-5")


 def test_openai_structured_output_refusal_handling_responses_api() -> None:
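
A hedged usage sketch (not part of the diff): with this change, a ChatOpenAI instance configured with a codex model name is expected to route requests through the Responses API by default, the same way "gpt-5.2-pro" already did. The routing that consumes this helper lives elsewhere in the chat model implementation and is not shown in this commit.

from langchain_openai import ChatOpenAI

# "gpt-5.1-codex" contains "codex", so _model_prefers_responses_api() returns True
# and the model should now default to the Responses API rather than Chat Completions.
llm = ChatOpenAI(model="gpt-5.1-codex")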