diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 39a653708b2..0a8f518b0a0 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -2146,9 +2146,13 @@ def _convert_to_openai_response_format(
     if isinstance(schema, type) and is_basemodel_subclass(schema):
         return schema
 
-    if "json_schema" in schema and schema.get("type") == "json_schema":
+    if (
+        isinstance(schema, dict)
+        and "json_schema" in schema
+        and schema.get("type") == "json_schema"
+    ):
         response_format = schema
-    elif "name" in schema and "schema" in schema:
+    elif isinstance(schema, dict) and "name" in schema and "schema" in schema:
         response_format = {"type": "json_schema", "json_schema": schema}
     else:
         strict = strict if strict is not None else True
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index 8b96b89e083..4ea4da055db 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -17,7 +17,8 @@ from langchain_core.messages import (
     ToolMessage,
 )
 from langchain_core.messages.ai import UsageMetadata
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
+from typing_extensions import TypedDict
 
 from langchain_openai import ChatOpenAI
 from langchain_openai.chat_models.base import (
@@ -805,3 +806,42 @@ def test__convert_to_openai_response_format() -> None:
 
     with pytest.raises(ValueError):
         _convert_to_openai_response_format(response_format, strict=False)
+
+
+@pytest.mark.parametrize("method", ["function_calling", "json_schema"])
+@pytest.mark.parametrize("strict", [True, None])
+def test_structured_output_strict(
+    method: Literal["function_calling", "json_schema"], strict: Optional[bool]
+) -> None:
+    """Test structured output across parametrized method and strict settings."""
+
+    llm = ChatOpenAI(model="gpt-4o-2024-08-06")
+
+    class Joke(BaseModel):
+        """Joke to tell user."""
+
+        setup: str = Field(description="question to set up a joke")
+        punchline: str = Field(description="answer to resolve the joke")
+
+    llm.with_structured_output(Joke, method=method, strict=strict)
+    # Schema
+    llm.with_structured_output(Joke.model_json_schema(), method=method, strict=strict)
+
+
+def test_nested_structured_output_strict() -> None:
+    """Test structured output via json_schema method for a nested TypedDict schema."""
+
+    llm = ChatOpenAI(model="gpt-4o-2024-08-06")
+
+    class SelfEvaluation(TypedDict):
+        score: int
+        text: str
+
+    class JokeWithEvaluation(TypedDict):
+        """Joke to tell user."""
+
+        setup: str
+        punchline: str
+        self_evaluation: SelfEvaluation
+
+    llm.with_structured_output(JokeWithEvaluation, method="json_schema")