diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index b60d57d844b..3726e30d8b5 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -1551,7 +1551,7 @@ def convert_to_openai_messages( { "role": "user", "content": [ - {"type": "text", "text": "whats in this"}, + {"type": "text", "text": "what's in this"}, { "type": "image_url", "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"}, @@ -1570,15 +1570,15 @@ ], ), ToolMessage("foobar", tool_call_id="1", name="bar"), - {"role": "assistant", "content": "thats nice"}, + {"role": "assistant", "content": "that's nice"}, ] oai_messages = convert_to_openai_messages(messages) # -> [ # {'role': 'system', 'content': 'foo'}, - # {'role': 'user', 'content': [{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]}, + # {'role': 'user', 'content': [{'type': 'text', 'text': "what's in this"}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]}, # {'role': 'assistant', 'tool_calls': [{'type': 'function', 'id': '1','function': {'name': 'analyze', 'arguments': '{"baz": "buz"}'}}], 'content': ''}, # {'role': 'tool', 'name': 'bar', 'content': 'foobar'}, - # {'role': 'assistant', 'content': 'thats nice'} + # {'role': 'assistant', 'content': "that's nice"} # ] ``` diff --git a/libs/core/langchain_core/runnables/__init__.py b/libs/core/langchain_core/runnables/__init__.py index 70306d89182..d619bfa88d6 100644 --- a/libs/core/langchain_core/runnables/__init__.py +++ b/libs/core/langchain_core/runnables/__init__.py @@ -3,7 +3,7 @@ The LangChain Expression Language (LCEL) offers a declarative method to build production-grade programs that harness the power of LLMs. 
-Programs created using LCEL and LangChain `Runnable` objects inherently suppor +Programs created using LCEL and LangChain `Runnable` objects inherently support synchronous asynchronous, batch, and streaming operations. Support for **async** allows servers hosting LCEL based programs to scale bette for diff --git a/libs/core/langchain_core/tools/__init__.py b/libs/core/langchain_core/tools/__init__.py index ecfbd5ef8de..445ca990909 100644 --- a/libs/core/langchain_core/tools/__init__.py +++ b/libs/core/langchain_core/tools/__init__.py @@ -1,6 +1,6 @@ """Tools are classes that an Agent uses to interact with the world. -Each tool has a description. Agent uses the description to choose the righ tool for the +Each tool has a description. Agent uses the description to choose the right tool for the job. """ diff --git a/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py b/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py index 849f2a2a2c6..dc08b223ae4 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py +++ b/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py @@ -815,7 +815,7 @@ def test_parse_with_different_pydantic_2_v1() -> None: temperature: int forecast: str - # Can't get pydantic to work here due to the odd typing of tryig to support + # Can't get pydantic to work here due to the odd typing of trying to support # both v1 and v2 in the same codebase. parser = PydanticToolsParser(tools=[Forecast]) message = AIMessage( @@ -848,7 +848,7 @@ def test_parse_with_different_pydantic_2_proper() -> None: temperature: int forecast: str - # Can't get pydantic to work here due to the odd typing of tryig to support + # Can't get pydantic to work here due to the odd typing of trying to support # both v1 and v2 in the same codebase. 
parser = PydanticToolsParser(tools=[Forecast]) message = AIMessage( diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py index 6ab3b798fb5..0c0299ec74d 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py +++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py @@ -2411,7 +2411,7 @@ def test_fine_grained_tool_streaming() -> None: @pytest.mark.vcr def test_compaction() -> None: - """Test the compation beta feature.""" + """Test the compaction beta feature.""" llm = ChatAnthropic( model="claude-opus-4-6", # type: ignore[call-arg] betas=["compact-2026-01-12"], @@ -2465,7 +2465,7 @@ def test_compaction() -> None: @pytest.mark.vcr def test_compaction_streaming() -> None: - """Test the compation beta feature.""" + """Test the compaction beta feature.""" llm = ChatAnthropic( model="claude-opus-4-6", # type: ignore[call-arg] betas=["compact-2026-01-12"], diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py index 219a3a748e8..153b73965be 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py @@ -182,13 +182,13 @@ def test_function_calling(output_version: Literal["v0", "responses/v1", "v1"]) - llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version) bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}]) - ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4")) + ai_msg = cast(AIMessage, bound_llm.invoke("what's 5 * 4")) assert len(ai_msg.tool_calls) == 1 assert ai_msg.tool_calls[0]["name"] == "multiply" assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"} full: Any = None - for chunk in bound_llm.stream("whats 5 * 4"): + for chunk in bound_llm.stream("what's 5 * 4"): 
assert isinstance(chunk, AIMessageChunk) full = chunk if full is None else full + chunk assert len(full.tool_calls) == 1 @@ -416,7 +416,7 @@ def test_function_calling_and_structured_output(schema: Any) -> None: assert parsed == response.additional_kwargs["parsed"] # Test function calling - ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4")) + ai_msg = cast(AIMessage, bound_llm.invoke("what's 5 * 4")) assert len(ai_msg.tool_calls) == 1 assert ai_msg.tool_calls[0]["name"] == "multiply" assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"} @@ -555,7 +555,7 @@ def test_stream_reasoning_summary( ) message_1 = { "role": "user", - "content": "What was the third tallest buliding in the year 2000?", + "content": "What was the third tallest building in the year 2000?", } response_1: BaseMessageChunk | None = None for chunk in llm.stream([message_1]):