fix(core): fixed typos in the documentation (#36459)

Fixes #36458 

Fixed typos in the documentation in the core module.
This commit is contained in:
jasiecky
2026-04-02 17:32:12 +02:00
committed by GitHub
parent cd394b70c1
commit c9f51aef85
6 changed files with 14 additions and 14 deletions

View File

@@ -1551,7 +1551,7 @@ def convert_to_openai_messages(
{
"role": "user",
"content": [
{"type": "text", "text": "whats in this"},
{"type": "text", "text": "what's in this"},
{
"type": "image_url",
"image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"},
@@ -1570,15 +1570,15 @@ def convert_to_openai_messages(
],
),
ToolMessage("foobar", tool_call_id="1", name="bar"),
{"role": "assistant", "content": "thats nice"},
{"role": "assistant", "content": "that's nice"},
]
oai_messages = convert_to_openai_messages(messages)
# -> [
# {'role': 'system', 'content': 'foo'},
# {'role': 'user', 'content': [{'type': 'text', 'text': 'whats in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]},
# {'role': 'user', 'content': [{'type': 'text', 'text': 'what's in this'}, {'type': 'image_url', 'image_url': {'url': "data:image/png;base64,'/9j/4AAQSk'"}}]},
# {'role': 'assistant', 'tool_calls': [{'type': 'function', 'id': '1','function': {'name': 'analyze', 'arguments': '{"baz": "buz"}'}}], 'content': ''},
# {'role': 'tool', 'name': 'bar', 'content': 'foobar'},
# {'role': 'assistant', 'content': 'thats nice'}
# {'role': 'assistant', 'content': 'that's nice'}
# ]
```

View File

@@ -3,7 +3,7 @@
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain `Runnable` objects inherently suppor
Programs created using LCEL and LangChain `Runnable` objects inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better for

View File

@@ -1,6 +1,6 @@
"""Tools are classes that an Agent uses to interact with the world.
Each tool has a description. Agent uses the description to choose the righ tool for the
Each tool has a description. Agent uses the description to choose the right tool for the
job.
"""

View File

@@ -815,7 +815,7 @@ def test_parse_with_different_pydantic_2_v1() -> None:
temperature: int
forecast: str
# Can't get pydantic to work here due to the odd typing of tryig to support
# Can't get pydantic to work here due to the odd typing of trying to support
# both v1 and v2 in the same codebase.
parser = PydanticToolsParser(tools=[Forecast])
message = AIMessage(
@@ -848,7 +848,7 @@ def test_parse_with_different_pydantic_2_proper() -> None:
temperature: int
forecast: str
# Can't get pydantic to work here due to the odd typing of tryig to support
# Can't get pydantic to work here due to the odd typing of trying to support
# both v1 and v2 in the same codebase.
parser = PydanticToolsParser(tools=[Forecast])
message = AIMessage(

View File

@@ -2411,7 +2411,7 @@ def test_fine_grained_tool_streaming() -> None:
@pytest.mark.vcr
def test_compaction() -> None:
"""Test the compation beta feature."""
"""Test the compaction beta feature."""
llm = ChatAnthropic(
model="claude-opus-4-6", # type: ignore[call-arg]
betas=["compact-2026-01-12"],
@@ -2465,7 +2465,7 @@ def test_compaction() -> None:
@pytest.mark.vcr
def test_compaction_streaming() -> None:
"""Test the compation beta feature."""
"""Test the compaction beta feature."""
llm = ChatAnthropic(
model="claude-opus-4-6", # type: ignore[call-arg]
betas=["compact-2026-01-12"],

View File

@@ -182,13 +182,13 @@ def test_function_calling(output_version: Literal["v0", "responses/v1", "v1"]) -
llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version)
bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}])
ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
ai_msg = cast(AIMessage, bound_llm.invoke("what's 5 * 4"))
assert len(ai_msg.tool_calls) == 1
assert ai_msg.tool_calls[0]["name"] == "multiply"
assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"}
full: Any = None
for chunk in bound_llm.stream("whats 5 * 4"):
for chunk in bound_llm.stream("what's 5 * 4"):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
assert len(full.tool_calls) == 1
@@ -416,7 +416,7 @@ def test_function_calling_and_structured_output(schema: Any) -> None:
assert parsed == response.additional_kwargs["parsed"]
# Test function calling
ai_msg = cast(AIMessage, bound_llm.invoke("whats 5 * 4"))
ai_msg = cast(AIMessage, bound_llm.invoke("what's 5 * 4"))
assert len(ai_msg.tool_calls) == 1
assert ai_msg.tool_calls[0]["name"] == "multiply"
assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"}
@@ -555,7 +555,7 @@ def test_stream_reasoning_summary(
)
message_1 = {
"role": "user",
"content": "What was the third tallest buliding in the year 2000?",
"content": "What was the third tallest building in the year 2000?",
}
response_1: BaseMessageChunk | None = None
for chunk in llm.stream([message_1]):