fix(docs): Fix several typos and grammar (#33487)

Just typo changes

Co-authored-by: Mason Daugherty <mason@langchain.dev>
Author: Chenyang Li
Date: 2025-10-14 20:04:14 -04:00
Committed by: GitHub
Parent: 68ceeb64f6
Commit: 6e25e185f6
8 changed files with 11 additions and 11 deletions

View File

@@ -11,7 +11,7 @@ pip install langchain-core
 ## What is it?
-LangChain Core contains the base abstractions that power the the LangChain ecosystem.
+LangChain Core contains the base abstractions that power the LangChain ecosystem.
 These abstractions are designed to be as modular and simple as possible.

View File

@@ -69,7 +69,7 @@ Passage:
 punchline: str = Field(description="The punchline to the joke")
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
@@ -139,7 +139,7 @@ def create_extraction_chain(
 punchline: str = Field(description="The punchline to the joke")
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
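The docstrings corrected above all point readers to `with_structured_output`. For reference, a minimal sketch of the pattern those comments describe (the `Joke` schema is illustrative, mirroring the `punchline` field shown in the diff context; assumes `langchain-anthropic` and `pydantic` are installed):

```python
from langchain_anthropic import ChatAnthropic
from pydantic import BaseModel, Field


class Joke(BaseModel):
    """Joke to tell the user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")


# Or any other chat model that supports tools; see the structured_output
# documentation for an up-to-date list of models supporting
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_model = model.with_structured_output(Joke)
result = structured_model.invoke("Tell me a joke about cats")  # returns a Joke instance
```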

View File

@@ -73,7 +73,7 @@ def create_tagging_chain(
 punchline: Annotated[str, ..., "The punchline of the joke"]
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)
@@ -149,7 +149,7 @@ def create_tagging_chain_pydantic(
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)

View File

@@ -40,7 +40,7 @@ If a property is not present and is not required in the function parameters, do
 punchline: str = Field(description="The punchline to the joke")
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)

View File

@@ -53,7 +53,7 @@ from pydantic import BaseModel
 punchline: str = Field(description="The punchline to the joke")
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
@@ -172,7 +172,7 @@ def create_openai_fn_runnable(
 punchline: str = Field(description="The punchline to the joke")
 # Or any other chat model that supports tools.
-# Please reference to to the documentation of structured_output
+# Please reference to the documentation of structured_output
 # to see an up to date list of which models support
 # with_structured_output.
 model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)

View File

@@ -386,7 +386,7 @@ def test_streaming_generation_info() -> None:
 )
 list(chat.stream("Respond with the single word Hello", stop=["o"]))
 generation = callback.saved_things["generation"]
-# `Hello!` is two tokens, assert that that is what is returned
+# `Hello!` is two tokens, assert that is what is returned
 assert isinstance(generation, LLMResult)
 assert generation.generations[0][0].text == "Hell"
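For context on the assertion above: the `stop=["o"]` argument truncates generation at the first `o`, which is why the two-token `Hello!` comes back as `Hell`. A minimal sketch of the same stop-sequence behavior outside the callback machinery (hypothetical prompt; assumes `langchain-openai` is installed and the model answers "Hello"):

```python
from langchain_openai import ChatOpenAI

chat = ChatOpenAI(temperature=0)
# The stop sequence cuts the reply off at the first "o", so a model
# answering "Hello" streams back only "Hell".
chunks = list(chat.stream("Respond with the single word Hello", stop=["o"]))
text = "".join(chunk.content for chunk in chunks)
assert text == "Hell"  # holds under the assumption the model answers "Hello"
```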

View File

@@ -116,7 +116,7 @@ def test_chat_openai_streaming_generation_info() -> None:
 chat = _get_llm(max_tokens=2, temperature=0, callbacks=callback_manager)
 list(chat.stream("hi"))
 generation = callback.saved_things["generation"]
-# `Hello!` is two tokens, assert that that is what is returned
+# `Hello!` is two tokens, assert that is what is returned
 assert generation.generations[0][0].text == "Hello!"

View File

@@ -141,7 +141,7 @@ def test_chat_openai_streaming_generation_info() -> None:
 chat = ChatOpenAI(max_tokens=2, temperature=0, callbacks=callback_manager)  # type: ignore[call-arg]
 list(chat.stream("hi"))
 generation = callback.saved_things["generation"]
-# `Hello!` is two tokens, assert that that is what is returned
+# `Hello!` is two tokens, assert that is what is returned
 assert generation.generations[0][0].text == "Hello!"