docs: Spell check fixes (#24217)

**Description:** Spell check fixes for docs, comments, and a couple of
strings. No code changes (e.g., variable names).
**Issue:** none
**Dependencies:** none
**Twitter handle:** hmartin
Harold Martin authored 2024-07-15 08:51:43 -07:00, committed by GitHub
parent cacdf96f9c
commit ccdaf14eff
13 changed files with 13 additions and 13 deletions


@@ -78,7 +78,7 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
             continue
         if inspect.isclass(type_):
-            # The clasification of the class is used to select a template
+            # The type of the class is used to select a template
             # for the object when rendering the documentation.
             # See `templates` directory for defined templates.
             # This is a hacky solution to distinguish between different


@@ -821,7 +821,7 @@ We recommend this method as a starting point when working with structured output
 - If multiple underlying techniques are supported, you can supply a `method` parameter to
 [toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs).
-You may want or need to use other techiniques if:
+You may want or need to use other techniques if:
 - The chat model you are using does not support tool calling.
 - You are working with very complex schemas and the model is having trouble generating outputs that conform.
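A minimal sketch of the `method` toggle mentioned above, assuming an OpenAI chat model and a Pydantic schema:

```python
from pydantic import BaseModel
from langchain_openai import ChatOpenAI


class Person(BaseModel):
    name: str
    age: int


llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model name
# `method` selects the underlying technique, e.g. "function_calling" or "json_mode"
structured_llm = llm.with_structured_output(Person, method="function_calling")
result = structured_llm.invoke("Ada Lovelace died at 36.")  # -> Person instance
```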


@@ -84,7 +84,7 @@ These are the core building blocks you can use when building applications.
 - [How to: use chat model to call tools](/docs/how_to/tool_calling)
 - [How to: stream tool calls](/docs/how_to/tool_streaming)
 - [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
-- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
+- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
 - [How to: force a specific tool call](/docs/how_to/tool_choice)
 - [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)


@@ -61,7 +61,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ
 ```python
 from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank
-# connect to an chat NIM running at localhost:8000, specifyig a specific model
+# connect to a chat NIM running at localhost:8000, specifying a model
 llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
 # connect to an embedding NIM running at localhost:8080
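# for example, the embedding client can target that NIM the same way
# (a sketch; the model name is left to the server's default here)
embedder = NVIDIAEmbeddings(base_url="http://localhost:8080/v1")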


@@ -202,7 +202,7 @@ Prem Templates are also available for Streaming too.
 ## Prem Embeddings
-In this section we are going to dicuss how we can get access to different embedding model using `PremEmbeddings` with LangChain. Lets start by importing our modules and setting our API Key.
+In this section we cover how we can get access to different embedding models using `PremEmbeddings` with LangChain. Let's start by importing our modules and setting our API Key.
 ```python
 import os
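# a hedged sketch of the setup described above; the key value and project id
# are placeholders, and the constructor arguments are assumed
from langchain_community.embeddings import PremEmbeddings

os.environ["PREMAI_API_KEY"] = "your-premai-api-key"
embedder = PremEmbeddings(project_id=8, model="text-embedding-3-large")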


@@ -60,7 +60,7 @@ class HuggingFaceCrossEncoder(BaseModel, BaseCrossEncoder):
             List of scores, one for each pair.
         """
         scores = self.client.predict(text_pairs)
-        # Somes models e.g bert-multilingual-passage-reranking-msmarco
+        # Some models e.g bert-multilingual-passage-reranking-msmarco
         # gives two score not_relevant and relevant as compare with the query.
         if len(scores.shape) > 1:  # we are going to get the relevant scores
             scores = map(lambda x: x[1], scores)
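As a usage sketch of the class above (the model name is an assumption), scoring one query/passage pair looks like:

```python
from langchain_community.cross_encoders import HuggingFaceCrossEncoder

encoder = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")  # assumed model
# each pair is (query, passage); two-headed rerankers hit the branch above
scores = encoder.score([["what is a panda?", "The giant panda is a bear native to China."]])
```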


@@ -60,7 +60,7 @@ class AscendEmbeddings(Embeddings, BaseModel):
             raise ValueError("model_path is required")
         if not os.access(values["model_path"], os.F_OK):
             raise FileNotFoundError(
-                f"Unabled to find valid model path in [{values['model_path']}]"
+                f"Unable to find valid model path in [{values['model_path']}]"
             )
         try:
             import torch_npu


@@ -72,7 +72,7 @@ class SQLStore(BaseStore[str, bytes]):
         from langchain_rag.storage import SQLStore
         # Instantiate the SQLStore with the root path
-        sql_store = SQLStore(namespace="test", db_url="sqllite://:memory:")
+        sql_store = SQLStore(namespace="test", db_url="sqlite://:memory:")
         # Set values for keys
         sql_store.mset([("key1", b"value1"), ("key2", b"value2")])
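A hedged continuation of the docstring example, assuming the standard `BaseStore` interface (note that SQLAlchemy conventionally spells the in-memory URL `sqlite:///:memory:`, with three slashes):

```python
# fetch values back; missing keys come back as None
values = sql_store.mget(["key1", "key2"])  # [b"value1", b"value2"]
# delete a key and enumerate what remains
sql_store.mdelete(["key1"])
remaining = list(sql_store.yield_keys())  # ["key2"]
```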


@@ -9,7 +9,7 @@ from langchain_community.tools.zenguard.tool import Detector, ZenGuardTool
 @pytest.fixture()
 def zenguard_tool() -> ZenGuardTool:
     if os.getenv("ZENGUARD_API_KEY") is None:
-        raise ValueError("ZENGUARD_API_KEY is not set in environment varibale")
+        raise ValueError("ZENGUARD_API_KEY is not set in environment variable")
     return ZenGuardTool()


@@ -12,7 +12,7 @@ PAGE_1 = """
 Hello.
 <a href="relative">Relative</a>
 <a href="/relative-base">Relative base.</a>
-<a href="http://cnn.com">Aboslute</a>
+<a href="http://cnn.com">Absolute</a>
 <a href="//same.foo">Test</a>
 </body>
 </html>


@@ -15,7 +15,7 @@ PathLike = Union[str, PurePath]
 class BaseMedia(Serializable):
     """Use to represent media content.
-    Media objets can be used to represent raw data, such as text or binary data.
+    Media objects can be used to represent raw data, such as text or binary data.
     LangChain Media objects allow associating metadata and an optional identifier
     with the content.
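For instance, `Document` (a `BaseMedia` subclass) pairs content with that optional identifier and metadata, roughly:

```python
from langchain_core.documents import Document

doc = Document(
    page_content="raw text content",
    id="doc-1",  # optional identifier
    metadata={"source": "example.txt"},
)
```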


@@ -279,7 +279,7 @@ class CustomChat(GenericFakeChatModel):
 async def test_can_swap_caches() -> None:
     """Test that we can use a different cache object.
-    This test verifies that when we fetch teh llm_string representation
+    This test verifies that when we fetch the llm_string representation
     of the chat model, we can swap the cache object and still get the same
     result.
     """


@@ -345,7 +345,7 @@ class RecursiveCharacterTextSplitter(TextSplitter):
             ]
         elif language == Language.ELIXIR:
             return [
-                # Split along method function and module definiton
+                # Split along method function and module definition
                 "\ndef ",
                 "\ndefp ",
                 "\ndefmodule ",