diff --git a/docs/api_reference/create_api_rst.py b/docs/api_reference/create_api_rst.py
index 448a3c4d6f6..e323ac33d0f 100644
--- a/docs/api_reference/create_api_rst.py
+++ b/docs/api_reference/create_api_rst.py
@@ -78,7 +78,7 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
             continue
 
         if inspect.isclass(type_):
-            # The clasification of the class is used to select a template
+            # The type of the class is used to select a template
             # for the object when rendering the documentation.
             # See `templates` directory for defined templates.
             # This is a hacky solution to distinguish between different
diff --git a/docs/docs/concepts.mdx b/docs/docs/concepts.mdx
index 56f94c16b29..20772f96783 100644
--- a/docs/docs/concepts.mdx
+++ b/docs/docs/concepts.mdx
@@ -821,7 +821,7 @@ We recommend this method as a starting point when working with structured output
 
 - If multiple underlying techniques are supported, you can supply a `method` parameter to [toggle which one is used](/docs/how_to/structured_output/#advanced-specifying-the-method-for-structuring-outputs).
 
-You may want or need to use other techiniques if:
+You may want or need to use other techniques if:
 
 - The chat model you are using does not support tool calling.
 - You are working with very complex schemas and the model is having trouble generating outputs that conform.
diff --git a/docs/docs/how_to/index.mdx b/docs/docs/how_to/index.mdx
index f80f95dabee..88477cb22c3 100644
--- a/docs/docs/how_to/index.mdx
+++ b/docs/docs/how_to/index.mdx
@@ -84,7 +84,7 @@ These are the core building blocks you can use when building applications.
 - [How to: use chat model to call tools](/docs/how_to/tool_calling)
 - [How to: stream tool calls](/docs/how_to/tool_streaming)
 - [How to: few shot prompt tool behavior](/docs/how_to/tools_few_shot)
-- [How to: bind model-specific formated tools](/docs/how_to/tools_model_specific)
+- [How to: bind model-specific formatted tools](/docs/how_to/tools_model_specific)
 - [How to: force a specific tool call](/docs/how_to/tool_choice)
 - [How to: init any model in one line](/docs/how_to/chat_models_universal_init/)
diff --git a/docs/docs/integrations/providers/nvidia.mdx b/docs/docs/integrations/providers/nvidia.mdx
index 70f1123c414..0f02b352236 100644
--- a/docs/docs/integrations/providers/nvidia.mdx
+++ b/docs/docs/integrations/providers/nvidia.mdx
@@ -61,7 +61,7 @@ When ready to deploy, you can self-host models with NVIDIA NIM—which is includ
 ```python
 from langchain_nvidia_ai_endpoints import ChatNVIDIA, NVIDIAEmbeddings, NVIDIARerank
 
-# connect to an chat NIM running at localhost:8000, specifyig a specific model
+# connect to a chat NIM running at localhost:8000, specifying a model
 llm = ChatNVIDIA(base_url="http://localhost:8000/v1", model="meta/llama3-8b-instruct")
 
 # connect to an embedding NIM running at localhost:8080
diff --git a/docs/docs/integrations/providers/premai.md b/docs/docs/integrations/providers/premai.md
index eeb0ca72552..e0592ee7666 100644
--- a/docs/docs/integrations/providers/premai.md
+++ b/docs/docs/integrations/providers/premai.md
@@ -202,7 +202,7 @@ Prem Templates are also available for Streaming too.
 
 ## Prem Embeddings
 
-In this section we are going to dicuss how we can get access to different embedding model using `PremEmbeddings` with LangChain. Lets start by importing our modules and setting our API Key.
+In this section we cover how we can get access to different embedding models using `PremEmbeddings` with LangChain. Let's start by importing our modules and setting our API Key.
 
 ```python
 import os
diff --git a/libs/community/langchain_community/cross_encoders/huggingface.py b/libs/community/langchain_community/cross_encoders/huggingface.py
index 0a1229bb033..f9797f4d546 100644
--- a/libs/community/langchain_community/cross_encoders/huggingface.py
+++ b/libs/community/langchain_community/cross_encoders/huggingface.py
@@ -60,7 +60,7 @@ class HuggingFaceCrossEncoder(BaseModel, BaseCrossEncoder):
             List of scores, one for each pair.
         """
         scores = self.client.predict(text_pairs)
-        # Somes models e.g bert-multilingual-passage-reranking-msmarco
+        # Some models e.g bert-multilingual-passage-reranking-msmarco
         # gives two score not_relevant and relevant as compare with the query.
         if len(scores.shape) > 1:  # we are going to get the relevant scores
             scores = map(lambda x: x[1], scores)
diff --git a/libs/community/langchain_community/embeddings/ascend.py b/libs/community/langchain_community/embeddings/ascend.py
index 7512599bc53..2b94f4ff668 100644
--- a/libs/community/langchain_community/embeddings/ascend.py
+++ b/libs/community/langchain_community/embeddings/ascend.py
@@ -60,7 +60,7 @@ class AscendEmbeddings(Embeddings, BaseModel):
             raise ValueError("model_path is required")
         if not os.access(values["model_path"], os.F_OK):
             raise FileNotFoundError(
-                f"Unabled to find valid model path in [{values['model_path']}]"
+                f"Unable to find valid model path in [{values['model_path']}]"
             )
         try:
             import torch_npu
diff --git a/libs/community/langchain_community/storage/sql.py b/libs/community/langchain_community/storage/sql.py
index a92daae1d8c..170372f4e74 100644
--- a/libs/community/langchain_community/storage/sql.py
+++ b/libs/community/langchain_community/storage/sql.py
@@ -72,7 +72,7 @@ class SQLStore(BaseStore[str, bytes]):
             from langchain_rag.storage import SQLStore
 
             # Instantiate the SQLStore with the root path
-            sql_store = SQLStore(namespace="test", db_url="sqllite://:memory:")
+            sql_store = SQLStore(namespace="test", db_url="sqlite://:memory:")
 
             # Set values for keys
             sql_store.mset([("key1", b"value1"), ("key2", b"value2")])
diff --git a/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py b/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py
index 9f28fce75a7..238ff93b0bb 100644
--- a/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py
+++ b/libs/community/tests/integration_tests/tools/zenguard/test_zenguard.py
@@ -9,7 +9,7 @@ from langchain_community.tools.zenguard.tool import Detector, ZenGuardTool
 @pytest.fixture()
 def zenguard_tool() -> ZenGuardTool:
     if os.getenv("ZENGUARD_API_KEY") is None:
-        raise ValueError("ZENGUARD_API_KEY is not set in environment varibale")
+        raise ValueError("ZENGUARD_API_KEY is not set in environment variable")
     return ZenGuardTool()
diff --git a/libs/community/tests/unit_tests/graph_vectorstores/extractors/test_html_link_extractor.py b/libs/community/tests/unit_tests/graph_vectorstores/extractors/test_html_link_extractor.py
index d94eec76c6c..478285d9d25 100644
--- a/libs/community/tests/unit_tests/graph_vectorstores/extractors/test_html_link_extractor.py
+++ b/libs/community/tests/unit_tests/graph_vectorstores/extractors/test_html_link_extractor.py
@@ -12,7 +12,7 @@ PAGE_1 = """
 Hello.
 Relative
 Relative base.
-Aboslute
+Absolute
 Test
diff --git a/libs/core/langchain_core/documents/base.py b/libs/core/langchain_core/documents/base.py
index d83a8cd6006..a5f8ffbc39f 100644
--- a/libs/core/langchain_core/documents/base.py
+++ b/libs/core/langchain_core/documents/base.py
@@ -15,7 +15,7 @@ PathLike = Union[str, PurePath]
 class BaseMedia(Serializable):
     """Use to represent media content.
 
-    Media objets can be used to represent raw data, such as text or binary data.
+    Media objects can be used to represent raw data, such as text or binary data.
 
     LangChain Media objects allow associating metadata and an optional identifier
     with the content.
diff --git a/libs/core/tests/unit_tests/language_models/chat_models/test_cache.py b/libs/core/tests/unit_tests/language_models/chat_models/test_cache.py
index 19a16e404b7..99ab278addc 100644
--- a/libs/core/tests/unit_tests/language_models/chat_models/test_cache.py
+++ b/libs/core/tests/unit_tests/language_models/chat_models/test_cache.py
@@ -279,7 +279,7 @@ class CustomChat(GenericFakeChatModel):
 async def test_can_swap_caches() -> None:
     """Test that we can use a different cache object.
 
-    This test verifies that when we fetch teh llm_string representation
+    This test verifies that when we fetch the llm_string representation
     of the chat model, we can swap the cache object and still get the
     same result.
     """
diff --git a/libs/text-splitters/langchain_text_splitters/character.py b/libs/text-splitters/langchain_text_splitters/character.py
index 7bab0005702..3d8a8601c14 100644
--- a/libs/text-splitters/langchain_text_splitters/character.py
+++ b/libs/text-splitters/langchain_text_splitters/character.py
@@ -345,7 +345,7 @@ class RecursiveCharacterTextSplitter(TextSplitter):
             ]
         elif language == Language.ELIXIR:
             return [
-                # Split along method function and module definiton
+                # Split along method function and module definition
                 "\ndef ",
                 "\ndefp ",
                 "\ndefmodule ",
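
Reviewer note on the `libs/community/langchain_community/storage/sql.py` hunk: the change corrects `sqllite` to `sqlite`, but SQLAlchemy usually spells an in-memory SQLite URL with three slashes (`sqlite:///:memory:`). Below is a minimal sketch of the docstring example with that URL, importing `SQLStore` from the module path touched by the hunk rather than the docstring's `langchain_rag.storage` import, which looks stale. This mirrors the docstring and is not a verified run; depending on your `langchain_community` version you may also need to create the backing table before the first write.

```python
# Hedged sketch of the SQLStore docstring example, with the in-memory SQLite URL
# written in SQLAlchemy's usual three-slash form instead of "sqlite://:memory:".
from langchain_community.storage.sql import SQLStore

# Instantiate the store with a namespace and a database URL
sql_store = SQLStore(namespace="test", db_url="sqlite:///:memory:")

# Set values for keys (SQLStore implements the BaseStore[str, bytes] interface)
sql_store.mset([("key1", b"value1"), ("key2", b"value2")])

# Read them back; missing keys come back as None
print(sql_store.mget(["key1", "key2"]))

# Delete a key
sql_store.mdelete(["key1"])
```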