diff --git a/cookbook/img-to_img-search_CLIP_ChromaDB.ipynb b/cookbook/img-to_img-search_CLIP_ChromaDB.ipynb
index 7e11d4b9dc3..dc8fe939cdd 100644
--- a/cookbook/img-to_img-search_CLIP_ChromaDB.ipynb
+++ b/cookbook/img-to_img-search_CLIP_ChromaDB.ipynb
@@ -358,7 +358,7 @@
    "id": "6e5cd014-db86-4d6b-8399-25cae3da5570",
    "metadata": {},
    "source": [
-    "## Helper function to plot retrived similar images"
+    "## Helper function to plot retrieved similar images"
    ]
   },
   {
diff --git a/docs/docs/how_to/qa_citations.ipynb b/docs/docs/how_to/qa_citations.ipynb
index 13d230315b8..8d0132eba45 100644
--- a/docs/docs/how_to/qa_citations.ipynb
+++ b/docs/docs/how_to/qa_citations.ipynb
@@ -127,7 +127,7 @@
    "id": "c89e2045-9244-43e6-bf3f-59af22658529",
    "metadata": {},
    "source": [
-    "Now that we've got a [model](/docs/concepts/chat_models/), [retriver](/docs/concepts/retrievers/) and [prompt](/docs/concepts/prompt_templates/), let's chain them all together. Following the how-to guide on [adding citations](/docs/how_to/qa_citations) to a RAG application, we'll make it so our chain returns both the answer and the retrieved Documents. This uses the same [LangGraph](/docs/concepts/architecture/#langgraph) implementation as in the [RAG Tutorial](/docs/tutorials/rag)."
+    "Now that we've got a [model](/docs/concepts/chat_models/), [retriever](/docs/concepts/retrievers/) and [prompt](/docs/concepts/prompt_templates/), let's chain them all together. Following the how-to guide on [adding citations](/docs/how_to/qa_citations) to a RAG application, we'll make it so our chain returns both the answer and the retrieved Documents. This uses the same [LangGraph](/docs/concepts/architecture/#langgraph) implementation as in the [RAG Tutorial](/docs/tutorials/rag)."
    ]
   },
   {
diff --git a/docs/docs/how_to/query_multiple_queries.ipynb b/docs/docs/how_to/query_multiple_queries.ipynb
index b8f06ca6889..a01100c9f72 100644
--- a/docs/docs/how_to/query_multiple_queries.ipynb
+++ b/docs/docs/how_to/query_multiple_queries.ipynb
@@ -270,7 +270,7 @@
    "source": [
     "## Retrieval with query analysis\n",
     "\n",
-    "So how would we include this in a chain? One thing that will make this a lot easier is if we call our retriever asyncronously - this will let us loop over the queries and not get blocked on the response time."
+    "So how would we include this in a chain? One thing that will make this a lot easier is if we call our retriever asynchronously - this will let us loop over the queries and not get blocked on the response time."
    ]
   },
   {
diff --git a/docs/docs/how_to/summarize_map_reduce.ipynb b/docs/docs/how_to/summarize_map_reduce.ipynb
index 7518ef7ac28..4eb6521b6bf 100644
--- a/docs/docs/how_to/summarize_map_reduce.ipynb
+++ b/docs/docs/how_to/summarize_map_reduce.ipynb
@@ -24,7 +24,7 @@
    "\n",
-    "Note that the map step is typically parallelized over the input documents. This strategy is especially effective when understanding of a sub-document does not rely on preceeding context. For example, when summarizing a corpus of many, shorter documents.\n",
+    "Note that the map step is typically parallelized over the input documents. This strategy is especially effective when understanding of a sub-document does not rely on preceding context. For example, when summarizing a corpus of many shorter documents.\n",
    "\n",
-    "[LangGraph](https://langchain-ai.github.io/langgraph/), built on top of `langchain-core`, suports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows and is well-suited to this problem:\n",
+    "[LangGraph](https://langchain-ai.github.io/langgraph/), built on top of `langchain-core`, supports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows and is well-suited to this problem:\n",
    "\n",
    "- LangGraph allows for individual steps (such as successive summarizations) to be streamed, allowing for greater control of execution;\n",
    "- LangGraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) supports error recovery, extending with human-in-the-loop workflows, and easier incorporation into conversational applications.\n",
diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb
index 664e786de3a..fa051dd7ca6 100644
--- a/docs/docs/integrations/llms/bittensor.ipynb
+++ b/docs/docs/integrations/llms/bittensor.ipynb
@@ -48,7 +48,7 @@
    "print(f\"Response provided by LLM with system prompt set is : {sys_resp}\")\n",
    "\n",
    "# The top_responses parameter can give multiple responses based on its parameter value\n",
-    "# This below code retrive top 10 miner's response all the response are in format of json\n",
+    "# The code below retrieves the top 10 miners' responses; all responses are returned as JSON\n",
    "\n",
    "# Json response structure is\n",
    "\"\"\" {\n",
diff --git a/docs/docs/integrations/retrievers/zotero.ipynb b/docs/docs/integrations/retrievers/zotero.ipynb
index bed2330996f..d11ee1517c9 100644
--- a/docs/docs/integrations/retrievers/zotero.ipynb
+++ b/docs/docs/integrations/retrievers/zotero.ipynb
@@ -198,7 +198,7 @@
    "\n",
    " Args:\n",
    "     query: str: The search query to be used. Try to keep this specific and short, e.g. a specific topic or author name\n",
-    "     itemType: Optional. Type of item to search for (e.g. \"book\" or \"journalArticle\"). Multiple types can be passed as a string seperated by \"||\", e.g. \"book || journalArticle\". Defaults to all types.\n",
+    "     itemType: Optional. Type of item to search for (e.g. \"book\" or \"journalArticle\"). Multiple types can be passed as a string separated by \"||\", e.g. \"book || journalArticle\". Defaults to all types.\n",
    "     tag: Optional. For searching over tags attached to library items. If documents tagged with multiple tags are to be retrieved, pass them as a list. If documents with any of the tags are to be retrieved, pass them as a string separated by \"||\", e.g. \"tag1 || tag2\"\n",
    "     qmode: Search mode to use. Changes what the query searches over. \"everything\" includes full-text content. \"titleCreatorYear\" to search over title, authors and year. Defaults to \"everything\".\n",
    "     since: Return only objects modified after the specified library version. Defaults to return everything.\n",
diff --git a/docs/docs/integrations/tools/cassandra_database.ipynb b/docs/docs/integrations/tools/cassandra_database.ipynb
index 3370b079dc2..018fd21527a 100644
--- a/docs/docs/integrations/tools/cassandra_database.ipynb
+++ b/docs/docs/integrations/tools/cassandra_database.ipynb
@@ -70,7 +70,7 @@
    "Gathers all schema information for the connected database or a specific schema. Critical for the agent when determining actions. \n",
    "\n",
    "### `cassandra_db_select_table_data`\n",
-    "Selects data from a specific keyspace and table. The agent can pass paramaters for a predicate and limits on the number of returned records. \n",
+    "Selects data from a specific keyspace and table. The agent can pass parameters for a predicate and limits on the number of returned records. \n",
    "\n",
    "### `cassandra_db_query`\n",
-    "Expiriemental alternative to `cassandra_db_select_table_data` which takes a query string completely formed by the agent instead of parameters. *Warning*: This can lead to unusual queries that may not be as performant(or even work). This may be removed in future releases. If it does something cool, we want to know about that too. You never know!"
+    "Experimental alternative to `cassandra_db_select_table_data` which takes a query string completely formed by the agent instead of parameters. *Warning*: This can lead to unusual queries that may not be as performant (or even work). This may be removed in future releases. If it does something cool, we want to know about that too. You never know!"
diff --git a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb
index 86591c08c18..c1f5d394455 100644
--- a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb
+++ b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb
@@ -123,7 +123,7 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-   "Finally, Query and retrive data"
+   "Finally, query and retrieve data"
   ]
  },
  {
diff --git a/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb
index 1181156f3d9..7796530eb84 100644
--- a/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb
+++ b/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb
@@ -123,7 +123,7 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-   "Finally, Query and retrive data"
+   "Finally, query and retrieve data"
   ]
  },
  {
diff --git a/docs/docs/integrations/vectorstores/vearch.ipynb b/docs/docs/integrations/vectorstores/vearch.ipynb
index d5e40fdc4c1..473828e1404 100644
--- a/docs/docs/integrations/vectorstores/vearch.ipynb
+++ b/docs/docs/integrations/vectorstores/vearch.ipynb
@@ -436,7 +436,7 @@
    }
   ],
   "source": [
-    "##delete and get function need to maintian docids\n",
+    "## The delete and get functions need to maintain docids\n",
    "##your docid\n",
    "\n",
    "res_d = vearch_standalone.delete(\n",
diff --git a/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb b/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb
index 4f03f98092e..bc305ca0bb7 100644
--- a/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb
+++ b/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb
@@ -19,7 +19,7 @@
    "\n",
    "In the reduce step, `MapReduceDocumentsChain` supports a recursive \"collapsing\" of the summaries: the inputs would be partitioned based on a token limit, and summaries would be generated of the partitions. This step would be repeated until the total length of the summaries was within a desired limit, allowing for the summarization of arbitrary-length text. This is particularly useful for models with smaller context windows.\n",
    "\n",
-    "LangGraph suports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows, and confers a number of advantages for this problem:\n",
+    "LangGraph supports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows, and confers a number of advantages for this problem:\n",
    "\n",
    "- LangGraph allows for individual steps (such as successive summarizations) to be streamed, allowing for greater control of execution;\n",
    "- LangGraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) supports error recovery, extending with human-in-the-loop workflows, and easier incorporation into conversational applications.\n",
diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
index 15a765148ca..bec0d7ec57f 100644
--- a/libs/community/langchain_community/chat_models/premai.py
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -244,7 +244,7 @@ def _messages_to_prompt_dict(
         else:
             raise ChatPremAPIError("No such role explicitly exists")
 
-    # do a seperate search for tool calls
+    # do a separate search for tool calls
     tool_prompt = ""
     for input_msg in input_messages:
         if isinstance(input_msg, ToolMessage):
diff --git a/libs/community/langchain_community/vectorstores/documentdb.py b/libs/community/langchain_community/vectorstores/documentdb.py
index d6a96872838..8730493b9d6 100644
--- a/libs/community/langchain_community/vectorstores/documentdb.py
+++ b/libs/community/langchain_community/vectorstores/documentdb.py
@@ -327,7 +327,7 @@ class DocumentDBVectorSearch(VectorStore):
         Returns:
             A list of documents closest to the query vector
         """
-        # $match can't be null, so intializes to {} when None to avoid
+        # $match can't be null, so initializes to {} when None to avoid
         # "the match filter must be an expression in an object"
         if not filter:
             filter = {}
diff --git a/libs/community/langchain_community/vectorstores/faiss.py b/libs/community/langchain_community/vectorstores/faiss.py
index 3f777424315..19540c02ffa 100644
--- a/libs/community/langchain_community/vectorstores/faiss.py
+++ b/libs/community/langchain_community/vectorstores/faiss.py
@@ -1352,7 +1352,7 @@ class FAISS(VectorStore):
             satisfies the filter conditions, otherwise False.
 
         Raises:
-            ValueError: If the filter is invalid or contains unsuported operators.
+            ValueError: If the filter is invalid or contains unsupported operators.
""" if callable(filter): return filter diff --git a/libs/community/langchain_community/vectorstores/falkordb_vector.py b/libs/community/langchain_community/vectorstores/falkordb_vector.py index d3a74177f93..0ed8d9e39d7 100644 --- a/libs/community/langchain_community/vectorstores/falkordb_vector.py +++ b/libs/community/langchain_community/vectorstores/falkordb_vector.py @@ -690,7 +690,7 @@ class FalkorDBVector(VectorStore): "}) has already been created" ) else: - raise ValueError(f"Error occured: {e}") + raise ValueError(f"Error occurred: {e}") def create_new_index_on_relationship( self, @@ -739,7 +739,7 @@ class FalkorDBVector(VectorStore): "}] has already been created" ) else: - raise ValueError(f"Error occured: {e}") + raise ValueError(f"Error occurred: {e}") def create_new_keyword_index(self, text_node_properties: List[str] = []) -> None: """ @@ -1152,7 +1152,7 @@ class FalkorDBVector(VectorStore): Args: embedding: The `Embeddings` model you would like to use database: The name of the existing graph/database you - would like to intialize + would like to initialize node_label: The label of the node you want to initialize. embedding_node_property: The name of the property you want your embeddings to be stored in. @@ -1633,7 +1633,7 @@ class FalkorDBVector(VectorStore): for result in sorted_results ] except Exception as e: - raise ValueError(f"An error occured: {e}") + raise ValueError(f"An error occurred: {e}") return docs diff --git a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py index b75c920224a..2c7614f9b24 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py +++ b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py @@ -82,7 +82,7 @@ class TestAzureCosmosDBVectorSearch: if not os.getenv("AZURE_OPENAI_API_VERSION"): raise ValueError("AZURE_OPENAI_API_VERSION environment variable is not set") - # insure the test collection is empty + # ensure the test collection is empty collection = prepare_collection() assert collection.count_documents({}) == 0 # type: ignore[index] diff --git a/libs/community/tests/integration_tests/vectorstores/test_documentdb.py b/libs/community/tests/integration_tests/vectorstores/test_documentdb.py index 8db59d292fc..acb2670d1b3 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_documentdb.py +++ b/libs/community/tests/integration_tests/vectorstores/test_documentdb.py @@ -69,7 +69,7 @@ class TestDocumentDBVectorSearch: if not os.getenv("OPENAI_API_KEY"): raise ValueError("OPENAI_API_KEY environment variable is not set") - # insure the test collection is empty + # ensure the test collection is empty collection = prepare_collection() assert collection.count_documents({}) == 0 # type: ignore[index] diff --git a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py index 370fe9b43be..cbb3780102b 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py +++ b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py @@ -170,7 +170,7 @@ def test_hanavector_table_with_missing_columns() -> None: cur.close() # Check if table is created - exception_occured = False + exception_occurred = False try: HanaDB( connection=test_setup.conn, @@ -178,10 +178,10 @@ def test_hanavector_table_with_missing_columns() -> None: distance_strategy=DistanceStrategy.COSINE, 
             table_name=table_name,
         )
-        exception_occured = False
+        exception_occurred = False
     except AttributeError:
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
 
 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
@@ -243,7 +243,7 @@ def test_hanavector_table_with_wrong_typed_columns() -> None:
     cur.close()
 
     # Check if table is created
-    exception_occured = False
+    exception_occurred = False
     try:
         HanaDB(
             connection=test_setup.conn,
@@ -251,11 +251,11 @@
             distance_strategy=DistanceStrategy.COSINE,
             table_name=table_name,
         )
-        exception_occured = False
+        exception_occurred = False
     except AttributeError as err:
         print(err)  # noqa: T201
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
 
 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
@@ -521,12 +521,12 @@ def test_hanavector_similarity_search_with_metadata_filter_invalid_type(
         table_name=table_name,
     )
 
-    exception_occured = False
+    exception_occurred = False
    try:
        vectorDB.similarity_search(texts[0], 3, filter={"wrong_type": 0.1})
    except ValueError:
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
 
 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
@@ -727,20 +727,20 @@ def test_hanavector_delete_called_wrong(
     )
 
     # Delete without filter parameter
-    exception_occured = False
+    exception_occurred = False
     try:
         vectorDB.delete()
     except ValueError:
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
     # Delete with ids parameter
-    exception_occured = False
+    exception_occurred = False
     try:
         vectorDB.delete(ids=["id1", "id"], filter={"start": 100, "end": 200})
     except ValueError:
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
 
 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
@@ -884,7 +884,7 @@ def test_invalid_metadata_keys(texts: List[str], metadatas: List[dict]) -> None:
     invalid_metadatas = [
         {"sta rt": 0, "end": 100, "quality": "good", "ready": True},
     ]
-    exception_occured = False
+    exception_occurred = False
     try:
         HanaDB.from_texts(
             connection=test_setup.conn,
@@ -894,13 +894,13 @@
             table_name=table_name,
         )
     except ValueError:
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
     invalid_metadatas = [
         {"sta/nrt": 0, "end": 100, "quality": "good", "ready": True},
     ]
-    exception_occured = False
+    exception_occurred = False
     try:
         HanaDB.from_texts(
             connection=test_setup.conn,
@@ -910,8 +910,8 @@
             table_name=table_name,
         )
     except ValueError:
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
 
 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
@@ -1361,7 +1361,7 @@ def test_preexisting_specific_columns_for_metadata_wrong_type_or_non_existing(
     cur.close()
 
     # Check if table is created
-    exception_occured = False
+    exception_occurred = False
     try:
         HanaDB.from_texts(
             connection=test_setup.conn,
@@ -1371,12 +1371,12 @@
             table_name=table_name,
             specific_metadata_columns=["quality"],
         )
-        exception_occured = False
+        exception_occurred = False
     except dbapi.Error:  # Nothing we should do here, hdbcli will throw an error
-        exception_occured = True
-    assert exception_occured  # Check if table is created
+        exception_occurred = True
+    assert exception_occurred  # Check if table is created
 
-    exception_occured = False
+    exception_occurred = False
     try:
         HanaDB.from_texts(
             connection=test_setup.conn,
@@ -1386,10 +1386,10 @@
             table_name=table_name,
             specific_metadata_columns=["NonExistingColumn"],
         )
-        exception_occured = False
+        exception_occurred = False
     except AttributeError:  # Nothing we should do here, hdbcli will throw an error
-        exception_occured = True
-    assert exception_occured
+        exception_occurred = True
+    assert exception_occurred
 
 
 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed")
diff --git a/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py b/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py
index 5be2216cd8e..a9c52d128ac 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py
@@ -33,7 +33,7 @@ def collection() -> Any:
 class TestMongoDBAtlasVectorSearch:
     @classmethod
     def setup_class(cls) -> None:
-        # insure the test collection is empty
+        # ensure the test collection is empty
         collection = get_collection()
         assert collection.count_documents({}) == 0  # type: ignore[index]
 
diff --git a/libs/community/tests/integration_tests/vectorstores/test_pinecone.py b/libs/community/tests/integration_tests/vectorstores/test_pinecone.py
index 99a42dc5162..dad43f60702 100644
--- a/libs/community/tests/integration_tests/vectorstores/test_pinecone.py
+++ b/libs/community/tests/integration_tests/vectorstores/test_pinecone.py
@@ -59,7 +59,7 @@
         else:
             pinecone.create_index(name=index_name, dimension=dimension)
 
-        # insure the index is empty
+        # ensure the index is empty
         index_stats = cls.index.describe_index_stats()
         assert index_stats["dimension"] == dimension
         if index_stats["namespaces"].get(namespace_name) is not None:
diff --git a/libs/partners/openai/tests/unit_tests/embeddings/test_azure_embeddings.py b/libs/partners/openai/tests/unit_tests/embeddings/test_azure_embeddings.py
index 8ce50eaaf60..cd7e4a4202c 100644
--- a/libs/partners/openai/tests/unit_tests/embeddings/test_azure_embeddings.py
+++ b/libs/partners/openai/tests/unit_tests/embeddings/test_azure_embeddings.py
@@ -15,7 +15,7 @@ def test_initialize_azure_openai() -> None:
     assert embeddings.model == "text-embedding-large"
 
 
-def test_intialize_azure_openai_with_base_set() -> None:
+def test_initialize_azure_openai_with_base_set() -> None:
     with mock.patch.dict(os.environ, {"OPENAI_API_BASE": "https://api.openai.com"}):
         embeddings = AzureOpenAIEmbeddings(  # type: ignore[call-arg, call-arg]
             model="text-embedding-large",