diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml index 3158dab5d7d..0b4bd4bd7d2 100644 --- a/.github/ISSUE_TEMPLATE/documentation.yml +++ b/.github/ISSUE_TEMPLATE/documentation.yml @@ -26,6 +26,13 @@ body: [LangChain Github Discussions](https://github.com/langchain-ai/langchain/discussions), [LangChain Github Issues](https://github.com/langchain-ai/langchain/issues?q=is%3Aissue), [LangChain ChatBot](https://chat.langchain.com/) +- type: input + id: url + attributes: + label: URL + description: URL to documentation + validations: + required: false - type: checkboxes id: checks attributes: @@ -48,4 +55,4 @@ body: label: "Idea or request for content:" description: > Please describe as clearly as possible what topics you think are missing - from the current documentation. \ No newline at end of file + from the current documentation. diff --git a/.github/scripts/check_diff.py b/.github/scripts/check_diff.py index 3489018d802..e5fe87843bc 100644 --- a/.github/scripts/check_diff.py +++ b/.github/scripts/check_diff.py @@ -91,4 +91,4 @@ if __name__ == "__main__": } for key, value in outputs.items(): json_output = json.dumps(value) - print(f"{key}={json_output}") # noqa: T201 + print(f"{key}={json_output}") diff --git a/.github/scripts/get_min_versions.py b/.github/scripts/get_min_versions.py index a26cc021db0..e942b4b5280 100644 --- a/.github/scripts/get_min_versions.py +++ b/.github/scripts/get_min_versions.py @@ -76,4 +76,4 @@ if __name__ == "__main__": print( " ".join([f"{lib}=={version}" for lib, version in min_versions.items()]) - ) # noqa: T201 + ) diff --git a/.github/workflows/.codespell-exclude b/.github/workflows/.codespell-exclude new file mode 100644 index 00000000000..d74ecbfb99d --- /dev/null +++ b/.github/workflows/.codespell-exclude @@ -0,0 +1,7 @@ +libs/community/langchain_community/llms/yuan2.py +"NotIn": "not in", +- `/checkin`: Check-in +docs/docs/integrations/providers/trulens.mdx +self.assertIn( +from 
trulens_eval import Tru +tru = Tru() diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 3034129009e..85a79b22997 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -72,10 +72,67 @@ jobs: run: | echo pkg-name="$(poetry version | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT echo version="$(poetry version --short)" >> $GITHUB_OUTPUT + release-notes: + needs: + - build + runs-on: ubuntu-latest + outputs: + release-body: ${{ steps.generate-release-body.outputs.release-body }} + steps: + - uses: actions/checkout@v4 + with: + repository: langchain-ai/langchain + path: langchain + sparse-checkout: | # this only grabs files for relevant dir + ${{ inputs.working-directory }} + ref: master # this scopes to just master branch + fetch-depth: 0 # this fetches entire commit history + - name: Check Tags + id: check-tags + shell: bash + working-directory: langchain/${{ inputs.working-directory }} + env: + PKG_NAME: ${{ needs.build.outputs.pkg-name }} + VERSION: ${{ needs.build.outputs.version }} + run: | + REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$" + echo $REGEX + PREV_TAG=$(git tag --sort=-creatordate | grep -P $REGEX || true | head -1) + TAG="${PKG_NAME}==${VERSION}" + if [ "$TAG" == "$PREV_TAG" ]; then + echo "No new version to release" + exit 1 + fi + echo tag="$TAG" >> $GITHUB_OUTPUT + echo prev-tag="$PREV_TAG" >> $GITHUB_OUTPUT + - name: Generate release body + id: generate-release-body + working-directory: langchain + env: + WORKING_DIR: ${{ inputs.working-directory }} + PKG_NAME: ${{ needs.build.outputs.pkg-name }} + TAG: ${{ steps.check-tags.outputs.tag }} + PREV_TAG: ${{ steps.check-tags.outputs.prev-tag }} + run: | + PREAMBLE="Changes since $PREV_TAG" + # if PREV_TAG is empty, then we are releasing the first version + if [ -z "$PREV_TAG" ]; then + PREAMBLE="Initial release" + PREV_TAG=$(git rev-list --max-parents=0 HEAD) + fi + { + echo 'release-body<> "$GITHUB_OUTPUT" test-pypi-publish: needs: - build + - 
release-notes uses: ./.github/workflows/_test_release.yml with: @@ -86,6 +143,7 @@ jobs: pre-release-checks: needs: - build + - release-notes - test-pypi-publish runs-on: ubuntu-latest steps: @@ -229,6 +287,7 @@ jobs: publish: needs: - build + - release-notes - test-pypi-publish - pre-release-checks runs-on: ubuntu-latest @@ -270,6 +329,7 @@ jobs: mark-release: needs: - build + - release-notes - test-pypi-publish - pre-release-checks - publish @@ -306,6 +366,6 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} generateReleaseNotes: false tag: ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }} - body: "# Release ${{needs.build.outputs.pkg-name}}==${{ needs.build.outputs.version }}\n\nPackage-specific release note generation coming soon." + body: ${{ needs.release-notes.outputs.release-body }} commit: ${{ github.sha }} makeLatest: ${{ needs.build.outputs.pkg-name == 'langchain-core'}} diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 03b97fe0a73..3778a8630d5 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -29,9 +29,9 @@ jobs: python .github/workflows/extract_ignored_words_list.py id: extract_ignore_words - - name: Codespell - uses: codespell-project/actions-codespell@v2 - with: - skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock - ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }} - exclude_file: libs/community/langchain_community/llms/yuan2.py +# - name: Codespell +# uses: codespell-project/actions-codespell@v2 +# with: +# skip: guide_imports.json,*.ambr,./cookbook/data/imdb_top_1000.csv,*.lock +# ignore_words_list: ${{ steps.extract_ignore_words.outputs.ignore_words_list }} +# exclude_file: ./.github/workflows/codespell-exclude diff --git a/.github/workflows/extract_ignored_words_list.py b/.github/workflows/extract_ignored_words_list.py index bb949d14f79..7c800e0df0b 100644 --- a/.github/workflows/extract_ignored_words_list.py +++ 
b/.github/workflows/extract_ignored_words_list.py @@ -7,4 +7,4 @@ ignore_words_list = ( pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list") ) -print(f"::set-output name=ignore_words_list::{ignore_words_list}") # noqa: T201 +print(f"::set-output name=ignore_words_list::{ignore_words_list}") diff --git a/.gitignore b/.gitignore index aed12c91c6a..1b5b4c52c9b 100644 --- a/.gitignore +++ b/.gitignore @@ -178,3 +178,4 @@ _dist docs/docs/templates prof +virtualenv/ diff --git a/Makefile b/Makefile index 3acded731db..1dc85475053 100644 --- a/Makefile +++ b/Makefile @@ -32,10 +32,16 @@ api_docs_build: poetry run python docs/api_reference/create_api_rst.py cd docs/api_reference && poetry run make html +api_docs_quick_preview: + poetry run python docs/api_reference/create_api_rst.py text-splitters + cd docs/api_reference && poetry run make html + open docs/api_reference/_build/html/text_splitters_api_reference.html + ## api_docs_clean: Clean the API Reference documentation build artifacts. api_docs_clean: find ./docs/api_reference -name '*_api_reference.rst' -delete - cd docs/api_reference && poetry run make clean + git clean -fdX ./docs/api_reference + ## api_docs_linkcheck: Run linkchecker on the API Reference documentation. 
api_docs_linkcheck: diff --git a/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb b/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb index 6adc4411427..fafb1dfbbad 100644 --- a/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb +++ b/cookbook/rag_upstage_layout_analysis_groundedness_check.ipynb @@ -36,7 +36,9 @@ "\n", "docs = loader.load()\n", "\n", - "vectorstore = DocArrayInMemorySearch.from_documents(docs, embedding=UpstageEmbeddings())\n", + "vectorstore = DocArrayInMemorySearch.from_documents(\n", + " docs, embedding=UpstageEmbeddings(model=\"solar-embedding-1-large\")\n", + ")\n", "retriever = vectorstore.as_retriever()\n", "\n", "template = \"\"\"Answer the question based only on the following context:\n", diff --git a/cookbook/rag_with_quantized_embeddings.ipynb b/cookbook/rag_with_quantized_embeddings.ipynb index 79a85d5cc50..a071ec21b16 100644 --- a/cookbook/rag_with_quantized_embeddings.ipynb +++ b/cookbook/rag_with_quantized_embeddings.ipynb @@ -39,12 +39,10 @@ "from langchain_community.document_loaders.recursive_url_loader import (\n", " RecursiveUrlLoader,\n", ")\n", - "\n", - "# noqa\n", "from langchain_community.vectorstores import Chroma\n", "\n", "# For our example, we'll load docs from the web\n", - "from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa\n", + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", "\n", "DOCSTORE_DIR = \".\"\n", "DOCSTORE_ID_KEY = \"doc_id\"" diff --git a/docs/Makefile b/docs/Makefile index e13d0489766..13feb881809 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -37,8 +37,6 @@ _generate-files-internal: generate-files: _generate-files-internal mkdir -p $(INTERMEDIATE_DIR)/templates - cp ../templates/docs/INDEX.md $(INTERMEDIATE_DIR)/templates/index.md - cp ../cookbook/README.md $(INTERMEDIATE_DIR)/cookbook.mdx $(PYTHON) scripts/model_feat_table.py $(INTERMEDIATE_DIR) diff --git a/docs/api_reference/create_api_rst.py 
b/docs/api_reference/create_api_rst.py index 2c8ab44fc47..504d71a5b96 100644 --- a/docs/api_reference/create_api_rst.py +++ b/docs/api_reference/create_api_rst.py @@ -187,7 +187,7 @@ def _load_package_modules( modules_by_namespace[top_namespace] = _module_members except ImportError as e: - print(f"Error: Unable to import module '{namespace}' with error: {e}") # noqa: T201 + print(f"Error: Unable to import module '{namespace}' with error: {e}") return modules_by_namespace @@ -364,7 +364,7 @@ def main(dirs: Optional[list] = None) -> None: dirs += [ dir_ for dir_ in os.listdir(ROOT_DIR / "libs" / "partners") - if os.path.isdir(dir_) + if os.path.isdir(ROOT_DIR / "libs" / "partners" / dir_) and "pyproject.toml" in os.listdir(ROOT_DIR / "libs" / "partners" / dir_) ] for dir_ in dirs: diff --git a/docs/docs/additional_resources/arxiv_references.mdx b/docs/docs/additional_resources/arxiv_references.mdx index 8859bf3f3b8..de875d46008 100644 --- a/docs/docs/additional_resources/arxiv_references.mdx +++ b/docs/docs/additional_resources/arxiv_references.mdx @@ -2,32 +2,150 @@ LangChain implements the latest research in the field of Natural Language Processing. This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, -and Templates. + Templates, and Cookbooks. ## Summary | arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation| |------------------|---------|-------------------|------------------------| +| `2402.03620v1` [Self-Discover: Large Language Models Self-Compose Reasoning Structures](http://arxiv.org/abs/2402.03620v1) | Pei Zhou, Jay Pujara, Xiang Ren, et al. | 2024-02-06 | `Cookbook:` [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) +| `2401.18059v1` [RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval](http://arxiv.org/abs/2401.18059v1) | Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. 
| 2024-01-31 | `Cookbook:` [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) +| `2401.15884v2` [Corrective Retrieval Augmented Generation](http://arxiv.org/abs/2401.15884v2) | Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. | 2024-01-29 | `Cookbook:` [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) +| `2401.04088v1` [Mixtral of Experts](http://arxiv.org/abs/2401.04088v1) | Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. | 2024-01-08 | `Cookbook:` [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) | `2312.06648v2` [Dense X Retrieval: What Retrieval Granularity Should We Use?](http://arxiv.org/abs/2312.06648v2) | Tong Chen, Hongwei Wang, Sihao Chen, et al. | 2023-12-11 | `Template:` [propositional-retrieval](https://python.langchain.com/docs/templates/propositional-retrieval) | `2311.09210v1` [Chain-of-Note: Enhancing Robustness in Retrieval-Augmented Language Models](http://arxiv.org/abs/2311.09210v1) | Wenhao Yu, Hongming Zhang, Xiaoman Pan, et al. | 2023-11-15 | `Template:` [chain-of-note-wiki](https://python.langchain.com/docs/templates/chain-of-note-wiki) -| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting) -| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. 
| 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read) -| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot) -| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents) +| `2310.11511v1` [Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection](http://arxiv.org/abs/2310.11511v1) | Akari Asai, Zeqiu Wu, Yizhong Wang, et al. | 2023-10-17 | `Cookbook:` [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) +| `2310.06117v2` [Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models](http://arxiv.org/abs/2310.06117v2) | Huaixiu Steven Zheng, Swaroop Mishra, Xinyun Chen, et al. | 2023-10-09 | `Template:` [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting), `Cookbook:` [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) +| `2307.09288v2` [Llama 2: Open Foundation and Fine-Tuned Chat Models](http://arxiv.org/abs/2307.09288v2) | Hugo Touvron, Louis Martin, Kevin Stone, et al. | 2023-07-18 | `Cookbook:` [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) +| `2305.14283v3` [Query Rewriting for Retrieval-Augmented Large Language Models](http://arxiv.org/abs/2305.14283v3) | Xinbei Ma, Yeyun Gong, Pengcheng He, et al. 
| 2023-05-23 | `Template:` [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read), `Cookbook:` [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb) +| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot), `Cookbook:` [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb) +| `2305.04091v3` [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](http://arxiv.org/abs/2305.04091v3) | Lei Wang, Wanyu Xu, Yihuai Lan, et al. | 2023-05-06 | `Cookbook:` [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb) +| `2304.08485v2` [Visual Instruction Tuning](http://arxiv.org/abs/2304.08485v2) | Haotian Liu, Chunyuan Li, Qingyang Wu, et al. | 2023-04-17 | `Cookbook:` [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) +| `2304.03442v2` [Generative Agents: Interactive Simulacra of Human Behavior](http://arxiv.org/abs/2304.03442v2) | Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. 
| 2023-04-07 | `Cookbook:` [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) +| `2303.17760v2` [CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society](http://arxiv.org/abs/2303.17760v2) | Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. | 2023-03-31 | `Cookbook:` [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) +| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents), `Cookbook:` [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) | `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas) -| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. 
| 2023-01-24 | `API:` [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint) -| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde) +| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. 
| 2023-01-24 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI) +| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder), `Template:` [hyde](https://python.langchain.com/docs/templates/hyde), `Cookbook:` [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb) | `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. 
| 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal) | `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector) -| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain) +| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. 
| 2022-11-18 | `API:` [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), `Cookbook:` [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) | `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake) | `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings) -| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL) -| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. 
| 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint) +| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `API:` [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) +| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. 
| 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint) | `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip) -| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. 
| 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint) +| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint) | `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers) +## Self-Discover: Large Language Models Self-Compose Reasoning Structures + +- **arXiv id:** 2402.03620v1 +- **Title:** Self-Discover: Large Language Models Self-Compose 
Reasoning Structures +- **Authors:** Pei Zhou, Jay Pujara, Xiang Ren, et al. +- **Published Date:** 2024-02-06 +- **URL:** http://arxiv.org/abs/2402.03620v1 +- **LangChain:** + + - **Cookbook:** [self-discover](https://github.com/langchain-ai/langchain/blob/master/cookbook/self-discover.ipynb) + +**Abstract:** We introduce SELF-DISCOVER, a general framework for LLMs to self-discover the +task-intrinsic reasoning structures to tackle complex reasoning problems that +are challenging for typical prompting methods. Core to the framework is a +self-discovery process where LLMs select multiple atomic reasoning modules such +as critical thinking and step-by-step thinking, and compose them into an +explicit reasoning structure for LLMs to follow during decoding. SELF-DISCOVER +substantially improves GPT-4 and PaLM 2's performance on challenging reasoning +benchmarks such as BigBench-Hard, grounded agent reasoning, and MATH, by as +much as 32% compared to Chain of Thought (CoT). Furthermore, SELF-DISCOVER +outperforms inference-intensive methods such as CoT-Self-Consistency by more +than 20%, while requiring 10-40x fewer inference compute. Finally, we show that +the self-discovered reasoning structures are universally applicable across +model families: from PaLM 2-L to GPT-4, and from GPT-4 to Llama2, and share +commonalities with human reasoning patterns. + +## RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval + +- **arXiv id:** 2401.18059v1 +- **Title:** RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval +- **Authors:** Parth Sarthi, Salman Abdullah, Aditi Tuli, et al. +- **Published Date:** 2024-01-31 +- **URL:** http://arxiv.org/abs/2401.18059v1 +- **LangChain:** + + - **Cookbook:** [RAPTOR](https://github.com/langchain-ai/langchain/blob/master/cookbook/RAPTOR.ipynb) + +**Abstract:** Retrieval-augmented language models can better adapt to changes in world +state and incorporate long-tail knowledge. 
However, most existing methods +retrieve only short contiguous chunks from a retrieval corpus, limiting +holistic understanding of the overall document context. We introduce the novel +approach of recursively embedding, clustering, and summarizing chunks of text, +constructing a tree with differing levels of summarization from the bottom up. +At inference time, our RAPTOR model retrieves from this tree, integrating +information across lengthy documents at different levels of abstraction. +Controlled experiments show that retrieval with recursive summaries offers +significant improvements over traditional retrieval-augmented LMs on several +tasks. On question-answering tasks that involve complex, multi-step reasoning, +we show state-of-the-art results; for example, by coupling RAPTOR retrieval +with the use of GPT-4, we can improve the best performance on the QuALITY +benchmark by 20% in absolute accuracy. + +## Corrective Retrieval Augmented Generation + +- **arXiv id:** 2401.15884v2 +- **Title:** Corrective Retrieval Augmented Generation +- **Authors:** Shi-Qi Yan, Jia-Chen Gu, Yun Zhu, et al. +- **Published Date:** 2024-01-29 +- **URL:** http://arxiv.org/abs/2401.15884v2 +- **LangChain:** + + - **Cookbook:** [langgraph_crag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_crag.ipynb) + +**Abstract:** Large language models (LLMs) inevitably exhibit hallucinations since the +accuracy of generated texts cannot be secured solely by the parametric +knowledge they encapsulate. Although retrieval-augmented generation (RAG) is a +practicable complement to LLMs, it relies heavily on the relevance of retrieved +documents, raising concerns about how the model behaves if retrieval goes +wrong. To this end, we propose the Corrective Retrieval Augmented Generation +(CRAG) to improve the robustness of generation. 
Specifically, a lightweight +retrieval evaluator is designed to assess the overall quality of retrieved +documents for a query, returning a confidence degree based on which different +knowledge retrieval actions can be triggered. Since retrieval from static and +limited corpora can only return sub-optimal documents, large-scale web searches +are utilized as an extension for augmenting the retrieval results. Besides, a +decompose-then-recompose algorithm is designed for retrieved documents to +selectively focus on key information and filter out irrelevant information in +them. CRAG is plug-and-play and can be seamlessly coupled with various +RAG-based approaches. Experiments on four datasets covering short- and +long-form generation tasks show that CRAG can significantly improve the +performance of RAG-based approaches. + +## Mixtral of Experts + +- **arXiv id:** 2401.04088v1 +- **Title:** Mixtral of Experts +- **Authors:** Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, et al. +- **Published Date:** 2024-01-08 +- **URL:** http://arxiv.org/abs/2401.04088v1 +- **LangChain:** + + - **Cookbook:** [together_ai](https://github.com/langchain-ai/langchain/blob/master/cookbook/together_ai.ipynb) + +**Abstract:** We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. +Mixtral has the same architecture as Mistral 7B, with the difference that each +layer is composed of 8 feedforward blocks (i.e. experts). For every token, at +each layer, a router network selects two experts to process the current state +and combine their outputs. Even though each token only sees two experts, the +selected experts can be different at each timestep. As a result, each token has +access to 47B parameters, but only uses 13B active parameters during inference. +Mixtral was trained with a context size of 32k tokens and it outperforms or +matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. 
In particular, +Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and +multilingual benchmarks. We also provide a model fine-tuned to follow +instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, +Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both +the base and instruct models are released under the Apache 2.0 license. + ## Dense X Retrieval: What Retrieval Granularity Should We Use? - **arXiv id:** 2312.06648v2 @@ -91,6 +209,39 @@ average improvement of +7.9 in EM score given entirely noisy retrieved documents and +10.5 in rejection rates for real-time questions that fall outside the pre-training knowledge scope. +## Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection + +- **arXiv id:** 2310.11511v1 +- **Title:** Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection +- **Authors:** Akari Asai, Zeqiu Wu, Yizhong Wang, et al. +- **Published Date:** 2023-10-17 +- **URL:** http://arxiv.org/abs/2310.11511v1 +- **LangChain:** + + - **Cookbook:** [langgraph_self_rag](https://github.com/langchain-ai/langchain/blob/master/cookbook/langgraph_self_rag.ipynb) + +**Abstract:** Despite their remarkable capabilities, large language models (LLMs) often +produce responses containing factual inaccuracies due to their sole reliance on +the parametric knowledge they encapsulate. Retrieval-Augmented Generation +(RAG), an ad hoc approach that augments LMs with retrieval of relevant +knowledge, decreases such issues. However, indiscriminately retrieving and +incorporating a fixed number of retrieved passages, regardless of whether +retrieval is necessary, or passages are relevant, diminishes LM versatility or +can lead to unhelpful response generation. We introduce a new framework called +Self-Reflective Retrieval-Augmented Generation (Self-RAG) that enhances an LM's +quality and factuality through retrieval and self-reflection. 
Our framework +trains a single arbitrary LM that adaptively retrieves passages on-demand, and +generates and reflects on retrieved passages and its own generations using +special tokens, called reflection tokens. Generating reflection tokens makes +the LM controllable during the inference phase, enabling it to tailor its +behavior to diverse task requirements. Experiments show that Self-RAG (7B and +13B parameters) significantly outperforms state-of-the-art LLMs and +retrieval-augmented models on a diverse set of tasks. Specifically, Self-RAG +outperforms ChatGPT and retrieval-augmented Llama2-chat on Open-domain QA, +reasoning and fact verification tasks, and it shows significant gains in +improving factuality and citation accuracy for long-form generations relative +to these models. + ## Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models - **arXiv id:** 2310.06117v2 @@ -101,6 +252,7 @@ outside the pre-training knowledge scope. - **LangChain:** - **Template:** [stepback-qa-prompting](https://python.langchain.com/docs/templates/stepback-qa-prompting) + - **Cookbook:** [stepback-qa](https://github.com/langchain-ai/langchain/blob/master/cookbook/stepback-qa.ipynb) **Abstract:** We present Step-Back Prompting, a simple prompting technique that enables LLMs to do abstractions to derive high-level concepts and first principles from @@ -113,6 +265,27 @@ including STEM, Knowledge QA, and Multi-Hop Reasoning. For instance, Step-Back Prompting improves PaLM-2L performance on MMLU (Physics and Chemistry) by 7% and 11% respectively, TimeQA by 27%, and MuSiQue by 7%. +## Llama 2: Open Foundation and Fine-Tuned Chat Models + +- **arXiv id:** 2307.09288v2 +- **Title:** Llama 2: Open Foundation and Fine-Tuned Chat Models +- **Authors:** Hugo Touvron, Louis Martin, Kevin Stone, et al. 
+- **Published Date:** 2023-07-18 +- **URL:** http://arxiv.org/abs/2307.09288v2 +- **LangChain:** + + - **Cookbook:** [Semi_Structured_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_Structured_RAG.ipynb) + +**Abstract:** In this work, we develop and release Llama 2, a collection of pretrained and +fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 +billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for +dialogue use cases. Our models outperform open-source chat models on most +benchmarks we tested, and based on our human evaluations for helpfulness and +safety, may be a suitable substitute for closed-source models. We provide a +detailed description of our approach to fine-tuning and safety improvements of +Llama 2-Chat in order to enable the community to build on our work and +contribute to the responsible development of LLMs. + ## Query Rewriting for Retrieval-Augmented Large Language Models - **arXiv id:** 2305.14283v3 @@ -123,6 +296,7 @@ and 11% respectively, TimeQA by 27%, and MuSiQue by 7%. - **LangChain:** - **Template:** [rewrite-retrieve-read](https://python.langchain.com/docs/templates/rewrite-retrieve-read) + - **Cookbook:** [rewrite](https://github.com/langchain-ai/langchain/blob/master/cookbook/rewrite.ipynb) **Abstract:** Large Language Models (LLMs) play powerful, black-box readers in the retrieve-then-read pipeline, making remarkable progress in knowledge-intensive @@ -152,6 +326,7 @@ for retrieval-augmented LLM. 
- **LangChain:** - **API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot) + - **Cookbook:** [tree_of_thought](https://github.com/langchain-ai/langchain/blob/master/cookbook/tree_of_thought.ipynb) **Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel approach aimed at improving the problem-solving capabilities of auto-regressive @@ -171,6 +346,132 @@ significantly increase the success rate of Sudoku puzzle solving. Our implementation of the ToT-based Sudoku solver is available on GitHub: \url{https://github.com/jieyilong/tree-of-thought-puzzle-solver}. +## Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models + +- **arXiv id:** 2305.04091v3 +- **Title:** Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models +- **Authors:** Lei Wang, Wanyu Xu, Yihuai Lan, et al. +- **Published Date:** 2023-05-06 +- **URL:** http://arxiv.org/abs/2305.04091v3 +- **LangChain:** + + - **Cookbook:** [plan_and_execute_agent](https://github.com/langchain-ai/langchain/blob/master/cookbook/plan_and_execute_agent.ipynb) + +**Abstract:** Large language models (LLMs) have recently been shown to deliver impressive +performance in various NLP tasks. To tackle multi-step reasoning tasks, +few-shot chain-of-thought (CoT) prompting includes a few manually crafted +step-by-step reasoning demonstrations which enable LLMs to explicitly generate +reasoning steps and improve their reasoning task accuracy. To eliminate the +manual effort, Zero-shot-CoT concatenates the target problem statement with +"Let's think step by step" as an input prompt to LLMs. Despite the success of +Zero-shot-CoT, it still suffers from three pitfalls: calculation errors, +missing-step errors, and semantic misunderstanding errors. To address the +missing-step errors, we propose Plan-and-Solve (PS) Prompting. 
It consists of +two components: first, devising a plan to divide the entire task into smaller +subtasks, and then carrying out the subtasks according to the plan. To address +the calculation errors and improve the quality of generated reasoning steps, we +extend PS prompting with more detailed instructions and derive PS+ prompting. +We evaluate our proposed prompting strategy on ten datasets across three +reasoning problems. The experimental results over GPT-3 show that our proposed +zero-shot prompting consistently outperforms Zero-shot-CoT across all datasets +by a large margin, is comparable to or exceeds Zero-shot-Program-of-Thought +Prompting, and has comparable performance with 8-shot CoT prompting on the math +reasoning problem. The code can be found at +https://github.com/AGI-Edgerunners/Plan-and-Solve-Prompting. + +## Visual Instruction Tuning + +- **arXiv id:** 2304.08485v2 +- **Title:** Visual Instruction Tuning +- **Authors:** Haotian Liu, Chunyuan Li, Qingyang Wu, et al. +- **Published Date:** 2023-04-17 +- **URL:** http://arxiv.org/abs/2304.08485v2 +- **LangChain:** + + - **Cookbook:** [Semi_structured_and_multi_modal_RAG](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_and_multi_modal_RAG.ipynb), [Semi_structured_multi_modal_RAG_LLaMA2](https://github.com/langchain-ai/langchain/blob/master/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb) + +**Abstract:** Instruction tuning large language models (LLMs) using machine-generated +instruction-following data has improved zero-shot capabilities on new tasks, +but the idea is less explored in the multimodal field. In this paper, we +present the first attempt to use language-only GPT-4 to generate multimodal +language-image instruction-following data. 
By instruction tuning on such +generated data, we introduce LLaVA: Large Language and Vision Assistant, an +end-to-end trained large multimodal model that connects a vision encoder and +LLM for general-purpose visual and language understanding. Our early experiments +show that LLaVA demonstrates impressive multimodel chat abilities, sometimes +exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and +yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal +instruction-following dataset. When fine-tuned on Science QA, the synergy of +LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make +GPT-4 generated visual instruction tuning data, our model and code base +publicly available. + +## Generative Agents: Interactive Simulacra of Human Behavior + +- **arXiv id:** 2304.03442v2 +- **Title:** Generative Agents: Interactive Simulacra of Human Behavior +- **Authors:** Joon Sung Park, Joseph C. O'Brien, Carrie J. Cai, et al. +- **Published Date:** 2023-04-07 +- **URL:** http://arxiv.org/abs/2304.03442v2 +- **LangChain:** + + - **Cookbook:** [multiagent_bidding](https://github.com/langchain-ai/langchain/blob/master/cookbook/multiagent_bidding.ipynb), [generative_agents_interactive_simulacra_of_human_behavior](https://github.com/langchain-ai/langchain/blob/master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb) + +**Abstract:** Believable proxies of human behavior can empower interactive applications +ranging from immersive environments to rehearsal spaces for interpersonal +communication to prototyping tools. In this paper, we introduce generative +agents--computational software agents that simulate believable human behavior. +Generative agents wake up, cook breakfast, and head to work; artists paint, +while authors write; they form opinions, notice each other, and initiate +conversations; they remember and reflect on days past as they plan the next +day. 
To enable generative agents, we describe an architecture that extends a +large language model to store a complete record of the agent's experiences +using natural language, synthesize those memories over time into higher-level +reflections, and retrieve them dynamically to plan behavior. We instantiate +generative agents to populate an interactive sandbox environment inspired by +The Sims, where end users can interact with a small town of twenty five agents +using natural language. In an evaluation, these generative agents produce +believable individual and emergent social behaviors: for example, starting with +only a single user-specified notion that one agent wants to throw a Valentine's +Day party, the agents autonomously spread invitations to the party over the +next two days, make new acquaintances, ask each other out on dates to the +party, and coordinate to show up for the party together at the right time. We +demonstrate through ablation that the components of our agent +architecture--observation, planning, and reflection--each contribute critically +to the believability of agent behavior. By fusing large language models with +computational, interactive agents, this work introduces architectural and +interaction patterns for enabling believable simulations of human behavior. + +## CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society + +- **arXiv id:** 2303.17760v2 +- **Title:** CAMEL: Communicative Agents for "Mind" Exploration of Large Language Model Society +- **Authors:** Guohao Li, Hasan Abed Al Kader Hammoud, Hani Itani, et al. +- **Published Date:** 2023-03-31 +- **URL:** http://arxiv.org/abs/2303.17760v2 +- **LangChain:** + + - **Cookbook:** [camel_role_playing](https://github.com/langchain-ai/langchain/blob/master/cookbook/camel_role_playing.ipynb) + +**Abstract:** The rapid advancement of chat-based language models has led to remarkable +progress in complex task-solving. 
However, their success heavily relies on +human input to guide the conversation, which can be challenging and +time-consuming. This paper explores the potential of building scalable +techniques to facilitate autonomous cooperation among communicative agents, and +provides insight into their "cognitive" processes. To address the challenges of +achieving autonomous cooperation, we propose a novel communicative agent +framework named role-playing. Our approach involves using inception prompting +to guide chat agents toward task completion while maintaining consistency with +human intentions. We showcase how role-playing can be used to generate +conversational data for studying the behaviors and capabilities of a society of +agents, providing a valuable resource for investigating conversational language +models. In particular, we conduct comprehensive studies on +instruction-following cooperation in multi-agent settings. Our contributions +include introducing a novel communicative agent framework, offering a scalable +approach for studying the cooperative behaviors and capabilities of multi-agent +systems, and open-sourcing our library to support research on communicative +agents and beyond: https://github.com/camel-ai/camel. + ## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face - **arXiv id:** 2303.17580v4 @@ -181,6 +482,7 @@ implementation of the ToT-based Sudoku solver is available on GitHub: - **LangChain:** - **API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents) + - **Cookbook:** [hugginggpt](https://github.com/langchain-ai/langchain/blob/master/cookbook/hugginggpt.ipynb) **Abstract:** Solving complicated AI tasks with different domains and modalities is a key step toward artificial general intelligence. While there are numerous AI models @@ -235,7 +537,7 @@ more than 1/1,000th the compute of GPT-4. 
- **URL:** http://arxiv.org/abs/2301.10226v4 - **LangChain:** - - **API Reference:** [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint) + - **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI) **Abstract:** Potential harms of large language 
models can be mitigated by watermarking model output, i.e., embedding signals into generated text that are invisible to @@ -262,6 +564,7 @@ family, and discuss robustness and security. - **API Reference:** [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder) - **Template:** [hyde](https://python.langchain.com/docs/templates/hyde) + - **Cookbook:** [hypothetical_document_embeddings](https://github.com/langchain-ai/langchain/blob/master/cookbook/hypothetical_document_embeddings.ipynb) **Abstract:** While dense retrieval has been shown effective and efficient across tasks and languages, it remains difficult to create effective fully zero-shot dense @@ -351,7 +654,8 @@ performance across three real-world tasks on multiple LLMs. - **URL:** http://arxiv.org/abs/2211.10435v2 - **LangChain:** - - **API Reference:** [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain) + - **API Reference:** [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain), [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain) + - **Cookbook:** [program_aided_language_model](https://github.com/langchain-ai/langchain/blob/master/cookbook/program_aided_language_model.ipynb) **Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability to perform arithmetic and symbolic reasoning 
tasks, when provided with a few @@ -442,7 +746,7 @@ encoders, mine bitexts, and validate the bitexts by training NMT systems. - **URL:** http://arxiv.org/abs/2204.00498v1 - **LangChain:** - - **API Reference:** [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL) + - **API Reference:** [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL), [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase) **Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex language model. We find that, without any finetuning, Codex is a strong @@ -461,7 +765,7 @@ few-shot examples. 
- **URL:** http://arxiv.org/abs/2202.00666v5 - **LangChain:** - - **API Reference:** [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint) + - **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint) **Abstract:** Today's probabilistic language generators fall short when it comes to producing coherent and fluent text despite the fact that the underlying models @@ -525,7 +829,7 @@ https://github.com/OpenAI/CLIP. 
- **URL:** http://arxiv.org/abs/1909.05858v2 - **LangChain:** - - **API Reference:** [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint) + - **API Reference:** [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_huggingface.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_huggingface.llms.huggingface_endpoint.HuggingFaceEndpoint) **Abstract:** Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We diff --git a/docs/docs/concepts.mdx b/docs/docs/concepts.mdx index 924be764ca3..b0ce1c8f197 100644 --- a/docs/docs/concepts.mdx +++ b/docs/docs/concepts.mdx @@ -174,7 +174,7 @@ The `content` property describes the content of the message. 
This can be a few different things: - A string (most models deal this type of content) -- A List of dictionaries (this is used for multi-modal input, where the dictionary contains information about that input type and that input location) +- A List of dictionaries (this is used for multimodal input, where the dictionary contains information about that input type and that input location) #### HumanMessage @@ -476,86 +476,81 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how It is recommended, however, that you start to transition to LangGraph. In order to assist in this we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent) +### Multimodal + +Some models are multimodal, accepting images, audio and even video as inputs. These are still less common, meaning model providers haven't standardized on the "best" way to define the API. Multimodal **outputs** are even less common. As such, we've kept our multimodal abstractions fairly light weight and plan to further solidify the multimodal APIs and interaction patterns as the field matures. + +In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format. So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations. + ### Callbacks LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks. You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is list of handler objects, which are expected to implement one or more of the methods described below in more detail. 
+#### Callback Events + +| Event | Event Trigger | Associated Method | +|------------------|---------------------------------------------|-----------------------| +| Chat model start | When a chat model starts | `on_chat_model_start` | +| LLM start | When an LLM starts | `on_llm_start` | +| LLM new token | When an LLM OR chat model emits a new token | `on_llm_new_token` | +| LLM ends | When an LLM OR chat model ends | `on_llm_end` | +| LLM errors | When an LLM OR chat model errors | `on_llm_error` | +| Chain start | When a chain starts running | `on_chain_start` | +| Chain end | When a chain ends | `on_chain_end` | +| Chain error | When a chain errors | `on_chain_error` | +| Tool start | When a tool starts running | `on_tool_start` | +| Tool end | When a tool ends | `on_tool_end` | +| Tool error | When a tool errors | `on_tool_error` | +| Agent action | When an agent takes an action | `on_agent_action` | +| Agent finish | When an agent ends | `on_agent_finish` | +| Retriever start | When a retriever starts | `on_retriever_start` | +| Retriever end | When a retriever ends | `on_retriever_end` | +| Retriever error | When a retriever errors | `on_retriever_error` | +| Text | When arbitrary text is run | `on_text` | +| Retry | When a retry event is run | `on_retry` | + #### Callback handlers -`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) interface, which has a method for each event that can be subscribed to. -The `CallbackManager` will call the appropriate method on each handler when the event is triggered. 
+Callback handlers can either be `sync` or `async`: -```python -class BaseCallbackHandler: - """Base callback handler that can be used to handle callbacks from langchain.""" +* Sync callback handlers implement the [BaseCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html) interface. +* Async callback handlers implement the [AsyncCallbackHandler](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) interface. - def on_llm_start( - self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any - ) -> Any: - """Run when LLM starts running.""" - - def on_chat_model_start( - self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any - ) -> Any: - """Run when Chat Model starts running.""" - - def on_llm_new_token(self, token: str, **kwargs: Any) -> Any: - """Run on new LLM token. Only available when streaming is enabled.""" - - def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any: - """Run when LLM ends running.""" - - def on_llm_error( - self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any - ) -> Any: - """Run when LLM errors.""" - - def on_chain_start( - self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any - ) -> Any: - """Run when chain starts running.""" - - def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any: - """Run when chain ends running.""" - - def on_chain_error( - self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any - ) -> Any: - """Run when chain errors.""" - - def on_tool_start( - self, serialized: Dict[str, Any], input_str: str, **kwargs: Any - ) -> Any: - """Run when tool starts running.""" - - def on_tool_end(self, output: Any, **kwargs: Any) -> Any: - """Run when tool ends running.""" - - def on_tool_error( - self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any - ) -> Any: - """Run when tool errors.""" - - def 
on_text(self, text: str, **kwargs: Any) -> Any: - """Run on arbitrary text.""" - - def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any: - """Run on agent action.""" - - def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any: - """Run on agent end.""" -``` +During run-time LangChain configures an appropriate callback manager (e.g., [CallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.CallbackManager.html) or [AsyncCallbackManager](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.manager.AsyncCallbackManager.html) which will be responsible for calling the appropriate method on each "registered" callback handler when the event is triggered. #### Passing callbacks The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places: -- **Constructor callbacks**: defined in the constructor, e.g. `ChatAnthropic(callbacks=[handler], tags=['a-tag'])`. In this case, the callbacks will be used for all calls made on that object, and will be scoped to that object only. - For example, if you initialize a chat model with constructor callbacks, then use it within a chain, the callbacks will only be invoked for calls to that model. -- **Request callbacks**: passed into the `invoke` method used for issuing a request. In this case, the callbacks will be used for that specific request only, and all sub-requests that it contains (e.g. a call to a sequence that triggers a call to a model, which uses the same handler passed in the `invoke()` method). - In the `invoke()` method, callbacks are passed through the `config` parameter. +The callbacks are available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places: + +- **Request time callbacks**: Passed at the time of the request in addition to the input data. + Available on all standard `Runnable` objects. 
These callbacks are INHERITED by all children + of the object they are defined on. For example, `chain.invoke({"number": 25}, {"callbacks": [handler]})`. +- **Constructor callbacks**: `chain = TheNameOfSomeChain(callbacks=[handler])`. These callbacks + are passed as arguments to the constructor of the object. The callbacks are scoped + only to the object they are defined on, and are **not** inherited by any children of the object. + +:::warning +Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children +of the object. +::: + +If you're creating a custom chain or runnable, you need to remember to propagate request time +callbacks to any child objects. + +:::important Async in Python<=3.10 + +Any `RunnableLambda`, a `RunnableGenerator`, or `Tool` that invokes other runnables +and is running async in python<=3.10, will have to propagate callbacks to child +objects manually. This is because LangChain cannot automatically propagate +callbacks to child objects in this case. + +This is a common reason why you may fail to see events being emitted from custom +runnables or tools. +::: ## Techniques @@ -653,3 +648,7 @@ Table columns: | Character | [CharacterTextSplitter](/docs/how_to/character_text_splitter/) | A user defined character | | Splits text based on a user defined character. One of the simpler methods. | | Semantic Chunker (Experimental) | [SemanticChunker](/docs/how_to/semantic-chunker/) | Sentences | | First splits on sentences. Then combines ones next to each other if they are semantically similar enough. Taken from [Greg Kamradt](https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb) | | Integration: AI21 Semantic | [AI21SemanticTextSplitter](/docs/integrations/document_transformers/ai21_semantic_text_splitter/) | ✅ | Identifies distinct topics that form coherent pieces of text and splits along those. 
| + + + + diff --git a/docs/docs/contributing/how_to/documentation.mdx b/docs/docs/contributing/how_to/documentation.mdx index 4dbb0204df1..2c38c6a6d71 100644 --- a/docs/docs/contributing/how_to/documentation.mdx +++ b/docs/docs/contributing/how_to/documentation.mdx @@ -71,6 +71,8 @@ make docs_clean make api_docs_clean ``` + + Next, you can build the documentation as outlined below: ```bash @@ -78,6 +80,18 @@ make docs_build make api_docs_build ``` +:::tip + +The `make api_docs_build` command takes a long time. If you're making cosmetic changes to the API docs and want to see how they look, use: + +```bash +make api_docs_quick_preview +``` + +which will just build a small subset of the API reference. + +::: + Finally, run the link checker to ensure all links are valid: ```bash diff --git a/docs/docs/how_to/agent_executor.ipynb b/docs/docs/how_to/agent_executor.ipynb index a1fcc4f7ab0..fb1d8709cf4 100644 --- a/docs/docs/how_to/agent_executor.ipynb +++ b/docs/docs/how_to/agent_executor.ipynb @@ -19,13 +19,13 @@ "\n", "By themselves, language models can't take actions - they just output text.\n", "A big use case for LangChain is creating **agents**.\n", - "Agents are systems that use an LLM as a reasoning enginer to determine which actions to take and what the inputs to those actions should be.\n", - "The results of those actions can then be fed back into the agent and it determine whether more actions are needed, or whether it is okay to finish.\n", + "Agents are systems that use an LLM as a reasoning engine to determine which actions to take and what the inputs to those actions should be.\n", + "The results of those actions can then be fed back into the agent and it determines whether more actions are needed, or whether it is okay to finish.\n", "\n", - "In this tutorial we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. 
You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n", + "In this tutorial, we will build an agent that can interact with multiple different tools: one being a local database, the other being a search engine. You will be able to ask this agent questions, watch it call tools, and have conversations with it.\n", "\n", ":::{.callout-important}\n", - "This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd reccommend checking out [LangGraph](/docs/concepts/#langgraph)\n", + "This section will cover building with LangChain Agents. LangChain Agents are fine for getting started, but past a certain point, you will likely want flexibility and control that they do not offer. For working with more advanced agents, we'd recommend checking out [LangGraph](/docs/concepts/#langgraph)\n", ":::\n", "\n", "## Concepts\n", @@ -34,7 +34,7 @@ "- Using [language models](/docs/concepts/#chat-models), in particular their tool calling ability\n", "- Creating a [Retriever](/docs/concepts/#retrievers) to expose specific information to our agent\n", "- Using a Search [Tool](/docs/concepts/#tools) to look up things online\n", - "- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to followup questions. \n", + "- [`Chat History`](/docs/concepts/#chat-history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to follow-up questions. 
\n", "- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n", "\n", "## Setup\n", diff --git a/docs/docs/how_to/callbacks_async.ipynb b/docs/docs/how_to/callbacks_async.ipynb index c9b9cb406dd..5d183efbbed 100644 --- a/docs/docs/how_to/callbacks_async.ipynb +++ b/docs/docs/how_to/callbacks_async.ipynb @@ -12,12 +12,20 @@ "\n", "- [Callbacks](/docs/concepts/#callbacks)\n", "- [Custom callback handlers](/docs/how_to/custom_callbacks)\n", - "\n", :::\n", "\n", - "If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the runloop.\n", + "If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the event loop.\n", "\n", - "**Note**: if you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe." + "\n", + ":::{.callout-warning}\n", + "If you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe.\n", + ":::\n", + "\n", + ":::{.callout-danger}\n", + "\n", + "If you're on `python<=3.10`, you need to remember to propagate `config` or `callbacks` when invoking other `runnable` from within a `RunnableLambda`, `RunnableGenerator` or `@tool`. 
If you do not do this,\n", + "the callbacks will not be propagated to the child runnables being invoked.\n", + ":::" ] }, { @@ -149,7 +157,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -163,9 +171,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.9.6" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/docs/how_to/callbacks_attach.ipynb b/docs/docs/how_to/callbacks_attach.ipynb index 8424948e10c..5e361cd3c5f 100644 --- a/docs/docs/how_to/callbacks_attach.ipynb +++ b/docs/docs/how_to/callbacks_attach.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# How to attach callbacks to a module\n", + "# How to attach callbacks to a runnable\n", "\n", ":::info Prerequisites\n", "\n", @@ -19,6 +19,11 @@ "\n", "If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n", "\n", + ":::{.callout-important}\n", + "\n", + "`with_config()` binds a configuration which will be interpreted as **runtime** configuration. 
So these callbacks will propagate to all child components.\n", + ":::\n", + "\n", "Here's an example:" ] }, @@ -41,7 +46,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [ { @@ -52,17 +57,17 @@ "Chain ChatPromptTemplate started\n", "Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n", "Chat model started\n", - "Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0'))]] llm_output={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n", - "Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0'\n" + "Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'))]] llm_output={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n", + "Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 
'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0'\n" ] }, { "data": { "text/plain": [ - "AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0')" + "AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01NTYMsH9YxkoWsiPYs4Lemn', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-d6bcfd72-9c94-466d-bac0-f39e456ad6e3-0')" ] }, - "execution_count": 2, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -122,7 +127,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -136,9 +141,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.11.4" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/docs/how_to/callbacks_constructor.ipynb b/docs/docs/how_to/callbacks_constructor.ipynb index b90b099581d..73190e21702 100644 --- a/docs/docs/how_to/callbacks_constructor.ipynb +++ b/docs/docs/how_to/callbacks_constructor.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# How to pass callbacks into a module constructor\n", + "# How to propagate callbacks constructor\n", "\n", ":::info Prerequisites\n", "\n", @@ -15,7 +15,12 @@ "\n", ":::\n", "\n", - "Most LangChain modules allow you to pass `callbacks` directly into the constructor. In this case, the callbacks will only be called for that instance (and any nested runs).\n", + "Most LangChain modules allow you to pass `callbacks` directly into the constructor (i.e., initializer). 
In this case, the callbacks will only be called for that instance (and any nested runs).\n", + "\n", + ":::{.callout-warning}\n", + "Constructor callbacks are scoped only to the object they are defined on. They are **not** inherited by children of the object. This can lead to confusing behavior,\n", + "and it's generally better to pass callbacks as a run time argument.\n", + ":::\n", "\n", "Here's an example:" ] @@ -114,7 +119,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -128,9 +133,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.5" + "version": "3.11.4" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/docs/how_to/embed_text.mdx b/docs/docs/how_to/embed_text.mdx index 1be7b054d1e..ed75a36a624 100644 --- a/docs/docs/how_to/embed_text.mdx +++ b/docs/docs/how_to/embed_text.mdx @@ -75,6 +75,31 @@ Otherwise you can initialize without any params: from langchain_cohere import CohereEmbeddings embeddings_model = CohereEmbeddings() +``` + + + + +To start we'll need to install the Hugging Face partner package: + +```bash +pip install langchain-huggingface +``` + +You can then load any [Sentence Transformers model](https://huggingface.co/models?library=sentence-transformers) from the Hugging Face Hub. + +```python +from langchain_huggingface import HuggingFaceEmbeddings + +embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") +``` + +You can also leave the `model_name` blank to use the default [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) model. 
+ +```python +from langchain_huggingface import HuggingFaceEmbeddings + +embeddings_model = HuggingFaceEmbeddings() ``` diff --git a/docs/docs/how_to/graph_mapping.ipynb b/docs/docs/how_to/graph_mapping.ipynb index 93c00134413..df3d08b8b86 100644 --- a/docs/docs/how_to/graph_mapping.ipynb +++ b/docs/docs/how_to/graph_mapping.ipynb @@ -300,7 +300,7 @@ "Entities in the question map to the following database values:\n", "{entities_list}\n", "Question: {question}\n", - "Cypher query:\"\"\" # noqa: E501\n", + "Cypher query:\"\"\"\n", "\n", "cypher_prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -377,7 +377,7 @@ "response_template = \"\"\"Based on the the question, Cypher query, and Cypher response, write a natural language response:\n", "Question: {question}\n", "Cypher query: {query}\n", - "Cypher Response: {response}\"\"\" # noqa: E501\n", + "Cypher Response: {response}\"\"\"\n", "\n", "response_prompt = ChatPromptTemplate.from_messages(\n", " [\n", diff --git a/docs/docs/how_to/index.mdx b/docs/docs/how_to/index.mdx index 39f5fe77838..3e3637b6a4b 100644 --- a/docs/docs/how_to/index.mdx +++ b/docs/docs/how_to/index.mdx @@ -174,7 +174,12 @@ LangChain Tools contain a description of the tool (to pass to the language model - [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting) - [How to: add a human in the loop to tool usage](/docs/how_to/tools_human) - [How to: handle errors when calling tools](/docs/how_to/tools_error) -- [How to: call tools using multi-modal data](/docs/how_to/tool_calls_multi_modal) + +### Multimodal + +- [How to: pass multimodal data directly to models](/docs/how_to/multimodal_inputs/) +- [How to: use multimodal prompts](/docs/how_to/multimodal_prompts/) + ### Agents diff --git a/docs/docs/how_to/indexing.ipynb b/docs/docs/how_to/indexing.ipynb index 48c4401b6e4..ac131330fb0 100644 --- a/docs/docs/how_to/indexing.ipynb +++ b/docs/docs/how_to/indexing.ipynb @@ -60,7 +60,7 @@ " * document addition 
by id (`add_documents` method with `ids` argument)\n", " * delete by id (`delete` method with `ids` argument)\n", "\n", - "Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n", + "Compatible Vectorstores: `Aerospike`, `AnalyticDB`, `AstraDB`, `AwaDB`, `Bagel`, `Cassandra`, `Chroma`, `CouchbaseVectorStore`, `DashVector`, `DatabricksVectorSearch`, `DeepLake`, `Dingo`, `ElasticVectorSearch`, `ElasticsearchStore`, `FAISS`, `HanaDB`, `Milvus`, `MyScale`, `OpenSearchVectorSearch`, `PGVector`, `Pinecone`, `Qdrant`, `Redis`, `Rockset`, `ScaNN`, `SupabaseVectorStore`, `SurrealDBStore`, `TimescaleVector`, `Vald`, `VDMS`, `Vearch`, `VespaStore`, `Weaviate`, `Yellowbrick`, `ZepVectorStore`, `TencentVectorDB`, `OpenSearchVectorSearch`.\n", " \n", "## Caution\n", "\n", diff --git a/docs/docs/how_to/message_history.ipynb b/docs/docs/how_to/message_history.ipynb index 5de232ce36d..dd343fc2661 100644 --- a/docs/docs/how_to/message_history.ipynb +++ b/docs/docs/how_to/message_history.ipynb @@ -73,7 +73,6 @@ "outputs": [], "source": [ "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_openai.chat_models import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -147,8 +146,18 @@ "id": "01acb505-3fd3-4ab4-9f04-5ea07e81542e", "metadata": {}, "source": [ + ":::info\n", + "\n", "Note that we've specified `input_messages_key` (the key to be treated as the latest input message) and `history_messages_key` (the key to add historical 
messages to).\n", "\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "id": "35222c30", + "metadata": {}, + "source": [ "When invoking this new runnable, we specify the corresponding chat history via a configuration parameter:" ] }, @@ -161,7 +170,7 @@ { "data": { "text/plain": [ - "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse of a right triangle.', response_metadata={'id': 'msg_017rAM9qrBTSdJ5i1rwhB7bT', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 32, 'output_tokens': 31}}, id='run-65e94a5e-a804-40de-ba88-d01b6cd06864-0')" + "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse of a right triangle.', response_metadata={'id': 'msg_01DH8iRBELVbF3sqM8U5sk8A', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 32, 'output_tokens': 31}}, id='run-e07fc012-a4f6-4e47-8ef8-250f296eba5b-0')" ] }, "execution_count": 4, @@ -185,7 +194,7 @@ { "data": { "text/plain": [ - "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse of a right triangle.', response_metadata={'id': 'msg_017hK1Q63ganeQZ9wdeqruLP', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 68, 'output_tokens': 31}}, id='run-a42177ef-b04a-4968-8606-446fb465b943-0')" + "AIMessage(content='The inverse of the cosine function is called the arccosine or inverse cosine.', response_metadata={'id': 'msg_015TeeRQBvTvc7XG1JxYqZyq', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 72, 'output_tokens': 22}}, id='run-32ae22ea-3b2f-4d38-8c8a-cb8702e2f3e7-0')" ] }, "execution_count": 5, @@ -196,11 +205,31 @@ "source": [ "# Remembers\n", "with_message_history.invoke(\n", - " 
{\"ability\": \"math\", \"input\": \"What?\"},\n", + " {\"ability\": \"math\", \"input\": \"What is its inverse called?\"},\n", " config={\"configurable\": {\"session_id\": \"abc123\"}},\n", ")" ] }, + { + "cell_type": "markdown", + "id": "e0c651e5", + "metadata": {}, + "source": [ + ":::info\n", + "\n", + "Note that in this case the context is preserved via the chat history for the provided `session_id`, so the model knows that \"it\" refers to \"cosine\" in this case.\n", + "\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "id": "a44f8d5f", + "metadata": {}, + "source": [ + "Now let's try a different `session_id`" + ] + }, { "cell_type": "code", "execution_count": 6, @@ -210,7 +239,7 @@ { "data": { "text/plain": [ - "AIMessage(content=\"I'm an AI assistant skilled in mathematics. How can I help you with a math-related task?\", response_metadata={'id': 'msg_01AYwfQ6SH5qz8ZQMW3nYtGU', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 28, 'output_tokens': 24}}, id='run-c57d93e3-305f-4c0e-bdb9-ef82f5b49f61-0')" + "AIMessage(content='The inverse of a function is the function that undoes the original function.', response_metadata={'id': 'msg_01M8WbHWg2sjWTz3m3NKqZuF', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 32, 'output_tokens': 18}}, id='run-b64c73d6-03ee-4b0a-85e0-34beb45408d4-0')" ] }, "execution_count": 6, @@ -221,11 +250,27 @@ "source": [ "# New session_id --> does not remember.\n", "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"What?\"},\n", + " {\"ability\": \"math\", \"input\": \"What is its inverse called?\"},\n", " config={\"configurable\": {\"session_id\": \"def234\"}},\n", ")" ] }, + { + "cell_type": "markdown", + "id": "5416e195", + "metadata": {}, + "source": [ + "When we pass a different `session_id`, we start a new chat history, so the model does not know what \"it\" refers to." 
+ ] + }, + { + "cell_type": "markdown", + "id": "a6710e65", + "metadata": {}, + "source": [ + "### Customization" + ] + }, { "cell_type": "markdown", "id": "d29497be-3366-408d-bbb9-d4a8bf4ef37c", @@ -243,7 +288,7 @@ { "data": { "text/plain": [ - "AIMessage(content='Hello! How can I assist you with math today?', response_metadata={'id': 'msg_01UdhnwghuSE7oRM57STFhHL', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 27, 'output_tokens': 14}}, id='run-3d53f67a-4ea7-4d78-8e67-37db43d4af5d-0')" + "AIMessage(content=\"Why can't a bicycle stand up on its own? It's two-tired!\", response_metadata={'id': 'msg_011qHi8pvbNkKhRb9XYRm2kc', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 30, 'output_tokens': 20}}, id='run-5d1d5b5a-ccec-4c2c-b11a-f1953dbe85a3-0')" ] }, "execution_count": 7, @@ -289,11 +334,69 @@ ")\n", "\n", "with_message_history.invoke(\n", - " {\"ability\": \"math\", \"input\": \"Hello\"},\n", + " {\"ability\": \"jokes\", \"input\": \"Tell me a joke\"},\n", " config={\"configurable\": {\"user_id\": \"123\", \"conversation_id\": \"1\"}},\n", ")" ] }, + { + "cell_type": "code", + "execution_count": 8, + "id": "4f282883", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='The joke was about a bicycle not being able to stand up on its own because it\\'s \"two-tired\" (too tired).', response_metadata={'id': 'msg_01LbrkfidZgseBMxxRjQXJQH', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 59, 'output_tokens': 30}}, id='run-8b2ca810-77d7-44b8-b27b-677e0062b19a-0')" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# remembers\n", + "with_message_history.invoke(\n", + " {\"ability\": \"jokes\", \"input\": \"What was the joke about?\"},\n", + " config={\"configurable\": {\"user_id\": 
\"123\", \"conversation_id\": \"1\"}},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "fc122c18", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content=\"I'm afraid I don't have enough context to provide a relevant joke. As an AI assistant, I don't actually have pre-programmed jokes. I'd be happy to try generating a humorous response if you provide more details about the context.\", response_metadata={'id': 'msg_01PgSp46hNJnKyNfNKPDauQ9', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 32, 'output_tokens': 54}}, id='run-ed202892-27e4-4da9-a26d-e0dc16b10940-0')" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# New user_id --> does not remember\n", + "with_message_history.invoke(\n", + " {\"ability\": \"jokes\", \"input\": \"What was the joke about?\"},\n", + " config={\"configurable\": {\"user_id\": \"456\", \"conversation_id\": \"1\"}},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3ce37565", + "metadata": {}, + "source": [ + "Note that in this case the context was preserved for the same `user_id`, but once we changed it, the new chat history was started, even though the `conversation_id` was the same." + ] + }, { "cell_type": "markdown", "id": "18f1a459-3f88-4ee6-8542-76a907070dd6", @@ -314,17 +417,17 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "17733d4f-3a32-4055-9d44-5d58b9446a26", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'output_message': AIMessage(content='Simone de Beauvoir was a prominent French existentialist philosopher who had some key beliefs about free will:\\n\\n1. Radical Freedom: De Beauvoir believed that humans have radical freedom - the ability to choose and define themselves through their actions. 
She rejected determinism and believed that we are not simply products of our biology, upbringing, or social circumstances.\\n\\n2. Ambiguity of the Human Condition: However, de Beauvoir also recognized the ambiguity of the human condition. While we have radical freedom, we are also situated beings constrained by our facticity (our given circumstances and limitations). This creates a tension and anguish in the human experience.\\n\\n3. Responsibility and Bad Faith: With this radical freedom comes great responsibility. De Beauvoir criticized \"bad faith\" - the tendency of people to deny their freedom and responsibility by making excuses or hiding behind social roles and norms.\\n\\n4. Ethical Engagement: For de Beauvoir, true freedom and authenticity required ethical engagement with the world and with others. We must take responsibility for our choices and their impact on others.\\n\\nOverall, de Beauvoir saw free will as a core aspect of the human condition, but one that is fraught with difficulty and ambiguity. Her philosophy emphasized the importance of owning our freedom and using it to ethically shape our lives and world.', response_metadata={'id': 'msg_01A78LdxxsCm6uR8vcAdMQBt', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 20, 'output_tokens': 293}}, id='run-9447a229-5d17-4b20-a48b-7507b78b225a-0')}" + "{'output_message': AIMessage(content='Simone de Beauvoir was a prominent French existentialist philosopher who had some key beliefs about free will:\\n\\n1. Radical Freedom: De Beauvoir believed that humans have radical freedom - the ability to choose and define themselves through their actions. She rejected determinism and believed that we are not simply products of our biology, upbringing, or social circumstances.\\n\\n2. Ambiguity of the Human Condition: However, de Beauvoir also recognized the ambiguity of the human condition. 
While we have radical freedom, we are also situated beings constrained by our facticity (our given circumstances and limitations). This creates a tension and anguish in the human experience.\\n\\n3. Responsibility and Bad Faith: With radical freedom comes great responsibility. De Beauvoir criticized \"bad faith\" - the denial or avoidance of this responsibility by making excuses or pretending we lack free will. She believed we must courageously embrace our freedom and the burdens it entails.\\n\\n4. Ethical Engagement: For de Beauvoir, freedom is not just an abstract philosophical concept, but something that must be exercised through ethical engagement with the world and others. Our choices and actions have moral implications that we must grapple with.\\n\\nOverall, de Beauvoir\\'s perspective on free will was grounded in existentialist principles - the belief that we are fundamentally free, yet this freedom is fraught with difficulty and responsibility. Her views emphasized the centrality of human agency and the ethical dimensions of our choices.', response_metadata={'id': 'msg_01QFXHx74GSzcMWnWc8YxYSJ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 20, 'output_tokens': 324}}, id='run-752513bc-2b4f-4cad-87f0-b96fee6ebe43-0')}" ] }, - "execution_count": 9, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -356,17 +459,17 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "id": "efb57ef5-91f9-426b-84b9-b77f071a9dd7", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'output_message': AIMessage(content=\"Simone de Beauvoir's views on free will were quite similar, but not identical, to those of her long-time partner Jean-Paul Sartre, another prominent existentialist philosopher.\\n\\nKey similarities:\\n\\n1. 
Radical Freedom: Both de Beauvoir and Sartre believed that humans have radical, unconditioned freedom to choose and define themselves.\\n\\n2. Rejection of Determinism: They both rejected deterministic views that see humans as products of their circumstances or biology.\\n\\n3. Emphasis on Responsibility: They agreed that with radical freedom comes great responsibility for one's choices and their consequences.\\n\\nKey differences:\\n\\n1. Ambiguity of the Human Condition: While Sartre emphasized the pure, unconditioned nature of human freedom, de Beauvoir recognized the ambiguity of the human condition - our freedom is constrained by our facticity (circumstances).\\n\\n2. Ethical Engagement: De Beauvoir placed more emphasis on the importance of ethical engagement with the world and others, whereas Sartre's focus was more on the individual's freedom.\\n\\n3. Gendered Perspectives: As a woman, de Beauvoir's perspective was more attuned to issues of gender and the lived experience of women, which shaped her views on freedom and ethics.\\n\\nSo in summary, while Sartre and de Beauvoir shared a core existentialist philosophy centered on radical human freedom, de Beauvoir's thought incorporated a greater recognition of the ambiguity and ethical dimensions of the human condition. This reflected her distinct feminist and phenomenological approach.\", response_metadata={'id': 'msg_01U6X3KNPufVg3zFvnx24eKq', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 324, 'output_tokens': 338}}, id='run-c4a984bd-33c6-4e26-a4d1-d58b666d065c-0')}" + "{'output_message': AIMessage(content='Simone de Beauvoir\\'s views on free will were quite similar to those of her long-time partner and fellow existentialist philosopher, Jean-Paul Sartre. There are some key parallels and differences:\\n\\nSimilarities:\\n\\n1. 
Radical Freedom: Both de Beauvoir and Sartre believed that humans have radical, unconditioned freedom to choose and define themselves.\\n\\n2. Rejection of Determinism: They both rejected deterministic views that see humans as products of their circumstances or nature.\\n\\n3. Emphasis on Responsibility: They agreed that with radical freedom comes great responsibility for one\\'s choices and actions.\\n\\n4. Critique of \"Bad Faith\": Both philosophers criticized the tendency of people to deny or avoid their freedom through self-deception and making excuses.\\n\\nDifferences:\\n\\n1. Gendered Perspectives: While Sartre developed a more gender-neutral existentialist philosophy, de Beauvoir brought a distinctly feminist lens, exploring the unique challenges and experiences of women\\'s freedom.\\n\\n2. Ethical Engagement: De Beauvoir placed more emphasis on the importance of ethical engagement with the world and others, whereas Sartre\\'s focus was more individualistic.\\n\\n3. Ambiguity of the Human Condition: De Beauvoir was more attuned to the ambiguity and tensions inherent in the human condition, whereas Sartre\\'s views were sometimes seen as more absolutist.\\n\\n4. 
Influence of Phenomenology: De Beauvoir was more influenced by the phenomenological tradition, which shaped her understanding of embodied, situated freedom.\\n\\nOverall, while Sartre and de Beauvoir shared a core existentialist framework, de Beauvoir\\'s unique feminist perspective and emphasis on ethical engagement with others distinguished her views on free will and the human condition.', response_metadata={'id': 'msg_01BEANW4VX6cUWYjkv3CanLz', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 355, 'output_tokens': 388}}, id='run-e786ab3a-1a42-45f3-94a3-f0c591430df3-0')}" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -396,7 +499,7 @@ "data": { "text/plain": [ "RunnableWithMessageHistory(bound=RunnableBinding(bound=RunnableBinding(bound=RunnableLambda(_enter_history), config={'run_name': 'load_history'})\n", - "| RunnableBinding(bound=ChatAnthropic(model='claude-3-haiku-20240307', temperature=0.0, anthropic_api_url='https://api.anthropic.com', anthropic_api_key=SecretStr('**********'), _client=, _async_client=), config_factories=[. at 0x1473dd000>]), config={'run_name': 'RunnableWithMessageHistory'}), get_session_history=, history_factory_config=[ConfigurableFieldSpec(id='session_id', annotation=, name='Session ID', description='Unique identifier for a session.', default='', is_shared=True, dependencies=None)])" + "| RunnableBinding(bound=ChatAnthropic(model='claude-3-haiku-20240307', temperature=0.0, anthropic_api_url='https://api.anthropic.com', anthropic_api_key=SecretStr('**********'), _client=, _async_client=), config_factories=[. 
at 0x106aeef20>]), config={'run_name': 'RunnableWithMessageHistory'}), get_session_history=, history_factory_config=[ConfigurableFieldSpec(id='session_id', annotation=, name='Session ID', description='Unique identifier for a session.', default='', is_shared=True, dependencies=None)])" ] }, "execution_count": 12, @@ -432,7 +535,7 @@ " input_messages: RunnableBinding(bound=RunnableLambda(_enter_history), config={'run_name': 'load_history'})\n", "}), config={'run_name': 'insert_history'})\n", "| RunnableBinding(bound=RunnableLambda(itemgetter('input_messages'))\n", - " | ChatAnthropic(model='claude-3-haiku-20240307', temperature=0.0, anthropic_api_url='https://api.anthropic.com', anthropic_api_key=SecretStr('**********'), _client=, _async_client=), config_factories=[. at 0x1473df6d0>]), config={'run_name': 'RunnableWithMessageHistory'}), get_session_history=, input_messages_key='input_messages', history_factory_config=[ConfigurableFieldSpec(id='session_id', annotation=, name='Session ID', description='Unique identifier for a session.', default='', is_shared=True, dependencies=None)])" + " | ChatAnthropic(model='claude-3-haiku-20240307', temperature=0.0, anthropic_api_url='https://api.anthropic.com', anthropic_api_key=SecretStr('**********'), _client=, _async_client=), config_factories=[. 
at 0x106aef560>]), config={'run_name': 'RunnableWithMessageHistory'}), get_session_history=, input_messages_key='input_messages', history_factory_config=[ConfigurableFieldSpec(id='session_id', annotation=, name='Session ID', description='Unique identifier for a session.', default='', is_shared=True, dependencies=None)])" ] }, "execution_count": 13, @@ -478,7 +581,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "477d04b3-c2b6-4ba5-962f-492c0d625cd5", "metadata": {}, "outputs": [], @@ -499,7 +602,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "cd6a250e-17fe-4368-a39d-1fe6b2cbde68", "metadata": {}, "outputs": [], @@ -522,7 +625,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "2afc1556-8da1-4499-ba11-983b66c58b18", "metadata": {}, "outputs": [], @@ -541,7 +644,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "id": "ca7c64d8-e138-4ef8-9734-f82076c47d80", "metadata": {}, "outputs": [], @@ -571,17 +674,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "id": "a85bcc22-ca4c-4ad5-9440-f94be7318f3e", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse in a right triangle.')" + "AIMessage(content='Cosine is a trigonometric function that represents the ratio of the adjacent side to the hypotenuse of a right triangle.', response_metadata={'id': 'msg_01DwU2BD8KPLoXeZ6bZPqxxJ', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 164, 'output_tokens': 31}}, id='run-c2a443c4-79b1-4b07-bb42-5e9112e5bbfc-0')" ] }, - "execution_count": 11, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -595,17 +698,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, 
"id": "ab29abd3-751f-41ce-a1b0-53f6b565e79d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content='The inverse of cosine is the arccosine function, denoted as acos or cos^-1, which gives the angle corresponding to a given cosine value.')" + "AIMessage(content='The inverse of cosine is called arccosine or inverse cosine.', response_metadata={'id': 'msg_01XYH5iCUokxV1UDhUa8xzna', 'model': 'claude-3-haiku-20240307', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 202, 'output_tokens': 19}}, id='run-97dda3a2-01e3-42e5-8241-f948e7535ffc-0')" ] }, - "execution_count": 12, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -622,7 +725,7 @@ "id": "da3d1feb-b4bb-4624-961c-7db2e1180df7", "metadata": {}, "source": [ - ":::{.callout-tip}\n", + ":::tip\n", "\n", "[Langsmith trace](https://smith.langchain.com/public/bd73e122-6ec1-48b2-82df-e6483dc9cb63/r)\n", "\n", @@ -666,7 +769,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/docs/docs/how_to/multimodal_inputs.ipynb b/docs/docs/how_to/multimodal_inputs.ipynb new file mode 100644 index 00000000000..c64ea49d61c --- /dev/null +++ b/docs/docs/how_to/multimodal_inputs.ipynb @@ -0,0 +1,228 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", + "metadata": {}, + "source": [ + "# How to pass multimodal data directly to models\n", + "\n", + "Here we demonstrate how to pass multimodal input directly to models. \n", + "We currently expect all input to be passed in the same format as [OpenAI expects](https://platform.openai.com/docs/guides/vision).\n", + "For other model providers that support multimodal input, we have added logic inside the class to convert to the expected format.\n", + "\n", + "In this example we will ask a model to describe an image." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", + "metadata": {}, + "outputs": [], + "source": [ + "image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fb896ce9", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.messages import HumanMessage\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "model = ChatOpenAI(model=\"gpt-4o\")" + ] + }, + { + "cell_type": "markdown", + "id": "4fca4da7", + "metadata": {}, + "source": [ + "The most commonly supported way to pass in images is to pass it in as a byte string.\n", + "This should work for most model integrations." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9ca1040c", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "\n", + "import httpx\n", + "\n", + "image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ec680b6b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. 
The lush green grass and clear visibility further indicate good weather conditions.\n" + ] + } + ], + "source": [ + "message = HumanMessage(\n", + " content=[\n", + " {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\"url\": f\"data:image/jpeg;base64,{image_data}\"},\n", + " },\n", + " ],\n", + ")\n", + "response = model.invoke([message])\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "id": "8656018e-c56d-47d2-b2be-71e87827f90a", + "metadata": {}, + "source": [ + "We can feed the image URL directly in a content block of type \"image_url\". Note that only some model providers support this." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered clouds, suggesting good visibility and a likely pleasant temperature. The bright sunlight is casting distinct shadows on the grass and vegetation, indicating it is likely daytime, possibly late morning or early afternoon. The overall ambiance suggests a warm and inviting day, suitable for outdoor activities.\n" + ] + } + ], + "source": [ + "message = HumanMessage(\n", + " content=[\n", + " {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n", + " {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", + " ],\n", + ")\n", + "response = model.invoke([message])\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "id": "1c470309", + "metadata": {}, + "source": [ + "We can also pass in multiple images." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "325fb4ca", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Yes, the two images are the same. 
They both depict a wooden boardwalk extending through a grassy field under a blue sky with light clouds. The scenery, lighting, and composition are identical.\n" + ] + } + ], + "source": [ + "message = HumanMessage(\n", + " content=[\n", + " {\"type\": \"text\", \"text\": \"are these two images the same?\"},\n", + " {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", + " {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", + " ],\n", + ")\n", + "response = model.invoke([message])\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "id": "71bd28cf-d76c-44e2-a55e-c5f265db986e", + "metadata": {}, + "source": [ + "## Tool calls\n", + "\n", + "Some multimodal models support [tool calling](/docs/concepts/#functiontool-calling) features as well. To call tools using such models, simply bind tools to them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data)." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "cd22ea82-2f93-46f9-9f7a-6aaf479fcaa9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_BSX4oq4SKnLlp2WlzDhToHBr'}]\n" + ] + } + ], + "source": [ + "from typing import Literal\n", + "\n", + "from langchain_core.tools import tool\n", + "\n", + "\n", + "@tool\n", + "def weather_tool(weather: Literal[\"sunny\", \"cloudy\", \"rainy\"]) -> None:\n", + " \"\"\"Describe the weather\"\"\"\n", + " pass\n", + "\n", + "\n", + "model_with_tools = model.bind_tools([weather_tool])\n", + "\n", + "message = HumanMessage(\n", + " content=[\n", + " {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n", + " {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", + " ],\n", + ")\n", + "response = model_with_tools.invoke([message])\n", + "print(response.tool_calls)" + ] + } + ], + 
"metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/how_to/multimodal_prompts.ipynb b/docs/docs/how_to/multimodal_prompts.ipynb new file mode 100644 index 00000000000..6a41ad3673e --- /dev/null +++ b/docs/docs/how_to/multimodal_prompts.ipynb @@ -0,0 +1,184 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4facdf7f-680e-4d28-908b-2b8408e2a741", + "metadata": {}, + "source": [ + "# How to use multimodal prompts\n", + "\n", + "Here we demonstrate how to use prompt templates to format multimodal inputs to models. \n", + "\n", + "In this example we will ask a model to describe an image." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "\n", + "import httpx\n", + "\n", + "image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n", + "image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "2671f995", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "model = ChatOpenAI(model=\"gpt-4o\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "4ee35e4f", + "metadata": {}, + "outputs": [], + "source": [ + "prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\"system\", \"Describe the image 
provided\"),\n", + " (\n", + " \"user\",\n", + " [{\"type\": \"image_url\", \"image_url\": \"data:image/jpeg;base64,{image_data}\"}],\n", + " ),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "089f75c2", + "metadata": {}, + "outputs": [], + "source": [ + "chain = prompt | model" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "02744b06", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The image depicts a sunny day with a beautiful blue sky filled with scattered white clouds. The sky has varying shades of blue, ranging from a deeper hue near the horizon to a lighter, almost pale blue higher up. The white clouds are fluffy and scattered across the expanse of the sky, creating a peaceful and serene atmosphere. The lighting and cloud patterns suggest pleasant weather conditions, likely during the daytime hours on a mild, sunny day in an outdoor natural setting.\n" + ] + } + ], + "source": [ + "response = chain.invoke({\"image_data\": image_data})\n", + "print(response.content)" + ] + }, + { + "cell_type": "markdown", + "id": "e9b9ebf6", + "metadata": {}, + "source": [ + "We can also pass in multiple images." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "02190ee3", + "metadata": {}, + "outputs": [], + "source": [ + "prompt = ChatPromptTemplate.from_messages(\n", + " [\n", + " (\"system\", \"compare the two pictures provided\"),\n", + " (\n", + " \"user\",\n", + " [\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": \"data:image/jpeg;base64,{image_data1}\",\n", + " },\n", + " {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": \"data:image/jpeg;base64,{image_data2}\",\n", + " },\n", + " ],\n", + " ),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "42af057b", + "metadata": {}, + "outputs": [], + "source": [ + "chain = prompt | model" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "513abe00", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The two images provided are identical. Both images feature a wooden boardwalk path extending through a lush green field under a bright blue sky with some clouds. 
The perspective, colors, and elements in both images are exactly the same.\n" + ] + } + ], + "source": [ + "response = chain.invoke({\"image_data1\": image_data, \"image_data2\": image_data})\n", + "print(response.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea8152c3", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/how_to/sql_csv.ipynb b/docs/docs/how_to/sql_csv.ipynb index 3e3ed1ce51e..fc379c219db 100644 --- a/docs/docs/how_to/sql_csv.ipynb +++ b/docs/docs/how_to/sql_csv.ipynb @@ -503,7 +503,7 @@ } ], "source": [ - "chain = prompt | llm_with_tools | parser | tool # noqa\n", + "chain = prompt | llm_with_tools | parser | tool\n", "chain.invoke({\"question\": \"What's the correlation between age and fare\"})" ] }, diff --git a/docs/docs/how_to/sql_large_db.ipynb b/docs/docs/how_to/sql_large_db.ipynb index 6b98c1c5160..199c3c4f765 100644 --- a/docs/docs/how_to/sql_large_db.ipynb +++ b/docs/docs/how_to/sql_large_db.ipynb @@ -262,7 +262,7 @@ " return tables\n", "\n", "\n", - "table_chain = category_chain | get_tables # noqa\n", + "table_chain = category_chain | get_tables\n", "table_chain.invoke({\"input\": \"What are all the genres of Alanis Morisette songs\"})" ] }, diff --git a/docs/docs/how_to/tool_calls_multi_modal.ipynb b/docs/docs/how_to/tool_calls_multi_modal.ipynb deleted file mode 100644 index 1550d843a92..00000000000 --- a/docs/docs/how_to/tool_calls_multi_modal.ipynb +++ /dev/null @@ -1,160 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": 
"4facdf7f-680e-4d28-908b-2b8408e2a741", - "metadata": {}, - "source": [ - "# How to call tools with multi-modal data\n", - "\n", - "Here we demonstrate how to call tools with multi-modal data, such as images.\n", - "\n", - "Some multi-modal models, such as those that can reason over images or audio, support [tool calling](/docs/concepts/#functiontool-calling) features as well.\n", - "\n", - "To call tools using such models, simply bind tools to them in the [usual way](/docs/how_to/tool_calling), and invoke the model using content blocks of the desired type (e.g., containing image data).\n", - "\n", - "Below, we demonstrate examples using [OpenAI](/docs/integrations/platforms/openai) and [Anthropic](/docs/integrations/platforms/anthropic). We will use the same image and tool in all cases. Let's first select an image, and build a placeholder tool that expects as input the string \"sunny\", \"cloudy\", or \"rainy\". We will ask the models to describe the weather in the image." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "0d9fd81a-b7f0-445a-8e3d-cfc2d31fdd59", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Literal\n", - "\n", - "from langchain_core.tools import tool\n", - "\n", - "image_url = \"https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg\"\n", - "\n", - "\n", - "@tool\n", - "def weather_tool(weather: Literal[\"sunny\", \"cloudy\", \"rainy\"]) -> None:\n", - " \"\"\"Describe the weather\"\"\"\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "id": "8656018e-c56d-47d2-b2be-71e87827f90a", - "metadata": {}, - "source": [ - "## OpenAI\n", - "\n", - "For OpenAI, we can feed the image URL directly in a content block of type \"image_url\":" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "a8819cf3-5ddc-44f0-889a-19ca7b7fe77e", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - 
"output_type": "stream", - "text": [ - "[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'call_mRYL50MtHdeNuNIjSCm5UPmB'}]\n" - ] - } - ], - "source": [ - "from langchain_core.messages import HumanMessage\n", - "from langchain_openai import ChatOpenAI\n", - "\n", - "model = ChatOpenAI(model=\"gpt-4o\").bind_tools([weather_tool])\n", - "\n", - "message = HumanMessage(\n", - " content=[\n", - " {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n", - " {\"type\": \"image_url\", \"image_url\": {\"url\": image_url}},\n", - " ],\n", - ")\n", - "response = model.invoke([message])\n", - "print(response.tool_calls)" - ] - }, - { - "cell_type": "markdown", - "id": "e5738224-1109-4bf8-8976-ff1570dd1d46", - "metadata": {}, - "source": [ - "Note that we recover tool calls with parsed arguments in LangChain's [standard format](/docs/how_to/tool_calling) in the model response." - ] - }, - { - "cell_type": "markdown", - "id": "0cee63ff-e09f-4dd8-8323-912edbde94f6", - "metadata": {}, - "source": [ - "## Anthropic\n", - "\n", - "For Anthropic, we can format a base64-encoded image into a content block of type \"image\", as below:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d90c4590-71c8-42b1-99ff-03a9eca8082e", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'name': 'weather_tool', 'args': {'weather': 'sunny'}, 'id': 'toolu_016m9KfknJqx5fVRYk4tkF6s'}]\n" - ] - } - ], - "source": [ - "import base64\n", - "\n", - "import httpx\n", - "from langchain_anthropic import ChatAnthropic\n", - "\n", - "image_data = base64.b64encode(httpx.get(image_url).content).decode(\"utf-8\")\n", - "\n", - "model = ChatAnthropic(model=\"claude-3-sonnet-20240229\").bind_tools([weather_tool])\n", - "\n", - "message = HumanMessage(\n", - " content=[\n", - " {\"type\": \"text\", \"text\": \"describe the weather in this image\"},\n", - " {\n", - " \"type\": \"image\",\n", - " \"source\": {\n", - " 
\"type\": \"base64\",\n", - " \"media_type\": \"image/jpeg\",\n", - " \"data\": image_data,\n", - " },\n", - " },\n", - " ],\n", - ")\n", - "response = model.invoke([message])\n", - "print(response.tool_calls)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/docs/integrations/callbacks/confident.ipynb b/docs/docs/integrations/callbacks/confident.ipynb index a5110206291..e1fdc34c955 100644 --- a/docs/docs/integrations/callbacks/confident.ipynb +++ b/docs/docs/integrations/callbacks/confident.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai deepeval langchain-chroma" + "%pip install --upgrade --quiet langchain langchain-openai langchain-community deepeval langchain-chroma" ] }, { diff --git a/docs/docs/integrations/callbacks/context.ipynb b/docs/docs/integrations/callbacks/context.ipynb index 8b3330265be..c4b3fcea930 100644 --- a/docs/docs/integrations/callbacks/context.ipynb +++ b/docs/docs/integrations/callbacks/context.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai context-python" + "%pip install --upgrade --quiet langchain langchain-openai langchain-community context-python" ] }, { diff --git a/docs/docs/integrations/callbacks/infino.ipynb b/docs/docs/integrations/callbacks/infino.ipynb index 07831d5ef98..ff254ff687a 100644 --- a/docs/docs/integrations/callbacks/infino.ipynb +++ b/docs/docs/integrations/callbacks/infino.ipynb @@ -36,7 +36,8 @@ "# Install necessary dependencies.\n", "%pip install 
--upgrade --quiet infinopy\n", "%pip install --upgrade --quiet matplotlib\n", - "%pip install --upgrade --quiet tiktoken" + "%pip install --upgrade --quiet tiktoken\n", + "%pip install --upgrade --quiet langchain langchain-openai langchain-community" ] }, { diff --git a/docs/docs/integrations/callbacks/labelstudio.ipynb b/docs/docs/integrations/callbacks/labelstudio.ipynb index e6841025c92..89158e38c73 100644 --- a/docs/docs/integrations/callbacks/labelstudio.ipynb +++ b/docs/docs/integrations/callbacks/labelstudio.ipynb @@ -56,7 +56,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai" + "%pip install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai langchain-community" ] }, { diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb index dd749afcf01..06f2e602409 100644 --- a/docs/docs/integrations/callbacks/promptlayer.ipynb +++ b/docs/docs/integrations/callbacks/promptlayer.ipynb @@ -32,7 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet promptlayer --upgrade" + "%pip install --upgrade --quiet langchain-community promptlayer --upgrade" ] }, { diff --git a/docs/docs/integrations/callbacks/trubrics.ipynb b/docs/docs/integrations/callbacks/trubrics.ipynb index 48ba49cc36e..27ddf288415 100644 --- a/docs/docs/integrations/callbacks/trubrics.ipynb +++ b/docs/docs/integrations/callbacks/trubrics.ipynb @@ -35,7 +35,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet trubrics" + "%pip install --upgrade --quiet trubrics langchain langchain-community" ] }, { diff --git a/docs/docs/integrations/callbacks/uptrain.ipynb b/docs/docs/integrations/callbacks/uptrain.ipynb index 0a42c24e855..7dcf97c5175 100644 --- a/docs/docs/integrations/callbacks/uptrain.ipynb +++ b/docs/docs/integrations/callbacks/uptrain.ipynb @@ -81,7 +81,7 @@ } ], "source": [ - "%pip 
install -qU langchain langchain_openai uptrain faiss-cpu flashrank" + "%pip install -qU langchain langchain_openai langchain-community uptrain faiss-cpu flashrank" ] }, { diff --git a/docs/docs/integrations/chat/anthropic.ipynb b/docs/docs/integrations/chat/anthropic.ipynb index 6ca0db0e5ab..0120e7f0442 100644 --- a/docs/docs/integrations/chat/anthropic.ipynb +++ b/docs/docs/integrations/chat/anthropic.ipynb @@ -670,7 +670,7 @@ " \"type\": \"image_url\",\n", " \"image_url\": {\n", " # langchain logo\n", - " \"url\": f\"data:image/png;base64,{img_base64}\", # noqa: E501\n", + " \"url\": f\"data:image/png;base64,{img_base64}\",\n", " },\n", " },\n", " {\"type\": \"text\", \"text\": \"What is this logo for?\"},\n", diff --git a/docs/docs/integrations/chat/kinetica.ipynb b/docs/docs/integrations/chat/kinetica.ipynb index 36bb049e782..93fd51f4acf 100644 --- a/docs/docs/integrations/chat/kinetica.ipynb +++ b/docs/docs/integrations/chat/kinetica.ipynb @@ -62,10 +62,10 @@ "%pip install --upgrade --quiet langchain-core langchain-community\n", "\n", "# Install Kineitca DB connection package\n", - "%pip install --upgrade --quiet gpudb typeguard\n", + "%pip install --upgrade --quiet 'gpudb>=7.2.0.8' typeguard pandas tqdm\n", "\n", "# Install packages needed for this tutorial\n", - "%pip install --upgrade --quiet faker" + "%pip install --upgrade --quiet faker ipykernel " ] }, { @@ -114,7 +114,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -139,11 +139,11 @@ "\n", " birthdate \n", "id \n", - "0 1997-12-01 \n", - "1 1924-07-27 \n", - "2 1933-11-28 \n", - "3 1988-10-19 \n", - "4 1931-03-12 \n" + "0 1997-12-08 \n", + "1 1924-08-03 \n", + "2 1933-12-05 \n", + "3 1988-10-26 \n", + "4 1931-03-19 \n" ] } ], @@ -222,39 +222,60 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CREATE OR REPLACE CONTEXT 
\"demo\".\"test_llm_ctx\" (\n", + " TABLE = \"demo\".\"user_profiles\",\n", + " COMMENT = 'Contains user profiles.'\n", + "),\n", + "(\n", + " SAMPLES = ( \n", + " 'How many male users are there?' = 'select count(1) as num_users\n", + " from demo.user_profiles\n", + " where sex = ''M'';' )\n", + ")\n" + ] + }, { "data": { "text/plain": [ "1" ] }, - "execution_count": 4, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# create an LLM context for the table.\n", + "from gpudb import GPUdbSamplesClause, GPUdbSqlContext, GPUdbTableClause\n", "\n", - "sql = f\"\"\"\n", - "CREATE OR REPLACE CONTEXT {kinetica_ctx}\n", - "(\n", - " TABLE = {table_name}\n", - " COMMENT = 'Contains user profiles.'\n", - "),\n", - "(\n", - " SAMPLES = (\n", - " 'How many male users are there?' = \n", - " 'select count(1) as num_users\n", - " from {table_name}\n", - " where sex = ''M'';')\n", + "table_ctx = GPUdbTableClause(table=table_name, comment=\"Contains user profiles.\")\n", + "\n", + "samples_ctx = GPUdbSamplesClause(\n", + " samples=[\n", + " (\n", + " \"How many male users are there?\",\n", + " f\"\"\"\n", + " select count(1) as num_users\n", + " from {table_name}\n", + " where sex = 'M';\n", + " \"\"\",\n", + " )\n", + " ]\n", ")\n", - "\"\"\"\n", "\n", - "count_affected = kinetica_llm.kdbc.execute(sql)\n", + "context_sql = GPUdbSqlContext(\n", + " name=kinetica_ctx, tables=[table_ctx], samples=samples_ctx\n", + ").build_sql()\n", + "\n", + "print(context_sql)\n", + "count_affected = kinetica_llm.kdbc.execute(context_sql)\n", "count_affected" ] }, @@ -273,7 +294,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -334,7 +355,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -357,7 +378,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -404,7 
+425,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.19" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/docs/docs/integrations/chat/premai.ipynb b/docs/docs/integrations/chat/premai.ipynb index 13a2ece2733..2e26a39befe 100644 --- a/docs/docs/integrations/chat/premai.ipynb +++ b/docs/docs/integrations/chat/premai.ipynb @@ -15,10 +15,9 @@ "source": [ "# ChatPremAI\n", "\n", - ">[PremAI](https://app.premai.io) is a unified platform that lets you build powerful production-ready GenAI-powered applications with the least effort so that you can focus more on user experience and overall growth. \n", + "[PremAI](https://premai.io/) is an all-in-one platform that simplifies the creation of robust, production-ready applications powered by Generative AI. By streamlining the development process, PremAI allows you to concentrate on enhancing user experience and driving overall growth for your application. You can quickly start using our platform [here](https://docs.premai.io/quick-start).\n", "\n", - "\n", - "This example goes over how to use LangChain to interact with `ChatPremAI`. " + "This example goes over how to use LangChain to interact with different chat models with `ChatPremAI`" ] }, { @@ -27,23 +26,13 @@ "source": [ "### Installation and setup\n", "\n", - "We start by installing langchain and premai-sdk. You can type the following command to install:\n", + "We start by installing `langchain` and `premai-sdk`. You can type the following command to install:\n", "\n", "```bash\n", "pip install premai langchain\n", "```\n", "\n", - "Before proceeding further, please make sure that you have made an account on PremAI and already started a project. If not, then here's how you can start for free:\n", - "\n", - "1. Sign in to [PremAI](https://app.premai.io/accounts/login/), if you are coming for the first time and create your API key [here](https://app.premai.io/api_keys/).\n", - "\n", - "2. 
Go to [app.premai.io](https://app.premai.io) and this will take you to the project's dashboard. \n", - "\n", - "3. Create a project and this will generate a project-id (written as ID). This ID will help you to interact with your deployed application. \n", - "\n", - "4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like max-tokens, temperature, etc) and also pre-set your system prompt. \n", - "\n", - "Congratulations on creating your first deployed application on PremAI 🎉 Now we can use langchain to interact with our application. " + "Before proceeding further, please make sure that you have made an account on PremAI and already created a project. If not, please refer to the [quick start](https://docs.premai.io/introduction) guide to get started with the PremAI platform. Create your first project and grab your API key." ] }, { @@ -60,13 +49,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Setup ChatPremAI instance in LangChain \n", + "### Setup PremAI client in LangChain\n", "\n", - "Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error.\n", + "Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is `8`. But make sure you use your project-id, otherwise it will throw error.\n", "\n", - "To use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model. \n", + "To use langchain with prem, you do not need to pass any model name or set any parameters with our chat-client. By default it will use the model name and parameters used in the [LaunchPad](https://docs.premai.io/get-started/launchpad). 
\n", "\n", - "`NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override existing default configurations. " + "> Note: If you change the `model` or any other parameters like `temperature` or `max_tokens` while setting the client, it will override existing default configurations, that was used in LaunchPad. " ] }, { @@ -102,13 +91,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Calling the Model\n", + "### Chat Completions\n", "\n", - "Now you are all set. We can now start by interacting with our application. `ChatPremAI` supports two methods `invoke` (which is the same as `generate`) and `stream`. \n", + "`ChatPremAI` supports two methods: `invoke` (which is the same as `generate`) and `stream`. \n", "\n", - "The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. \n", - "\n", - "### Generation" + "The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. " ] }, { @@ -165,7 +152,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can also change generation parameters while calling the model. Here's how you can do that" + "You can provide system prompt here like this:" ] }, { @@ -192,15 +179,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Important notes:\n", + "> If you are going to place system prompt here, then it will override your system prompt that was fixed while deploying the application from the platform. \n", "\n", - "Before proceeding further, please note that the current version of ChatPrem does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop) are not supported. 
\n", - "\n", - "We will provide support for those two above parameters in sooner versions. \n", + "> Please note that the current version of ChatPremAI does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop). \n", "\n", "### Streaming\n", "\n", - "And finally, here's how you do token streaming for dynamic chat like applications. " + "In this section, let's see how we can stream tokens using langchain and PremAI. Here's how you do it. " ] }, { @@ -228,7 +213,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Similar to above, if you want to override the system-prompt and the generation parameters, here's how you can do it. " + "Similar to above, if you want to override the system-prompt and the generation parameters, you need to add the following:" ] }, { diff --git a/docs/docs/integrations/chat_loaders/discord.ipynb b/docs/docs/integrations/chat_loaders/discord.ipynb index 515d8cf27f7..6b4c2529e9f 100644 --- a/docs/docs/integrations/chat_loaders/discord.ipynb +++ b/docs/docs/integrations/chat_loaders/discord.ipynb @@ -95,7 +95,7 @@ " \"\"\"\n", " self.path = path\n", " self._message_line_regex = re.compile(\n", - " r\"(.+?) — (\\w{3,9} \\d{1,2}(?:st|nd|rd|th)?(?:, \\d{4})? \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\", # noqa\n", + " r\"(.+?) — (\\w{3,9} \\d{1,2}(?:st|nd|rd|th)?(?:, \\d{4})? \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\",\n", " flags=re.DOTALL,\n", " )\n", "\n", @@ -120,7 +120,7 @@ " current_content = []\n", " for line in lines:\n", " if re.match(\n", - " r\".+? — (\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\", # noqa\n", + " r\".+? 
— (\\d{2}/\\d{2}/\\d{4} \\d{1,2}:\\d{2} (?:AM|PM)|Today at \\d{1,2}:\\d{2} (?:AM|PM)|Yesterday at \\d{1,2}:\\d{2} (?:AM|PM))\",\n", " line,\n", " ):\n", " if current_sender and current_content:\n", diff --git a/docs/docs/integrations/chat_loaders/wechat.ipynb b/docs/docs/integrations/chat_loaders/wechat.ipynb index b2602070ae5..40aee0e1ee2 100644 --- a/docs/docs/integrations/chat_loaders/wechat.ipynb +++ b/docs/docs/integrations/chat_loaders/wechat.ipynb @@ -94,7 +94,7 @@ " \"\"\"\n", " self.path = path\n", " self._message_line_regex = re.compile(\n", - " r\"(?P.+?) (?P\\d{4}/\\d{2}/\\d{2} \\d{1,2}:\\d{2} (?:AM|PM))\", # noqa\n", + " r\"(?P.+?) (?P\\d{4}/\\d{2}/\\d{2} \\d{1,2}:\\d{2} (?:AM|PM))\",\n", " # flags=re.DOTALL,\n", " )\n", "\n", diff --git a/docs/docs/integrations/document_loaders/async_html.ipynb b/docs/docs/integrations/document_loaders/async_html.ipynb index 95bb82efaac..3de66e3e8b3 100644 --- a/docs/docs/integrations/document_loaders/async_html.ipynb +++ b/docs/docs/integrations/document_loaders/async_html.ipynb @@ -37,6 +37,10 @@ "source": [ "urls = [\"https://www.espn.com\", \"https://lilianweng.github.io/posts/2023-06-23-agent/\"]\n", "loader = AsyncHtmlLoader(urls)\n", + "# If you need to use the proxy to make web requests, for example using http_proxy/https_proxy environmental variables,\n", + "# please set trust_env=True explicitly here as follows:\n", + "# loader = AsyncHtmlLoader(urls, trust_env=True)\n", + "# Otherwise, loader.load() may stuck becuase aiohttp session does not recognize the proxy by default\n", "docs = loader.load()" ] }, diff --git a/docs/docs/integrations/document_loaders/example_data/source_code/example.py b/docs/docs/integrations/document_loaders/example_data/source_code/example.py index 5838764ee40..2a2760b6a60 100644 --- a/docs/docs/integrations/document_loaders/example_data/source_code/example.py +++ b/docs/docs/integrations/document_loaders/example_data/source_code/example.py @@ -3,7 +3,7 @@ class MyClass: self.name = 
name def greet(self): - print(f"Hello, {self.name}!") # noqa: T201 + print(f"Hello, {self.name}!") def main(): diff --git a/docs/docs/integrations/document_loaders/scrapfly.ipynb b/docs/docs/integrations/document_loaders/scrapfly.ipynb new file mode 100644 index 00000000000..2625e3d3fb9 --- /dev/null +++ b/docs/docs/integrations/document_loaders/scrapfly.ipynb @@ -0,0 +1,107 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ScrapFly\n", + "[ScrapFly](https://scrapfly.io/) is a web scraping API with headless browser capabilities, proxies, and anti-bot bypass. It allows for extracting web page data into accessible LLM markdown or text." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Installation\n", + "Install ScrapFly Python SDK and he required Langchain packages using pip:\n", + "```shell\n", + "pip install scrapfly-sdk langchain langchain-community\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Usage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.document_loaders import ScrapflyLoader\n", + "\n", + "scrapfly_loader = ScrapflyLoader(\n", + " [\"https://web-scraping.dev/products\"],\n", + " api_key=\"Your ScrapFly API key\", # Get your API key from https://www.scrapfly.io/\n", + " ignore_scrape_failures=True, # Ignore unprocessable web pages and log their exceptions\n", + ")\n", + "\n", + "# Load documents from URLs as markdown\n", + "documents = scrapfly_loader.load()\n", + "print(documents)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The ScrapflyLoader also allows passigng ScrapeConfig object for customizing the scrape request. 
See the documentation for the full feature details and their API params: https://scrapfly.io/docs/scrape-api/getting-started" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.document_loaders import ScrapflyLoader\n", + "\n", + "scrapfly_scrape_config = {\n", + " \"asp\": True, # Bypass scraping blocking and antibot solutions, like Cloudflare\n", + " \"render_js\": True, # Enable JavaScript rendering with a cloud headless browser\n", + " \"proxy_pool\": \"public_residential_pool\", # Select a proxy pool (datacenter or residnetial)\n", + " \"country\": \"us\", # Select a proxy location\n", + " \"auto_scroll\": True, # Auto scroll the page\n", + " \"js\": \"\", # Execute custom JavaScript code by the headless browser\n", + "}\n", + "\n", + "scrapfly_loader = ScrapflyLoader(\n", + " [\"https://web-scraping.dev/products\"],\n", + " api_key=\"Your ScrapFly API key\", # Get your API key from https://www.scrapfly.io/\n", + " ignore_scrape_failures=True, # Ignore unprocessable web pages and log their exceptions\n", + " scrape_config=scrapfly_scrape_config, # Pass the scrape_config object\n", + " scrape_format=\"markdown\", # The scrape result format, either `markdown`(default) or `text`\n", + ")\n", + "\n", + "# Load documents from URLs as markdown\n", + "documents = scrapfly_loader.load()\n", + "print(documents)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/docs/integrations/document_transformers/rankllm-reranker.ipynb 
b/docs/docs/integrations/document_transformers/rankllm-reranker.ipynb new file mode 100644 index 00000000000..52727054796 --- /dev/null +++ b/docs/docs/integrations/document_transformers/rankllm-reranker.ipynb @@ -0,0 +1,781 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RankLLM Reranker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[RankLLM](https://github.com/castorini/rank_llm) offers a suite of listwise rerankers, albeit with focus on open source LLMs finetuned for the task - RankVicuna and RankZephyr being two of them." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet rank_llm" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain_openai" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet faiss-cpu" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Helper function for printing docs\n", + "def pretty_print_docs(docs):\n", + " print(\n", + " f\"\\n{'-' * 100}\\n\".join(\n", + " [f\"Document {i+1}:\\n\\n\" + d.page_content for i, d in enumerate(docs)]\n", + " )\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set up the base vector store retriever\n", + "Let's start by initializing a simple vector store retriever and storing the 2023 State of the Union speech (in chunks). We can set up the retriever to retrieve a high number (20) of docs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "\n", + "documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", + "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n", + "texts = text_splitter.split_documents(documents)\n", + "for idx, text in enumerate(texts):\n", + " text.metadata[\"id\"] = idx\n", + "\n", + "embedding = OpenAIEmbeddings(model=\"text-embedding-ada-002\")\n", + "retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={\"k\": 20})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Retrieval + RankLLM Reranking (RankZephyr)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Retrieval without reranking" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document 1:\n", + "\n", + "And with an unwavering resolve that freedom will always triumph over tyranny. \n", + "\n", + "Six days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n", + "\n", + "He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n", + "\n", + "He met the Ukrainian people.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 2:\n", + "\n", + "Together with our allies –we are right now enforcing powerful economic sanctions. 
\n", + "\n", + "We are cutting off Russia’s largest banks from the international financial system. \n", + "\n", + "Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. \n", + "\n", + "We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 3:\n", + "\n", + "And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \n", + "\n", + "The Russian stock market has lost 40% of its value and trading remains suspended. Russia’s economy is reeling and Putin alone is to blame.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 4:\n", + "\n", + "I spent countless hours unifying our European allies. We shared with the world in advance what we knew Putin was planning and precisely how he would try to falsely justify his aggression. \n", + "\n", + "We countered Russia’s lies with truth. \n", + "\n", + "And now that he has acted the free world is holding him accountable.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 5:\n", + "\n", + "He rejected repeated efforts at diplomacy. \n", + "\n", + "He thought the West and NATO wouldn’t respond. And he thought he could divide us at home. Putin was wrong. We were ready. Here is what we did. \n", + "\n", + "We prepared extensively and carefully. 
\n", + "\n", + "We spent months building a coalition of other freedom-loving nations from Europe and the Americas to Asia and Africa to confront Putin.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 6:\n", + "\n", + "And now that he has acted the free world is holding him accountable. \n", + "\n", + "Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \n", + "\n", + "We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \n", + "\n", + "Together with our allies –we are right now enforcing powerful economic sanctions.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 7:\n", + "\n", + "To all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n", + "\n", + "And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n", + "\n", + "Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 8:\n", + "\n", + "And we remain clear-eyed. The Ukrainians are fighting back with pure courage. But the next few days weeks, months, will be hard on them. \n", + "\n", + "Putin has unleashed violence and chaos. But while he may make gains on the battlefield – he will pay a continuing high price over the long run. 
\n", + "\n", + "And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 9:\n", + "\n", + "Tonight I say to the Russian oligarchs and corrupt leaders who have bilked billions of dollars off this violent regime no more. \n", + "\n", + "The U.S. Department of Justice is assembling a dedicated task force to go after the crimes of Russian oligarchs. \n", + "\n", + "We are joining with our European allies to find and seize your yachts your luxury apartments your private jets. We are coming for your ill-begotten gains.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 10:\n", + "\n", + "America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n", + "\n", + "These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n", + "\n", + "But I want you to know that we are going to be okay. \n", + "\n", + "When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 11:\n", + "\n", + "They keep moving. \n", + "\n", + "And the costs and the threats to America and the world keep rising. \n", + "\n", + "That’s why the NATO Alliance was created to secure peace and stability in Europe after World War 2. \n", + "\n", + "The United States is a member along with 29 other nations. \n", + "\n", + "It matters. American diplomacy matters. American resolve matters. 
\n", + "\n", + "Putin’s latest attack on Ukraine was premeditated and unprovoked. \n", + "\n", + "He rejected repeated efforts at diplomacy.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 12:\n", + "\n", + "Our forces are not going to Europe to fight in Ukraine, but to defend our NATO Allies – in the event that Putin decides to keep moving west. \n", + "\n", + "For that purpose we’ve mobilized American ground forces, air squadrons, and ship deployments to protect NATO countries including Poland, Romania, Latvia, Lithuania, and Estonia. \n", + "\n", + "As I have made crystal clear the United States and our Allies will defend every inch of territory of NATO countries with the full force of our collective power.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 13:\n", + "\n", + "While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly. \n", + "\n", + "We see the unity among leaders of nations and a more unified Europe a more unified West. And we see unity among the people who are gathering in cities in large crowds around the world even in Russia to demonstrate their support for Ukraine.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 14:\n", + "\n", + "He met the Ukrainian people. \n", + "\n", + "From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n", + "\n", + "Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. 
\n", + "\n", + "In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 15:\n", + "\n", + "In the battle between democracy and autocracy, democracies are rising to the moment, and the world is clearly choosing the side of peace and security. \n", + "\n", + "This is a real test. It’s going to take time. So let us continue to draw inspiration from the iron will of the Ukrainian people. \n", + "\n", + "To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. \n", + "\n", + "Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 16:\n", + "\n", + "Together with our allies we are providing support to the Ukrainians in their fight for freedom. Military assistance. Economic assistance. Humanitarian assistance. \n", + "\n", + "We are giving more than $1 Billion in direct assistance to Ukraine. \n", + "\n", + "And we will continue to aid the Ukrainian people as they defend their country and to help ease their suffering. \n", + "\n", + "Let me be clear, our forces are not engaged and will not engage in conflict with Russian forces in Ukraine.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 17:\n", + "\n", + "Let each of us here tonight in this Chamber send an unmistakable signal to Ukraine and to the world. \n", + "\n", + "Please rise if you are able and show that, Yes, we the United States of America stand with the Ukrainian people. 
\n", + "\n", + "Throughout our history we’ve learned this lesson when dictators do not pay a price for their aggression they cause more chaos. \n", + "\n", + "They keep moving. \n", + "\n", + "And the costs and the threats to America and the world keep rising.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 18:\n", + "\n", + "It fueled our efforts to vaccinate the nation and combat COVID-19. It delivered immediate economic relief for tens of millions of Americans. \n", + "\n", + "Helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. \n", + "\n", + "And as my Dad used to say, it gave people a little breathing room.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 19:\n", + "\n", + "My administration is providing assistance with job training and housing, and now helping lower-income veterans get VA care debt-free. \n", + "\n", + "Our troops in Iraq and Afghanistan faced many dangers. \n", + "\n", + "One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \n", + "\n", + "When they came home, many of the world’s fittest and best trained warriors were never the same. \n", + "\n", + "Headaches. Numbness. Dizziness.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 20:\n", + "\n", + "Every Administration says they’ll do it, but we are actually doing it. \n", + "\n", + "We will buy American to make sure everything from the deck of an aircraft carrier to the steel on highway guardrails are made in America. 
\n", + "\n", + "But to compete for the best jobs of the future, we also need to level the playing field with China and other competitors.\n" + ] + } + ], + "source": [ + "query = \"What was done to Russia?\"\n", + "docs = retriever.invoke(query)\n", + "pretty_print_docs(docs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Retrieval + Reranking with RankZephyr" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.retrievers.contextual_compression import ContextualCompressionRetriever\n", + "from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank\n", + "\n", + "compressor = RankLLMRerank(top_n=3, model=\"zephyr\")\n", + "compression_retriever = ContextualCompressionRetriever(\n", + " base_compressor=compressor, base_retriever=retriever\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document 1:\n", + "\n", + "Together with our allies –we are right now enforcing powerful economic sanctions. \n", + "\n", + "We are cutting off Russia’s largest banks from the international financial system. \n", + "\n", + "Preventing Russia’s central bank from defending the Russian Ruble making Putin’s $630 Billion “war fund” worthless. \n", + "\n", + "We are choking off Russia’s access to technology that will sap its economic strength and weaken its military for years to come.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 2:\n", + "\n", + "And tonight I am announcing that we will join our allies in closing off American air space to all Russian flights – further isolating Russia – and adding an additional squeeze –on their economy. The Ruble has lost 30% of its value. \n", + "\n", + "The Russian stock market has lost 40% of its value and trading remains suspended. 
Russia’s economy is reeling and Putin alone is to blame.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 3:\n", + "\n", + "And now that he has acted the free world is holding him accountable. \n", + "\n", + "Along with twenty-seven members of the European Union including France, Germany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and many others, even Switzerland. \n", + "\n", + "We are inflicting pain on Russia and supporting the people of Ukraine. Putin is now isolated from the world more than ever. \n", + "\n", + "Together with our allies –we are right now enforcing powerful economic sanctions.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "compressed_docs = compression_retriever.invoke(query)\n", + "pretty_print_docs(compressed_docs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Can be used within a QA pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'query': 'What was done to Russia?',\n", + " 'result': 'Russia has been subjected to powerful economic sanctions, including cutting off its largest banks from the international financial system, preventing its central bank from defending the Russian Ruble, and choking off its access to technology. Additionally, American airspace has been closed to all Russian flights, further isolating Russia and adding pressure on its economy. 
These actions have led to a significant devaluation of the Ruble, a sharp decline in the Russian stock market, and overall economic turmoil in Russia.'}" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain.chains import RetrievalQA\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "llm = ChatOpenAI(temperature=0)\n", + "\n", + "chain = RetrievalQA.from_chain_type(\n", + " llm=ChatOpenAI(temperature=0), retriever=compression_retriever\n", + ")\n", + "\n", + "chain({\"query\": query})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Retrieval + RankLLM Reranking (RankGPT)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Retrieval without reranking" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document 1:\n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 2:\n", + "\n", + "As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n", + "\n", + "While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. 
From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 3:\n", + "\n", + "A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n", + "\n", + "And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 4:\n", + "\n", + "He met the Ukrainian people. \n", + "\n", + "From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n", + "\n", + "Groups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. \n", + "\n", + "In this struggle as President Zelenskyy said in his speech to the European Parliament “Light will win over darkness.” The Ukrainian Ambassador to the United States is here tonight.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 5:\n", + "\n", + "But that trickle-down theory led to weaker economic growth, lower wages, bigger deficits, and the widest gap between those at the top and everyone else in nearly a century. \n", + "\n", + "Vice President Harris and I ran for office with a new economic vision for America. \n", + "\n", + "Invest in America. Educate Americans. Grow the workforce. 
Build the economy from the bottom up \n", + "and the middle out, not from the top down.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 6:\n", + "\n", + "And tonight, I’m announcing that the Justice Department will name a chief prosecutor for pandemic fraud. \n", + "\n", + "By the end of this year, the deficit will be down to less than half what it was before I took office. \n", + "\n", + "The only president ever to cut the deficit by more than one trillion dollars in a single year. \n", + "\n", + "Lowering your costs also means demanding more competition. \n", + "\n", + "I’m a capitalist, but capitalism without competition isn’t capitalism. \n", + "\n", + "It’s exploitation—and it drives up prices.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 7:\n", + "\n", + "I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n", + "\n", + "I’ve worked on these issues a long time. \n", + "\n", + "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", + "\n", + "So let’s not abandon our streets. Or choose between safety and equal justice.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 8:\n", + "\n", + "As I’ve told Xi Jinping, it is never a good bet to bet against the American people. \n", + "\n", + "We’ll create good jobs for millions of Americans, modernizing roads, airports, ports, and waterways all across America. 
\n", + "\n", + "And we’ll do it all to withstand the devastating effects of the climate crisis and promote environmental justice.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 9:\n", + "\n", + "Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n", + "\n", + "Last year COVID-19 kept us apart. This year we are finally together again. \n", + "\n", + "Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n", + "\n", + "With a duty to one another to the American people to the Constitution. \n", + "\n", + "And with an unwavering resolve that freedom will always triumph over tyranny.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 10:\n", + "\n", + "As Ohio Senator Sherrod Brown says, “It’s time to bury the label “Rust Belt.” \n", + "\n", + "It’s time. \n", + "\n", + "But with all the bright spots in our economy, record job growth and higher wages, too many families are struggling to keep up with the bills. \n", + "\n", + "Inflation is robbing them of the gains they might otherwise feel. \n", + "\n", + "I get it. That’s why my top priority is getting prices under control.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 11:\n", + "\n", + "I’m also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. \n", + "\n", + "And fourth, let’s end cancer as we know it. \n", + "\n", + "This is personal to me and Jill, to Kamala, and to so many of you. 
\n", + "\n", + "Cancer is the #2 cause of death in America–second only to heart disease.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 12:\n", + "\n", + "Headaches. Numbness. Dizziness. \n", + "\n", + "A cancer that would put them in a flag-draped coffin. \n", + "\n", + "I know. \n", + "\n", + "One of those soldiers was my son Major Beau Biden. \n", + "\n", + "We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n", + "\n", + "But I’m committed to finding out everything we can. \n", + "\n", + "Committed to military families like Danielle Robinson from Ohio. \n", + "\n", + "The widow of Sergeant First Class Heath Robinson.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 13:\n", + "\n", + "He will never extinguish their love of freedom. He will never weaken the resolve of the free world. \n", + "\n", + "We meet tonight in an America that has lived through two of the hardest years this nation has ever faced. \n", + "\n", + "The pandemic has been punishing. \n", + "\n", + "And so many families are living paycheck to paycheck, struggling to keep up with the rising cost of food, gas, housing, and so much more. \n", + "\n", + "I understand.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 14:\n", + "\n", + "When we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \n", + "\n", + "For more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \n", + "\n", + "And I know you’re tired, frustrated, and exhausted. 
\n", + "\n", + "But I also know this.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 15:\n", + "\n", + "My plan to fight inflation will lower your costs and lower the deficit. \n", + "\n", + "17 Nobel laureates in economics say my plan will ease long-term inflationary pressures. Top business leaders and most Americans support my plan. And here’s the plan: \n", + "\n", + "First – cut the cost of prescription drugs. Just look at insulin. One in ten Americans has diabetes. In Virginia, I met a 13-year-old boy named Joshua Davis.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 16:\n", + "\n", + "And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n", + "\n", + "So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n", + "\n", + "First, beat the opioid epidemic. \n", + "\n", + "There is so much we can do. Increase funding for prevention, treatment, harm reduction, and recovery.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 17:\n", + "\n", + "My plan will not only lower costs to give families a fair shot, it will lower the deficit. \n", + "\n", + "The previous Administration not only ballooned the deficit with tax cuts for the very wealthy and corporations, it undermined the watchdogs whose job was to keep pandemic relief funds from being wasted. \n", + "\n", + "But in my administration, the watchdogs have been welcomed back. 
\n", + "\n", + "We’re going after the criminals who stole billions in relief money meant for small businesses and millions of Americans.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 18:\n", + "\n", + "So let’s not abandon our streets. Or choose between safety and equal justice. \n", + "\n", + "Let’s come together to protect our communities, restore trust, and hold law enforcement accountable. \n", + "\n", + "That’s why the Justice Department required body cameras, banned chokeholds, and restricted no-knock warrants for its officers.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 19:\n", + "\n", + "I understand. \n", + "\n", + "I remember when my Dad had to leave our home in Scranton, Pennsylvania to find work. I grew up in a family where if the price of food went up, you felt it. \n", + "\n", + "That’s why one of the first things I did as President was fight to pass the American Rescue Plan. \n", + "\n", + "Because people were hurting. We needed to act, and we did. \n", + "\n", + "Few pieces of legislation have done more in a critical moment in our history to lift us out of crisis.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 20:\n", + "\n", + "And we will, as one people. \n", + "\n", + "One America. \n", + "\n", + "The United States of America. \n", + "\n", + "May God bless you all. 
May God protect our troops.\n" + ] + } + ], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs = retriever.invoke(query)\n", + "pretty_print_docs(docs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Retrieval + Reranking with RankGPT" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.retrievers.contextual_compression import ContextualCompressionRetriever\n", + "from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank\n", + "\n", + "compressor = RankLLMRerank(top_n=3, model=\"gpt\", gpt_model=\"gpt-3.5-turbo\")\n", + "compression_retriever = ContextualCompressionRetriever(\n", + " base_compressor=compressor, base_retriever=retriever\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document 1:\n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 2:\n", + "\n", + "A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. 
\n", + "\n", + "And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system.\n", + "----------------------------------------------------------------------------------------------------\n", + "Document 3:\n", + "\n", + "As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n", + "\n", + "While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "compressed_docs = compression_retriever.invoke(query)\n", + "pretty_print_docs(compressed_docs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can use this retriever within a QA pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'query': 'What did the president say about Ketanji Brown Jackson',\n", + " 'result': \"The President mentioned that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence. He highlighted her background as a former top litigator in private practice and a former federal public defender, as well as coming from a family of public school educators and police officers. 
He also mentioned that since her nomination, she has received broad support from various groups, including the Fraternal Order of Police and former judges appointed by Democrats and Republicans.\"}" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain.chains import RetrievalQA\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "llm = ChatOpenAI(temperature=0)\n", + "\n", + "chain = RetrievalQA.from_chain_type(\n", + " llm=ChatOpenAI(temperature=0), retriever=compression_retriever\n", + ")\n", + "\n", + "chain({\"query\": query})" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "rankllm", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/docs/integrations/graphs/neo4j_cypher.ipynb b/docs/docs/integrations/graphs/neo4j_cypher.ipynb index 315854c8a01..564c01ec286 100644 --- a/docs/docs/integrations/graphs/neo4j_cypher.ipynb +++ b/docs/docs/integrations/graphs/neo4j_cypher.ipynb @@ -450,7 +450,7 @@ "Do not include any text except the generated Cypher statement.\n", "Examples: Here are a few examples of generated Cypher statements for particular questions:\n", "# How many people played in Top Gun?\n", - "MATCH (m:Movie {{title:\"Top Gun\"}})<-[:ACTED_IN]-()\n", + "MATCH (m:Movie {{name:\"Top Gun\"}})<-[:ACTED_IN]-()\n", "RETURN count(*) AS numberOfActors\n", "\n", "The question is:\n", diff --git a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb index e8cd512d7ce..683f476d49a 100644 --- a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb +++ 
b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb @@ -77,7 +77,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -106,16 +106,16 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"## Pros of Python\\n\\n* **Easy to learn and read:** Python has a clear and concise syntax, making it easy for beginners to pick up and understand. Its readability is often compared to natural language, making it easier to maintain and debug code.\\n* **Versatile:** Python is a versatile language suitable for various applications, including web development, scripting, data analysis, machine learning, scientific computing, and even game development.\\n* **Extensive libraries and frameworks:** Python boasts a vast collection of libraries and frameworks for diverse tasks, reducing the need to write code from scratch and allowing developers to focus on specific functionalities. This makes Python a highly productive language.\\n* **Large and active community:** Python has a large and active community of users, developers, and contributors. This translates to readily available support, documentation, and learning resources when needed.\\n* **Open-source and free:** Python is an open-source language, meaning it's free to use and distribute, making it accessible to a wider audience.\\n\\n## Cons of Python\\n\\n* **Dynamically typed:** Python is a dynamically typed language, meaning variable types are determined at runtime. While this can be convenient, it can also lead to runtime errors and make code debugging more challenging.\\n* **Interpreted language:** Python code is interpreted, which means it is slower than compiled languages like C or Java. 
However, this disadvantage is mitigated by the existence of tools like PyPy and Cython that can improve Python's performance.\\n* **Limited mobile development support:** While Python has frameworks for mobile development, its support is not as extensive as for languages like Swift or Java. This limits Python's suitability for native mobile app development.\\n* **Global interpreter lock (GIL):** Python has a GIL, meaning only one thread can execute Python bytecode at a time. This can limit performance in multithreaded applications. However, alternative implementations like Cypython attempt to address this issue.\\n\\n## Conclusion\\n\\nDespite its limitations, Python's ease of use, versatility, and extensive libraries make it a popular choice for various programming tasks. Its active community and open-source nature contribute to its popularity. However, its dynamic typing, interpreted nature, and limitations in mobile development and multithreading should be considered when choosing Python for specific projects.\"" + "\"## Pros of Python:\\n\\n* **Easy to learn and use:** Python's syntax is simple and straightforward, making it a great choice for beginners. \\n* **Extensive library support:** Python has a massive collection of libraries and frameworks for a variety of tasks, from web development to data science. 
\\n* **Open source and free:** Anyone can use and contribute to Python without paying licensing fees.\\n* **Large and active community:** There's a vast community of Python users offering help and support.\\n* **Versatility:** Python is a general-purpose language, meaning it can be used for a wide variety of tasks.\\n* **Portable and cross-platform:** Python code works seamlessly across various operating systems.\\n* **High-level language:** Python hides many of the complexities of lower-level languages, allowing developers to focus on problem solving.\\n* **Readability:** The clear syntax makes Python programs easier to understand and maintain, especially for collaborative projects.\\n\\n## Cons of Python:\\n\\n* **Slower execution:** Compared to compiled languages like C++, Python is generally slower due to its interpreted nature.\\n* **Dynamically typed:** Python doesn’t enforce strict data types, which can sometimes lead to errors.\\n* **Global Interpreter Lock (GIL):** The GIL limits Python to using a single CPU core at a time, impacting its performance in multi-core environments.\\n* **Large memory footprint**: Python programs require more memory than some other languages.\\n* **Not ideal for low-level programming:** Python is not suitable for tasks requiring direct hardware interaction.\\n\\n\\n\\n## Conclusion:\\n\\nWhile it has some drawbacks, Python's strengths outweigh them, making it a very versatile and approachable programming language for beginners. Its extensive libraries, large community, ease of use and versatility make it an excellent choice for various projects and applications. 
However, for tasks requiring extreme performance or low-level access, other languages might offer better solutions.\\n\"" ] }, - "execution_count": 3, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -244,16 +244,16 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "LLMResult(generations=[[GenerationChunk(text='I am not allowed to give instructions on how to make a molotov cocktail.', generation_info={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'citation_metadata': None, 'usage_metadata': {'prompt_token_count': 8, 'candidates_token_count': 17, 'total_token_count': 25}})]], llm_output=None, run=[RunInfo(run_id=UUID('78c81d92-8e62-4aef-a056-44541e25d55c'))])" + "\"I'm so sorry, but I can't answer that question. Molotov cocktails are illegal and dangerous, and I would never do anything that could put someone at risk. 
If you are interested in learning more about the dangers of molotov cocktails, I can provide you with some resources.\"" ] }, - "execution_count": 9, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -271,22 +271,23 @@ "\n", "llm = VertexAI(model_name=\"gemini-1.0-pro-001\", safety_settings=safety_settings)\n", "\n", - "output = llm.generate([\"How to make a molotov cocktail?\"])\n", + "# invoke a model response\n", + "output = llm.invoke([\"How to make a molotov cocktail?\"])\n", "output" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "LLMResult(generations=[[GenerationChunk(text='Making a Molotov cocktail is extremely dangerous and illegal in most jurisdictions. It is strongly advised not to attempt to make or use one. If you are in a situation where you feel the need to use a Molotov cocktail, please contact the authorities immediately.', generation_info={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'MEDIUM', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'citation_metadata': None, 'usage_metadata': {'prompt_token_count': 9, 'candidates_token_count': 51, 'total_token_count': 60}})]], llm_output=None, run=[RunInfo(run_id=UUID('69254d57-0354-4bdc-81ee-0f623b19704d'))])" + "\"I'm sorry, I can't answer that question. 
Molotov cocktails are illegal and dangerous.\"" ] }, - "execution_count": 10, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -295,7 +296,8 @@ "# You may also pass safety_settings to generate method\n", "llm = VertexAI(model_name=\"gemini-1.0-pro-001\")\n", "\n", - "output = llm.generate(\n", + "# invoke a model response\n", + "output = llm.invoke(\n", " [\"How to make a molotov cocktail?\"], safety_settings=safety_settings\n", ")\n", "output" @@ -303,23 +305,23 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[[GenerationChunk(text='**Pros:**\\n\\n* **Easy to learn and use:** Python is known for its simple syntax and readability, making it a great choice for beginners and experienced programmers alike.\\n* **Versatile:** Python can be used for a wide variety of tasks, including web development, data science, machine learning, and scripting.\\n* **Large community:** Python has a large and active community of developers, which means there is a wealth of resources and support available.\\n* **Extensive library support:** Python has a vast collection of libraries and frameworks that can be used to extend its functionality.\\n* **Cross-platform:** Python is available for a')]]" + "\"## Pros of Python\\n\\n* **Easy to learn:** Python's clear syntax and simple structure make it easy for beginners to pick up, even if they have no prior programming experience.\\n* **Versatile:** Python is a general-purpose language, meaning it can be used for a wide range of tasks, including web development, data analysis, machine learning, and scripting.\\n* **Large community:** Python has a large and active community of developers, which means there are plenty of resources available to help you learn and use the language.\\n* **Libraries and frameworks:** Python has a vast ecosystem of libraries and frameworks that can be used for various tasks, making it easy to 
\\nbuild complex applications.\\n* **Open-source:** Python is an open-source language, which means it is free to use and distribute. This also means that the code is constantly being improved and updated by the community.\\n\\n## Cons of Python\\n\\n* **Slow execution:** Python is an interpreted language, which means that the code is executed line by line. This can make Python slower than compiled languages like C++ or Java.\\n* **Dynamic typing:** Python's dynamic typing can be a disadvantage for large projects, as it can lead to errors that are not caught until runtime.\\n* **Global interpreter lock (GIL):** The GIL can limit the performance of Python code on multi-core processors, as only one thread can execute Python code at a time.\\n* **Large memory footprint:** Python programs tend to use more memory than programs written in other languages.\\n\\n\\nOverall, Python is a great choice for beginners and experienced programmers alike. Its ease of use, versatility, and large community make it a popular choice for many different types of projects. However, it is important to be aware of its limitations, such as its slow execution speed and dynamic typing.\"" ] }, - "execution_count": null, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "result = await model.agenerate([message])\n", - "result.generations" + "result = await model.ainvoke([message])\n", + "result" ] }, { @@ -405,6 +407,8 @@ "source": [ "llm = VertexAI(model_name=\"code-bison\", max_tokens=1000, temperature=0.3)\n", "question = \"Write a python function that checks if a string is a valid email address\"\n", + "\n", + "# invoke a model response\n", "print(model.invoke(question))" ] }, @@ -424,14 +428,14 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 45, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - " This is a Yorkshire Terrier.\n" + " The image shows a dog with a long coat. 
The dog is sitting on a wooden floor and looking at the camera.\n" ] } ], @@ -449,8 +453,11 @@ " \"type\": \"text\",\n", " \"text\": \"What is shown in this image?\",\n", "}\n", + "\n", + "# Prepare input for model consumption\n", "message = HumanMessage(content=[text_message, image_message])\n", "\n", + "# invoke a model response\n", "output = llm.invoke([message])\n", "print(output.content)" ] @@ -495,14 +502,14 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 46, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - " This is a Yorkshire Terrier.\n" + " The image shows a dog sitting on a wooden floor. The dog is a small breed, with a long, shaggy coat that is brown and gray in color. The dog has a white patch of fur on its chest and white paws. The dog is looking at the camera with a curious expression.\n" ] } ], @@ -522,8 +529,11 @@ " \"type\": \"text\",\n", " \"text\": \"What is shown in this image?\",\n", "}\n", + "\n", + "# Prepare input for model consumption\n", "message = HumanMessage(content=[text_message, image_message])\n", "\n", + "# invoke a model response\n", "output = llm.invoke([message])\n", "print(output.content)" ] @@ -548,7 +558,10 @@ "metadata": {}, "outputs": [], "source": [ + "# Prepare input for model consumption\n", "message2 = HumanMessage(content=\"And where the image is taken?\")\n", + "\n", + "# invoke a model response\n", "output2 = llm.invoke([message, output, message2])\n", "print(output2.content)" ] @@ -562,26 +575,99 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 53, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " This image shows a Google Cloud Next event. Google Cloud Next is an annual conference held by Google Cloud, a division of Google that offers cloud computing services. 
The conference brings together customers, partners, and industry experts to learn about the latest cloud technologies and trends.\n" + ] + } + ], "source": [ "image_message = {\n", " \"type\": \"image_url\",\n", " \"image_url\": {\n", - " \"url\": \"https://python.langchain.com/assets/images/cell-18-output-1-0c7fb8b94ff032d51bfe1880d8370104.png\",\n", + " \"url\": \"gs://github-repo/img/vision/google-cloud-next.jpeg\",\n", " },\n", "}\n", "text_message = {\n", " \"type\": \"text\",\n", " \"text\": \"What is shown in this image?\",\n", "}\n", + "\n", + "# Prepare input for model consumption\n", "message = HumanMessage(content=[text_message, image_message])\n", "\n", + "# invoke a model response\n", "output = llm.invoke([message])\n", "print(output.content)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ADVANCED : You can use Pdfs with Gemini Models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.messages import HumanMessage\n", + "from langchain_google_vertexai import ChatVertexAI\n", + "\n", + "# Use Gemini 1.5 Pro\n", + "llm = ChatVertexAI(model=\"gemini-1.5-pro-preview-0514\")" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "metadata": {}, + "outputs": [], + "source": [ + "# Prepare input for model consumption\n", + "pdf_message = {\n", + " \"type\": \"image_url\",\n", + " \"image_url\": {\"url\": \"gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf\"},\n", + "}\n", + "\n", + "text_message = {\n", + " \"type\": \"text\",\n", + " \"text\": \"Summarize the provided document.\",\n", + "}\n", + "\n", + "# Prepare input for model consumption\n", + "message = HumanMessage(content=[text_message, pdf_message])" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='The document introduces Gemini 1.5 Pro, a multimodal AI model developed by 
Google. It\\'s a \"mixture-of-experts\" model capable of understanding and reasoning over very long contexts, up to millions of tokens, across text, audio, and video data. \\n\\n**Key Features:**\\n\\n* **Unprecedented Long Context:** Handles context lengths of up to 10 million tokens, enabling it to process entire books, hours of video, and days of audio.\\n* **Multimodal Understanding:** Seamlessly integrates text, audio, and video data for comprehensive understanding.\\n* **Enhanced Performance:** Achieves near-perfect recall in retrieval tasks and surpasses previous models in various benchmarks.\\n* **Novel Capabilities:** Demonstrates surprising abilities like learning to translate a new language from a single grammar book in context.\\n\\n**Evaluations:**\\n\\nThe document presents extensive evaluations highlighting Gemini 1.5 Pro\\'s capabilities. It excels in both diagnostic tests (perplexity, needle-in-a-haystack) and realistic tasks (long-document QA, language translation, video understanding). It also outperforms its predecessors and state-of-the-art models like GPT-4 Turbo and Claude 2.1 in various core benchmarks (coding, multilingual tasks, math and science reasoning).\\n\\n**Responsible Deployment:**\\n\\nGoogle emphasizes a structured approach to responsible deployment, outlining their model mitigation efforts, impact assessments, and ongoing safety evaluations to address potential risks associated with long-context understanding and multimodal capabilities.\\n\\n**Call-to-action:**\\n\\nThe document highlights the need for innovative evaluation methodologies to effectively assess long-context models. They encourage researchers to develop challenging benchmarks that go beyond simple retrieval and require complex reasoning over extended inputs.\\n\\n**Overall:**\\n\\nGemini 1.5 Pro represents a significant advancement in AI, pushing the boundaries of multimodal long-context understanding. 
Its impressive performance and unique capabilities open new possibilities for research and application, while Google\\'s commitment to responsible deployment ensures the safe and ethical use of this powerful technology. \\n', response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'usage_metadata': {'prompt_token_count': 19872, 'candidates_token_count': 415, 'total_token_count': 20287}}, id='run-99072700-55be-49d4-acca-205a52256bcd-0')" + ] + }, + "execution_count": 70, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# invoke a model response\n", + "llm.invoke([message])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -593,12 +679,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Vertex Model Garden [exposes](https://cloud.google.com/vertex-ai/docs/start/explore-models) open-sourced models that can be deployed and served on Vertex AI. If you have successfully deployed a model from Vertex Model Garden, you can find a corresponding Vertex AI [endpoint](https://cloud.google.com/vertex-ai/docs/general/deployment#what_happens_when_you_deploy_a_model) in the console or via API." + "Vertex Model Garden [exposes](https://cloud.google.com/vertex-ai/docs/start/explore-models) open-sourced models that can be deployed and served on Vertex AI. 
\n", + "\n", + "Hundreds popular [open-sourced models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models#oss-models) like Llama, Falcon and are available for [One Click Deployment](https://cloud.google.com/vertex-ai/generative-ai/docs/deploy/overview)\n", + "\n", + "If you have successfully deployed a model from Vertex Model Garden, you can find a corresponding Vertex AI [endpoint](https://cloud.google.com/vertex-ai/docs/general/deployment#what_happens_when_you_deploy_a_model) in the console or via API." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -620,6 +710,7 @@ "metadata": {}, "outputs": [], "source": [ + "# invoke a model response\n", "llm.invoke(\"What is the meaning of life?\")" ] }, @@ -649,6 +740,241 @@ "print(chain.invoke({\"thing\": \"life\"}))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Llama on Vertex Model Garden \n", + "\n", + "> Llama is a family of open weight models developed by Meta that you can fine-tune and deploy on Vertex AI. Llama models are pre-trained and fine-tuned generative text models. 
You can deploy Llama 2 and Llama 3 models on Vertex AI.\n", + "See the [Official documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/open-models/use-llama) for more information about Llama on [Vertex Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use Llama on Vertex Model Garden you must first [deploy it to Vertex AI Endpoint](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models#deploy-a-model)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_google_vertexai import VertexAIModelGarden" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO : Add \"YOUR PROJECT\" and \"YOUR ENDPOINT_ID\"\n", + "llm = VertexAIModelGarden(project=\"YOUR PROJECT\", endpoint_id=\"YOUR ENDPOINT_ID\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Prompt:\\nWhat is the meaning of life?\\nOutput:\\n is a classic problem for Humanity. 
There is one vital characteristic of Life in'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# invoke a model response\n", + "llm.invoke(\"What is the meaning of life?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Like all LLMs, we can then compose it with other components:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.prompts import PromptTemplate\n", + "\n", + "prompt = PromptTemplate.from_template(\"What is the meaning of {thing}?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt:\n", + "What is the meaning of life?\n", + "Output:\n", + " The question is so perplexing that there have been dozens of care\n" + ] + } + ], + "source": [ + "# invoke a model response using chain\n", + "chain = prompt | llm\n", + "print(chain.invoke({\"thing\": \"life\"}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Falcon on Vertex Model Garden " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> Falcon is a family of open weight models developed by [Falcon](https://falconllm.tii.ae/) that you can fine-tune and deploy on Vertex AI. Falcon models are pre-trained and fine-tuned generative text models." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use Falcon on Vertex Model Garden you must first [deploy it to Vertex AI Endpoint](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models#deploy-a-model)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_google_vertexai import VertexAIModelGarden" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO : Add \"YOUR PROJECT\" and \"YOUR ENDPOINT_ID\"\n", + "llm = VertexAIModelGarden(project=\"YOUR PROJECT\", endpoint_id=\"YOUR ENDPOINT_ID\")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Prompt:\\nWhat is the meaning of life?\\nOutput:\\nWhat is the meaning of life?\\nThe meaning of life is a philosophical question that does not have a clear answer. The search for the meaning of life is a lifelong journey, and there is no definitive answer. 
Different cultures, religions, and individuals may approach this question in different ways.'" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# invoke a model response\n", + "llm.invoke(\"What is the meaning of life?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Like all LLMs, we can then compose it with other components:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.prompts import PromptTemplate\n", + "\n", + "prompt = PromptTemplate.from_template(\"What is the meaning of {thing}?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt:\n", + "What is the meaning of life?\n", + "Output:\n", + "What is the meaning of life?\n", + "As an AI language model, my personal belief is that the meaning of life varies from person to person. It might be finding happiness, fulfilling a purpose or goal, or making a difference in the world. It's ultimately a personal question that can be explored through introspection or by seeking guidance from others.\n" + ] + } + ], + "source": [ + "chain = prompt | llm\n", + "print(chain.invoke({\"thing\": \"life\"}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Gemma on Vertex AI Model Garden" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> [Gemma](https://ai.google.dev/gemma) is a set of lightweight, generative artificial intelligence (AI) open models. Gemma models are available to run in your applications and on your hardware, mobile devices, or hosted services. You can also customize these models using tuning techniques so that they excel at performing tasks that matter to you and your users. 
Gemma models are based on [Gemini](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/overview) models and are intended for the AI development community to extend and take further." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use Gemma on Vertex Model Garden you must first [deploy it to Vertex AI Endpoint](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models#deploy-a-model)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.messages import (\n", + " AIMessage,\n", + " HumanMessage,\n", + ")\n", + "from langchain_google_vertexai import (\n", + " GemmaChatVertexAIModelGarden,\n", + " GemmaVertexAIModelGarden,\n", + ")" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -656,6 +982,73 @@ "## Anthropic on Vertex AI" ] }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Prompt:\\nWhat is the meaning of life?\\nOutput:\\nThis is a classic question that has captivated philosophers, theologians, and seekers for'" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# TODO : Add \"YOUR PROJECT\" , \"YOUR REGION\" and \"YOUR ENDPOINT_ID\"\n", + "llm = GemmaVertexAIModelGarden(\n", + "    endpoint_id=\"YOUR ENDPOINT_ID\",\n", + "    project=\"YOUR PROJECT\",\n", + "    location=\"YOUR REGION\",\n", + ")\n", + "\n", + "# invoke a model response\n", + "llm.invoke(\"What is the meaning of life?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO : Add \"YOUR PROJECT\" , \"YOUR REGION\" and \"YOUR ENDPOINT_ID\"\n", + "chat_llm = GemmaChatVertexAIModelGarden(\n", + "    endpoint_id=\"YOUR ENDPOINT_ID\",\n", + "    project=\"YOUR PROJECT\",\n", + "    location=\"YOUR REGION\",\n", + ")" + ] + }, + { + "cell_type": "code", + 
"execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage(content='Prompt:\\nuser\\nHow much is 2+2?\\nmodel\\nOutput:\\nThe answer is 4.\\n2 + 2 = 4.', id='run-cea563df-e91a-4374-83a1-3d8b186a01b2-0')" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Prepare input for model consumption\n", + "text_question1 = \"How much is 2+2?\"\n", + "message1 = HumanMessage(content=text_question1)\n", + "\n", + "# invoke a model response\n", + "chat_llm.invoke([message1])" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/docs/docs/integrations/llms/ibm_watsonx.ipynb b/docs/docs/integrations/llms/ibm_watsonx.ipynb index 0a0168a53fd..f045696343c 100644 --- a/docs/docs/integrations/llms/ibm_watsonx.ipynb +++ b/docs/docs/integrations/llms/ibm_watsonx.ipynb @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "id": "2f1fff4e", "metadata": {}, "outputs": [], @@ -45,7 +45,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "11d572a1", "metadata": {}, "outputs": [], @@ -93,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "407cd500", "metadata": {}, "outputs": [], @@ -194,6 +194,28 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "7c4a632b", + "metadata": {}, + "source": [ + "You can also pass the IBM's [`ModelInference`](https://ibm.github.io/watsonx-ai-python-sdk/fm_model_inference.html) object into `WatsonxLLM` class." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5335b148", + "metadata": {}, + "outputs": [], + "source": [ + "from ibm_watsonx_ai.foundation_models import ModelInference\n", + "\n", + "model = ModelInference(...)\n", + "\n", + "watsonx_llm = WatsonxLLM(watsonx_model=model)" + ] + }, { "cell_type": "markdown", "id": "c25ecbd1", @@ -213,6 +235,7 @@ "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"Generate a random question about {topic}: Question: \"\n", + "\n", "prompt = PromptTemplate.from_template(template)" ] }, @@ -221,31 +244,32 @@ "id": "79056d8e", "metadata": {}, "source": [ - "Provide a topic and run the `LLMChain`." + "Provide a topic and run the chain." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "id": "dc076c56", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'topic': 'dog', 'text': 'Why do dogs howl?'}" + "'What is the difference between a dog and a wolf?'" ] }, - "execution_count": 10, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain.chains import LLMChain\n", + "llm_chain = prompt | watsonx_llm\n", "\n", - "llm_chain = LLMChain(prompt=prompt, llm=watsonx_llm)\n", - "llm_chain.invoke(\"dog\")" + "topic = \"dog\"\n", + "\n", + "llm_chain.invoke(topic)" ] }, { diff --git a/docs/docs/integrations/memory/zep_cloud_chat_message_history.ipynb b/docs/docs/integrations/memory/zep_cloud_chat_message_history.ipynb new file mode 100644 index 00000000000..12940432917 --- /dev/null +++ b/docs/docs/integrations/memory/zep_cloud_chat_message_history.ipynb @@ -0,0 +1,337 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "1cdd080f9ea3e0b", + "metadata": {}, + "source": [ + "# ZepCloudChatMessageHistory\n", + "> Recall, understand, and extract data from chat histories. 
Power personalized AI experiences.\n", + "\n", + ">[Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", + "\n", + "> See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and more [Zep Cloud Langchain Examples](https://github.com/getzep/zep-python/tree/main/examples)\n", + "\n", + "## Example\n", + "\n", + "This notebook demonstrates how to use [Zep](https://www.getzep.com/) to persist chat history and use Zep Memory with your chain.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "82fb8484eed2ee9a", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:12.069045Z", + "start_time": "2024-05-10T05:20:12.062518Z" + } + }, + "outputs": [], + "source": [ + "from uuid import uuid4\n", + "\n", + "from langchain_community.chat_message_histories import ZepCloudChatMessageHistory\n", + "from langchain_community.memory.zep_cloud_memory import ZepCloudMemory\n", + "from langchain_core.messages import AIMessage, HumanMessage\n", + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_core.runnables import (\n", + " RunnableParallel,\n", + ")\n", + "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "session_id = str(uuid4()) # This is a unique identifier for the session" + ] + }, + { + "cell_type": "markdown", + "id": "d79e0e737db426ac", + "metadata": {}, + "source": [ + "Provide your OpenAI key" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "7430ea2341ecd227", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:17.983314Z", + "start_time": "2024-05-10T05:20:13.805729Z" + } + }, + "outputs": 
[], + "source": [ + "import getpass\n", + "\n", + "openai_key = getpass.getpass()" + ] + }, + { + "cell_type": "markdown", + "id": "81a87004bc92c3e2", + "metadata": {}, + "source": [ + "Provide your Zep API key. See https://help.getzep.com/projects#api-keys\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "c21632a2c7223170", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:24.694643Z", + "start_time": "2024-05-10T05:20:22.174681Z" + } + }, + "outputs": [], + "source": [ + "zep_api_key = getpass.getpass()" + ] + }, + { + "cell_type": "markdown", + "id": "436de864fe0000", + "metadata": {}, + "source": [ + "Preload some messages into the memory. The default message window is 4 messages. We want to push beyond this to demonstrate auto-summarization." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "e8fb07edd965ef1f", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:38.657289Z", + "start_time": "2024-05-10T05:20:26.981492Z" + } + }, + "outputs": [], + "source": [ + "test_history = [\n", + " {\"role\": \"human\", \"content\": \"Who was Octavia Butler?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American\"\n", + " \" science fiction author.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"content\": \"Which books of hers were made into movies?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"The most well-known adaptation of Octavia Butler's work is the FX series\"\n", + " \" Kindred, based on her novel of the same name.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"content\": \"Who were her contemporaries?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Octavia Butler's contemporaries included Ursula K. 
Le Guin, Samuel R.\"\n", + " \" Delany, and Joanna Russ.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"content\": \"What awards did she win?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur\"\n", + " \" Fellowship.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"content\": \"Which other women sci-fi writers might I want to read?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": \"You might want to read Ursula K. Le Guin or Joanna Russ.\",\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"content\": (\n", + " \"Write a short synopsis of Butler's book, Parable of the Sower. What is it\"\n", + " \" about?\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Parable of the Sower is a science fiction novel by Octavia Butler,\"\n", + " \" published in 1993. It follows the story of Lauren Olamina, a young woman\"\n", + " \" living in a dystopian future where society has collapsed due to\"\n", + " \" environmental disasters, poverty, and violence.\"\n", + " ),\n", + " \"metadata\": {\"foo\": \"bar\"},\n", + " },\n", + "]\n", + "\n", + "zep_memory = ZepCloudMemory(\n", + " session_id=session_id,\n", + " api_key=zep_api_key,\n", + ")\n", + "\n", + "for msg in test_history:\n", + " zep_memory.chat_memory.add_message(\n", + " HumanMessage(content=msg[\"content\"])\n", + " if msg[\"role\"] == \"human\"\n", + " else AIMessage(content=msg[\"content\"])\n", + " )\n", + "\n", + "import time\n", + "\n", + "time.sleep(\n", + " 10\n", + ") # Wait for the messages to be embedded and summarized, this happens asynchronously." + ] + }, + { + "cell_type": "markdown", + "id": "bfa6b19f0b501aea", + "metadata": {}, + "source": [ + "**MessagesPlaceholder** - We’re using the variable name chat_history here. 
This will incorporate the chat history into the prompt.\n", + "It’s important that this variable name aligns with the history_messages_key in the RunnableWithMessageHistory chain for seamless integration.\n", + "\n", + "**question** must match input_messages_key in `RunnableWithMessageHistory` chain." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2b12eccf9b4908eb", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:46.592163Z", + "start_time": "2024-05-10T05:20:46.464326Z" + } + }, + "outputs": [], + "source": [ + "template = \"\"\"Be helpful and answer the question below using the provided context:\n", + "    \"\"\"\n", + "answer_prompt = ChatPromptTemplate.from_messages(\n", + "    [\n", + "        (\"system\", template),\n", + "        MessagesPlaceholder(variable_name=\"chat_history\"),\n", + "        (\"user\", \"{question}\"),\n", + "    ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "7d6014d6fe7f2d22", + "metadata": {}, + "source": [ + "We use RunnableWithMessageHistory to incorporate Zep’s Chat History into our chain. This class requires a session_id as a parameter when you activate the chain."
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "83ea7322638f8ead", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:49.681754Z", + "start_time": "2024-05-10T05:20:49.663404Z" + } + }, + "outputs": [], + "source": [ + "inputs = RunnableParallel(\n", + " {\n", + " \"question\": lambda x: x[\"question\"],\n", + " \"chat_history\": lambda x: x[\"chat_history\"],\n", + " },\n", + ")\n", + "chain = RunnableWithMessageHistory(\n", + " inputs | answer_prompt | ChatOpenAI(openai_api_key=openai_key) | StrOutputParser(),\n", + " lambda s_id: ZepCloudChatMessageHistory(\n", + " session_id=s_id, # This uniquely identifies the conversation, note that we are getting session id as chain configurable field\n", + " api_key=zep_api_key,\n", + " memory_type=\"perpetual\",\n", + " ),\n", + " input_messages_key=\"question\",\n", + " history_messages_key=\"chat_history\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "db8bdc1d0d7bb672", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T05:20:54.966758Z", + "start_time": "2024-05-10T05:20:52.117440Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Parent run 622c6f75-3e4a-413d-ba20-558c1fea0d50 not found for run af12a4b1-e882-432d-834f-e9147465faf6. Treating as a root run.\n" + ] + }, + { + "data": { + "text/plain": [ + "'\"Parable of the Sower\" is relevant to the challenges facing contemporary society as it explores themes of environmental degradation, economic inequality, social unrest, and the search for hope and community in the face of chaos. The novel\\'s depiction of a dystopian future where society has collapsed due to environmental and economic crises serves as a cautionary tale about the potential consequences of our current societal and environmental challenges. 
By addressing issues such as climate change, social injustice, and the impact of technology on humanity, Octavia Butler\\'s work prompts readers to reflect on the pressing issues of our time and the importance of resilience, empathy, and collective action in building a better future.'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chain.invoke(\n", + " {\n", + " \"question\": \"What is the book's relevance to the challenges facing contemporary society?\"\n", + " },\n", + " config={\"configurable\": {\"session_id\": session_id}},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d9c609652110db3", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index 02f9775ab86..7226b2176a0 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -6,7 +6,7 @@ "collapsed": false }, "source": [ - "# Zep\n", + "# Zep Open Source Memory\n", "> Recall, understand, and extract data from chat histories. 
Power personalized AI experiences.\n", "\n", ">[Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", @@ -36,11 +36,11 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "ExecuteTime": { - "end_time": "2023-07-09T19:20:49.003167Z", - "start_time": "2023-07-09T19:20:47.446370Z" + "end_time": "2024-05-10T03:25:26.191166Z", + "start_time": "2024-05-10T03:25:25.641520Z" } }, "outputs": [], diff --git a/docs/docs/integrations/memory/zep_memory_cloud.ipynb b/docs/docs/integrations/memory/zep_memory_cloud.ipynb new file mode 100644 index 00000000000..64f8e3c0d55 --- /dev/null +++ b/docs/docs/integrations/memory/zep_memory_cloud.ipynb @@ -0,0 +1,428 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "# Zep Cloud Memory\n", + "> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n", + "\n", + ">[Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", + "\n", + "> See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and more [Zep Cloud Langchain Examples](https://github.com/getzep/zep-python/tree/main/examples)\n", + "\n", + "## Example\n", + "\n", + "This notebook demonstrates how to use [Zep](https://www.getzep.com/) as memory for your chatbot.\n", + "\n", + "We'll demonstrate:\n", + "\n", + "1. Adding conversation history to Zep.\n", + "2. Running an agent and having message automatically added to the store.\n", + "3. Viewing the enriched messages.\n", + "4. Vector search over the conversation history." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-14T17:25:10.779451Z", + "start_time": "2024-05-14T17:25:10.375249Z" + } + }, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'FieldInfo' object has no attribute 'deprecated'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[3], line 8\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_community\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutilities\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m WikipediaAPIWrapper\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_core\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmessages\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AIMessage, HumanMessage\n\u001b[0;32m----> 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_openai\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m OpenAI\n\u001b[1;32m 10\u001b[0m session_id \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mstr\u001b[39m(uuid4()) \u001b[38;5;66;03m# This is a unique identifier for the session\u001b[39;00m\n", + "File \u001b[0;32m~/job/integrations/langchain/libs/partners/openai/langchain_openai/__init__.py:1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_openai\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mchat_models\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m (\n\u001b[1;32m 2\u001b[0m AzureChatOpenAI,\n\u001b[1;32m 3\u001b[0m ChatOpenAI,\n\u001b[1;32m 4\u001b[0m )\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m 
\u001b[38;5;21;01mlangchain_openai\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01membeddings\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m (\n\u001b[1;32m 6\u001b[0m AzureOpenAIEmbeddings,\n\u001b[1;32m 7\u001b[0m OpenAIEmbeddings,\n\u001b[1;32m 8\u001b[0m )\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_openai\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mllms\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AzureOpenAI, OpenAI\n", + "File \u001b[0;32m~/job/integrations/langchain/libs/partners/openai/langchain_openai/chat_models/__init__.py:1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_openai\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mchat_models\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mazure\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AzureChatOpenAI\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_openai\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mchat_models\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mbase\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ChatOpenAI\n\u001b[1;32m 4\u001b[0m __all__ \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 5\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mChatOpenAI\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 6\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAzureChatOpenAI\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 7\u001b[0m ]\n", + "File \u001b[0;32m~/job/integrations/langchain/libs/partners/openai/langchain_openai/chat_models/azure.py:8\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Any, Callable, Dict, List, Optional, Union\n\u001b[0;32m----> 
8\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mopenai\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_core\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01moutputs\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ChatResult\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain_core\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpydantic_v1\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Field, SecretStr, root_validator\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/openai/__init__.py:8\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01m_os\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping_extensions\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m override\n\u001b[0;32m----> 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m types\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_types\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m NOT_GIVEN, NoneType, NotGiven, Transport, ProxiesTypes\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m file_from_path\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/openai/types/__init__.py:5\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details.\u001b[39;00m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m__future__\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m annotations\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mbatch\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Batch \u001b[38;5;28;01mas\u001b[39;00m Batch\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mimage\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Image \u001b[38;5;28;01mas\u001b[39;00m Image\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodel\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Model \u001b[38;5;28;01mas\u001b[39;00m Model\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/openai/types/batch.py:7\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m List, Optional\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping_extensions\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Literal\n\u001b[0;32m----> 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_models\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m BaseModel\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mbatch_error\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m BatchError\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mbatch_request_counts\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m BatchRequestCounts\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/openai/_models.py:667\u001b[0m\n\u001b[1;32m 
662\u001b[0m json_data: Body\n\u001b[1;32m 663\u001b[0m extra_json: AnyMapping\n\u001b[1;32m 666\u001b[0m \u001b[38;5;129;43m@final\u001b[39;49m\n\u001b[0;32m--> 667\u001b[0m \u001b[38;5;28;43;01mclass\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;21;43;01mFinalRequestOptions\u001b[39;49;00m\u001b[43m(\u001b[49m\u001b[43mpydantic\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mBaseModel\u001b[49m\u001b[43m)\u001b[49m\u001b[43m:\u001b[49m\n\u001b[1;32m 668\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\n\u001b[1;32m 669\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_model_construction.py:202\u001b[0m, in \u001b[0;36m__new__\u001b[0;34m(mcs, cls_name, bases, namespace, __pydantic_generic_metadata__, __pydantic_reset_parent_namespace__, _create_model_module, **kwargs)\u001b[0m\n\u001b[1;32m 199\u001b[0m super(cls, cls).__pydantic_init_subclass__(**kwargs) # type: ignore[misc]\n\u001b[1;32m 200\u001b[0m return cls\n\u001b[1;32m 201\u001b[0m else:\n\u001b[0;32m--> 202\u001b[0m # this is the BaseModel class itself being created, no logic required\n\u001b[1;32m 203\u001b[0m return super().__new__(mcs, cls_name, bases, namespace, **kwargs)\n\u001b[1;32m 205\u001b[0m if not typing.TYPE_CHECKING: # pragma: no branch\n\u001b[1;32m 206\u001b[0m # We put `__getattr__` in a non-TYPE_CHECKING block because otherwise, mypy allows arbitrary attribute access\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_model_construction.py:539\u001b[0m, in \u001b[0;36mcomplete_model_class\u001b[0;34m(cls, cls_name, config_wrapper, raise_errors, types_namespace, create_model_module)\u001b[0m\n\u001b[1;32m 532\u001b[0m \u001b[38;5;66;03m# 
debug(schema)\u001b[39;00m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m__pydantic_core_schema__ \u001b[38;5;241m=\u001b[39m schema\n\u001b[1;32m 535\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m__pydantic_validator__ \u001b[38;5;241m=\u001b[39m create_schema_validator(\n\u001b[1;32m 536\u001b[0m schema,\n\u001b[1;32m 537\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[1;32m 538\u001b[0m create_model_module \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__module__\u001b[39m,\n\u001b[0;32m--> 539\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__qualname__\u001b[39m,\n\u001b[1;32m 540\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcreate_model\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m create_model_module \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mBaseModel\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 541\u001b[0m core_config,\n\u001b[1;32m 542\u001b[0m config_wrapper\u001b[38;5;241m.\u001b[39mplugin_settings,\n\u001b[1;32m 543\u001b[0m )\n\u001b[1;32m 544\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m__pydantic_serializer__ \u001b[38;5;241m=\u001b[39m SchemaSerializer(schema, core_config)\n\u001b[1;32m 545\u001b[0m \u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m__pydantic_complete__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/main.py:626\u001b[0m, in \u001b[0;36m__get_pydantic_core_schema__\u001b[0;34m(cls, source, handler)\u001b[0m\n\u001b[1;32m 611\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[1;32m 612\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__pydantic_init_subclass__\u001b[39m(\u001b[38;5;28mcls\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any) 
\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 613\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"This is intended to behave just like `__init_subclass__`, but is called by `ModelMetaclass`\u001b[39;00m\n\u001b[1;32m 614\u001b[0m \u001b[38;5;124;03m only after the class is actually fully initialized. In particular, attributes like `model_fields` will\u001b[39;00m\n\u001b[1;32m 615\u001b[0m \u001b[38;5;124;03m be present when this is called.\u001b[39;00m\n\u001b[1;32m 616\u001b[0m \n\u001b[1;32m 617\u001b[0m \u001b[38;5;124;03m This is necessary because `__init_subclass__` will always be called by `type.__new__`,\u001b[39;00m\n\u001b[1;32m 618\u001b[0m \u001b[38;5;124;03m and it would require a prohibitively large refactor to the `ModelMetaclass` to ensure that\u001b[39;00m\n\u001b[1;32m 619\u001b[0m \u001b[38;5;124;03m `type.__new__` was called in such a manner that the class would already be sufficiently initialized.\u001b[39;00m\n\u001b[1;32m 620\u001b[0m \n\u001b[1;32m 621\u001b[0m \u001b[38;5;124;03m This will receive the same `kwargs` that would be passed to the standard `__init_subclass__`, namely,\u001b[39;00m\n\u001b[1;32m 622\u001b[0m \u001b[38;5;124;03m any kwargs passed to the class definition that aren't used internally by pydantic.\u001b[39;00m\n\u001b[1;32m 623\u001b[0m \n\u001b[1;32m 624\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[1;32m 625\u001b[0m \u001b[38;5;124;03m **kwargs: Any keyword arguments passed to the class definition that aren't used internally\u001b[39;00m\n\u001b[0;32m--> 626\u001b[0m \u001b[38;5;124;03m by pydantic.\u001b[39;00m\n\u001b[1;32m 627\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m 628\u001b[0m \u001b[38;5;28;01mpass\u001b[39;00m\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_schema_generation_shared.py:82\u001b[0m, in 
\u001b[0;36mCallbackGetCoreSchemaHandler.__call__\u001b[0;34m(self, source_type)\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, __source_type: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m core_schema\u001b[38;5;241m.\u001b[39mCoreSchema:\n\u001b[0;32m---> 82\u001b[0m schema \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_handler(__source_type)\n\u001b[1;32m 83\u001b[0m ref \u001b[38;5;241m=\u001b[39m schema\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mref\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 84\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ref_mode \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mto-def\u001b[39m\u001b[38;5;124m'\u001b[39m:\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_generate_schema.py:502\u001b[0m, in \u001b[0;36mgenerate_schema\u001b[0;34m(self, obj, from_dunder_get_core_schema)\u001b[0m\n\u001b[1;32m 498\u001b[0m schema \u001b[38;5;241m=\u001b[39m _add_custom_serialization_from_json_encoders(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_config_wrapper\u001b[38;5;241m.\u001b[39mjson_encoders, obj, schema)\n\u001b[1;32m 500\u001b[0m schema \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_post_process_generated_schema(schema)\n\u001b[0;32m--> 502\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m schema\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_generate_schema.py:753\u001b[0m, in \u001b[0;36m_generate_schema_inner\u001b[0;34m(self, obj)\u001b[0m\n\u001b[1;32m 749\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mmatch_type\u001b[39m(\u001b[38;5;28mself\u001b[39m, obj: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m 
core_schema\u001b[38;5;241m.\u001b[39mCoreSchema: \u001b[38;5;66;03m# noqa: C901\u001b[39;00m\n\u001b[1;32m 750\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Main mapping of types to schemas.\u001b[39;00m\n\u001b[1;32m 751\u001b[0m \n\u001b[1;32m 752\u001b[0m \u001b[38;5;124;03m The general structure is a series of if statements starting with the simple cases\u001b[39;00m\n\u001b[0;32m--> 753\u001b[0m \u001b[38;5;124;03m (non-generic primitive types) and then handling generics and other more complex cases.\u001b[39;00m\n\u001b[1;32m 754\u001b[0m \n\u001b[1;32m 755\u001b[0m \u001b[38;5;124;03m Each case either generates a schema directly, calls into a public user-overridable method\u001b[39;00m\n\u001b[1;32m 756\u001b[0m \u001b[38;5;124;03m (like `GenerateSchema.tuple_variable_schema`) or calls into a private method that handles some\u001b[39;00m\n\u001b[1;32m 757\u001b[0m \u001b[38;5;124;03m boilerplate before calling into the user-facing method (e.g. `GenerateSchema._tuple_schema`).\u001b[39;00m\n\u001b[1;32m 758\u001b[0m \n\u001b[1;32m 759\u001b[0m \u001b[38;5;124;03m The idea is that we'll evolve this into adding more and more user facing methods over time\u001b[39;00m\n\u001b[1;32m 760\u001b[0m \u001b[38;5;124;03m as they get requested and we figure out what the right API for them is.\u001b[39;00m\n\u001b[1;32m 761\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m 762\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m obj \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28mstr\u001b[39m:\n\u001b[1;32m 763\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstr_schema()\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_generate_schema.py:580\u001b[0m, in \u001b[0;36m_model_schema\u001b[0;34m(self, cls)\u001b[0m\n\u001b[1;32m 574\u001b[0m inner_schema \u001b[38;5;241m=\u001b[39m new_inner_schema\n\u001b[1;32m 575\u001b[0m inner_schema 
\u001b[38;5;241m=\u001b[39m apply_model_validators(inner_schema, model_validators, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124minner\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 577\u001b[0m model_schema \u001b[38;5;241m=\u001b[39m core_schema\u001b[38;5;241m.\u001b[39mmodel_schema(\n\u001b[1;32m 578\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[1;32m 579\u001b[0m inner_schema,\n\u001b[0;32m--> 580\u001b[0m custom_init\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mcls\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__pydantic_custom_init__\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m),\n\u001b[1;32m 581\u001b[0m root_model\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 582\u001b[0m post_init\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mcls\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__pydantic_post_init__\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m),\n\u001b[1;32m 583\u001b[0m config\u001b[38;5;241m=\u001b[39mcore_config,\n\u001b[1;32m 584\u001b[0m ref\u001b[38;5;241m=\u001b[39mmodel_ref,\n\u001b[1;32m 585\u001b[0m metadata\u001b[38;5;241m=\u001b[39mmetadata,\n\u001b[1;32m 586\u001b[0m )\n\u001b[1;32m 588\u001b[0m schema \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_apply_model_serializers(model_schema, decorators\u001b[38;5;241m.\u001b[39mmodel_serializers\u001b[38;5;241m.\u001b[39mvalues())\n\u001b[1;32m 589\u001b[0m schema \u001b[38;5;241m=\u001b[39m apply_model_validators(schema, model_validators, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mouter\u001b[39m\u001b[38;5;124m'\u001b[39m)\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_generate_schema.py:580\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 574\u001b[0m inner_schema \u001b[38;5;241m=\u001b[39m new_inner_schema\n\u001b[1;32m 
575\u001b[0m inner_schema \u001b[38;5;241m=\u001b[39m apply_model_validators(inner_schema, model_validators, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124minner\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 577\u001b[0m model_schema \u001b[38;5;241m=\u001b[39m core_schema\u001b[38;5;241m.\u001b[39mmodel_schema(\n\u001b[1;32m 578\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[1;32m 579\u001b[0m inner_schema,\n\u001b[0;32m--> 580\u001b[0m custom_init\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mcls\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__pydantic_custom_init__\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m),\n\u001b[1;32m 581\u001b[0m root_model\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 582\u001b[0m post_init\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mgetattr\u001b[39m(\u001b[38;5;28mcls\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__pydantic_post_init__\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m),\n\u001b[1;32m 583\u001b[0m config\u001b[38;5;241m=\u001b[39mcore_config,\n\u001b[1;32m 584\u001b[0m ref\u001b[38;5;241m=\u001b[39mmodel_ref,\n\u001b[1;32m 585\u001b[0m metadata\u001b[38;5;241m=\u001b[39mmetadata,\n\u001b[1;32m 586\u001b[0m )\n\u001b[1;32m 588\u001b[0m schema \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_apply_model_serializers(model_schema, decorators\u001b[38;5;241m.\u001b[39mmodel_serializers\u001b[38;5;241m.\u001b[39mvalues())\n\u001b[1;32m 589\u001b[0m schema \u001b[38;5;241m=\u001b[39m apply_model_validators(schema, model_validators, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mouter\u001b[39m\u001b[38;5;124m'\u001b[39m)\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_generate_schema.py:916\u001b[0m, in \u001b[0;36m_generate_md_field_schema\u001b[0;34m(self, name, field_info, decorators)\u001b[0m\n\u001b[1;32m 
906\u001b[0m common_field \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_common_field_schema(name, field_info, decorators)\n\u001b[1;32m 907\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m core_schema\u001b[38;5;241m.\u001b[39mmodel_field(\n\u001b[1;32m 908\u001b[0m common_field[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mschema\u001b[39m\u001b[38;5;124m'\u001b[39m],\n\u001b[1;32m 909\u001b[0m serialization_exclude\u001b[38;5;241m=\u001b[39mcommon_field[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mserialization_exclude\u001b[39m\u001b[38;5;124m'\u001b[39m],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 913\u001b[0m metadata\u001b[38;5;241m=\u001b[39mcommon_field[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmetadata\u001b[39m\u001b[38;5;124m'\u001b[39m],\n\u001b[1;32m 914\u001b[0m )\n\u001b[0;32m--> 916\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_generate_dc_field_schema\u001b[39m(\n\u001b[1;32m 917\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 918\u001b[0m name: \u001b[38;5;28mstr\u001b[39m,\n\u001b[1;32m 919\u001b[0m field_info: FieldInfo,\n\u001b[1;32m 920\u001b[0m decorators: DecoratorInfos,\n\u001b[1;32m 921\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m core_schema\u001b[38;5;241m.\u001b[39mDataclassField:\n\u001b[1;32m 922\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Prepare a DataclassField to represent the parameter/field, of a dataclass.\"\"\"\u001b[39;00m\n\u001b[1;32m 923\u001b[0m common_field \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_common_field_schema(name, field_info, decorators)\n", + "File \u001b[0;32m~/job/zep-proprietary/venv/lib/python3.11/site-packages/pydantic/_internal/_generate_schema.py:1114\u001b[0m, in \u001b[0;36m_common_field_schema\u001b[0;34m(self, name, field_info, decorators)\u001b[0m\n\u001b[1;32m 1108\u001b[0m json_schema_extra \u001b[38;5;241m=\u001b[39m 
field_info\u001b[38;5;241m.\u001b[39mjson_schema_extra\n\u001b[1;32m 1110\u001b[0m metadata \u001b[38;5;241m=\u001b[39m build_metadata_dict(\n\u001b[1;32m 1111\u001b[0m js_annotation_functions\u001b[38;5;241m=\u001b[39m[get_json_schema_update_func(json_schema_updates, json_schema_extra)]\n\u001b[1;32m 1112\u001b[0m )\n\u001b[0;32m-> 1114\u001b[0m alias_generator \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_config_wrapper\u001b[38;5;241m.\u001b[39malias_generator\n\u001b[1;32m 1115\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m alias_generator \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 1116\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_apply_alias_generator_to_field_info(alias_generator, field_info, name)\n", + "\u001b[0;31mAttributeError\u001b[0m: 'FieldInfo' object has no attribute 'deprecated'" + ] + } + ], + "source": [ + "from uuid import uuid4\n", + "\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", + "from langchain_community.memory.zep_cloud_memory import ZepCloudMemory\n", + "from langchain_community.retrievers import ZepCloudRetriever\n", + "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_core.messages import AIMessage, HumanMessage\n", + "from langchain_openai import OpenAI\n", + "\n", + "session_id = str(uuid4()) # This is a unique identifier for the session" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Provide your OpenAI key\n", + "import getpass\n", + "\n", + "openai_key = getpass.getpass()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Provide your Zep API key. 
See https://help.getzep.com/projects#api-keys\n", + "\n", + "zep_api_key = getpass.getpass()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initialize the Zep Chat Message History Class and initialize the Agent\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "search = WikipediaAPIWrapper()\n", + "tools = [\n", + " Tool(\n", + " name=\"Search\",\n", + " func=search.run,\n", + " description=(\n", + " \"useful for when you need to search online for answers. You should ask\"\n", + " \" targeted questions\"\n", + " ),\n", + " ),\n", + "]\n", + "\n", + "# Set up Zep Chat History\n", + "memory = ZepCloudMemory(\n", + " session_id=session_id,\n", + " api_key=zep_api_key,\n", + " return_messages=True,\n", + " memory_key=\"chat_history\",\n", + ")\n", + "\n", + "# Initialize the agent\n", + "llm = OpenAI(temperature=0, openai_api_key=openai_key)\n", + "agent_chain = initialize_agent(\n", + " tools,\n", + " llm,\n", + " agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,\n", + " verbose=True,\n", + " memory=memory,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add some history data\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Preload some messages into the memory. The default message window is 12 messages. 
We want to push beyond this to demonstrate auto-summarization.\n", + "test_history = [\n", + " {\"role\": \"human\", \"content\": \"Who was Octavia Butler?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American\"\n", + " \" science fiction author.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"content\": \"Which books of hers were made into movies?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"The most well-known adaptation of Octavia Butler's work is the FX series\"\n", + " \" Kindred, based on her novel of the same name.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"content\": \"Who were her contemporaries?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R.\"\n", + " \" Delany, and Joanna Russ.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"content\": \"What awards did she win?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur\"\n", + " \" Fellowship.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"content\": \"Which other women sci-fi writers might I want to read?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": \"You might want to read Ursula K. Le Guin or Joanna Russ.\",\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"content\": (\n", + " \"Write a short synopsis of Butler's book, Parable of the Sower. What is it\"\n", + " \" about?\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"Parable of the Sower is a science fiction novel by Octavia Butler,\"\n", + " \" published in 1993. 
It follows the story of Lauren Olamina, a young woman\"\n", + " \" living in a dystopian future where society has collapsed due to\"\n", + " \" environmental disasters, poverty, and violence.\"\n", + " ),\n", + " \"metadata\": {\"foo\": \"bar\"},\n", + " },\n", + "]\n", + "\n", + "for msg in test_history:\n", + " memory.chat_memory.add_message(\n", + " (\n", + " HumanMessage(content=msg[\"content\"])\n", + " if msg[\"role\"] == \"human\"\n", + " else AIMessage(content=msg[\"content\"])\n", + " ),\n", + " metadata=msg.get(\"metadata\", {}),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run the agent\n", + "\n", + "Doing so will automatically add the input and response to the Zep memory.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T14:34:37.613049Z", + "start_time": "2024-05-10T14:34:35.883359Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m\n", + "AI: Parable of the Sower is highly relevant to contemporary society as it explores themes of environmental degradation, social and economic inequality, and the struggle for survival in a chaotic world. It also delves into issues of race, gender, and religion, making it a thought-provoking and timely read.\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "{'input': \"What is the book's relevance to the challenges facing contemporary society?\",\n", + " 'chat_history': [HumanMessage(content=\"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\\nOctavia Butler won the Hugo Award, the Nebula Award, and the MacArthur Fellowship.\\nUrsula K. 
Le Guin is known for novels like The Left Hand of Darkness and The Dispossessed.\\nJoanna Russ is the author of the influential feminist science fiction novel The Female Man.\\nMargaret Atwood is known for works like The Handmaid's Tale and the MaddAddam trilogy.\\nConnie Willis is an award-winning author of science fiction and fantasy, known for novels like Doomsday Book.\\nOctavia Butler is a pioneering black female science fiction author, known for Kindred and the Parable series.\\nOctavia Estelle Butler was an acclaimed American science fiction author. While none of her books were directly adapted into movies, her novel Kindred was adapted into a TV series on FX. Butler was part of a generation of prominent science fiction writers in the 20th century, including contemporaries such as Ursula K. Le Guin, Samuel R. Delany, Chip Delany, and Nalo Hopkinson.\\nhuman: What awards did she win?\\nai: Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur Fellowship.\\nhuman: Which other women sci-fi writers might I want to read?\\nai: You might want to read Ursula K. Le Guin or Joanna Russ.\\nhuman: Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\\nai: Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.\")],\n", + " 'output': 'Parable of the Sower is highly relevant to contemporary society as it explores themes of environmental degradation, social and economic inequality, and the struggle for survival in a chaotic world. 
It also delves into issues of race, gender, and religion, making it a thought-provoking and timely read.'}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_chain.invoke(\n", + " input=\"What is the book's relevance to the challenges facing contemporary society?\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Inspect the Zep memory\n", + "\n", + "Note the summary, and that the history has been enriched with token counts, UUIDs, and timestamps.\n", + "\n", + "Summaries are biased towards the most recent messages.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T14:35:11.437446Z", + "start_time": "2024-05-10T14:35:10.664076Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Octavia Estelle Butler was an acclaimed American science fiction author. While none of her books were directly adapted into movies, her novel Kindred was adapted into a TV series on FX. Butler was part of a generation of prominent science fiction writers in the 20th century, including contemporaries such as Ursula K. Le Guin, Samuel R. Delany, Chip Delany, and Nalo Hopkinson.\n", + "\n", + "\n", + "Conversation Facts: \n", + "Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\n", + "\n", + "Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur Fellowship.\n", + "\n", + "Ursula K. 
Le Guin is known for novels like The Left Hand of Darkness and The Dispossessed.\n", + "\n", + "Joanna Russ is the author of the influential feminist science fiction novel The Female Man.\n", + "\n", + "Margaret Atwood is known for works like The Handmaid's Tale and the MaddAddam trilogy.\n", + "\n", + "Connie Willis is an award-winning author of science fiction and fantasy, known for novels like Doomsday Book.\n", + "\n", + "Octavia Butler is a pioneering black female science fiction author, known for Kindred and the Parable series.\n", + "\n", + "Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993.\n", + "\n", + "The novel follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.\n", + "\n", + "Parable of the Sower explores themes of environmental degradation, social and economic inequality, and the struggle for survival in a chaotic world.\n", + "\n", + "The novel also delves into issues of race, gender, and religion, making it a thought-provoking and timely read.\n", + "\n", + "human :\n", + " {'content': \"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R. Delany, and Joanna Russ.\\nOctavia Butler won the Hugo Award, the Nebula Award, and the MacArthur Fellowship.\\nUrsula K. 
Le Guin is known for novels like The Left Hand of Darkness and The Dispossessed.\\nJoanna Russ is the author of the influential feminist science fiction novel The Female Man.\\nMargaret Atwood is known for works like The Handmaid's Tale and the MaddAddam trilogy.\\nConnie Willis is an award-winning author of science fiction and fantasy, known for novels like Doomsday Book.\\nOctavia Butler is a pioneering black female science fiction author, known for Kindred and the Parable series.\\nParable of the Sower is a science fiction novel by Octavia Butler, published in 1993.\\nThe novel follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.\\nParable of the Sower explores themes of environmental degradation, social and economic inequality, and the struggle for survival in a chaotic world.\\nThe novel also delves into issues of race, gender, and religion, making it a thought-provoking and timely read.\\nOctavia Estelle Butler was an acclaimed American science fiction author. While none of her books were directly adapted into movies, her novel Kindred was adapted into a TV series on FX. Butler was part of a generation of prominent science fiction writers in the 20th century, including contemporaries such as Ursula K. Le Guin, Samuel R. Delany, Chip Delany, and Nalo Hopkinson.\\nhuman: Which other women sci-fi writers might I want to read?\\nai: You might want to read Ursula K. Le Guin or Joanna Russ.\\nhuman: Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\\nai: Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. 
It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.\\nhuman: What is the book's relevance to the challenges facing contemporary society?\\nai: Parable of the Sower is highly relevant to contemporary society as it explores themes of environmental degradation, social and economic inequality, and the struggle for survival in a chaotic world. It also delves into issues of race, gender, and religion, making it a thought-provoking and timely read.\", 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': None, 'example': False}\n" + ] + } + ], + "source": [ + "def print_messages(messages):\n", + " for m in messages:\n", + " print(m.type, \":\\n\", m.dict())\n", + "\n", + "\n", + "print(memory.chat_memory.zep_summary)\n", + "print(\"\\n\")\n", + "print(\"Conversation Facts: \")\n", + "facts = memory.chat_memory.zep_facts\n", + "for fact in facts:\n", + " print(fact + \"\\n\")\n", + "print_messages(memory.chat_memory.messages)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Vector search over the Zep memory\n", + "\n", + "Zep provides native vector search over historical conversation memory via the `ZepRetriever`.\n", + "\n", + "You can use the `ZepRetriever` with chains that support passing in a Langchain `Retriever` object.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T14:35:33.023765Z", + "start_time": "2024-05-10T14:35:32.613576Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "content='Which other women sci-fi writers might I want to read?' 
created_at='2024-05-10T14:34:16.714292Z' metadata=None role='human' role_type=None token_count=12 updated_at='0001-01-01T00:00:00Z' uuid_='64ca1fae-8db1-4b4f-8a45-9b0e57e88af5' 0.8960460126399994\n" + ] + } + ], + "source": [ + "retriever = ZepCloudRetriever(\n", + " session_id=session_id,\n", + " api_key=zep_api_key,\n", + ")\n", + "\n", + "search_results = memory.chat_memory.search(\"who are some famous women sci-fi authors?\")\n", + "for r in search_results:\n", + " if r.score > 0.8: # Only print results with similarity of 0.8 or higher\n", + " print(r.message, r.score)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/docs/integrations/platforms/microsoft.mdx b/docs/docs/integrations/platforms/microsoft.mdx index 10364c07328..11ef1cdd7fe 100644 --- a/docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/docs/integrations/platforms/microsoft.mdx @@ -271,6 +271,26 @@ See a [usage example](/docs/integrations/retrievers/azure_ai_search). from langchain.retrievers import AzureAISearchRetriever ``` +## Tools + +### Azure Container Apps dynamic sessions + +We need to get the `POOL_MANAGEMENT_ENDPOINT` environment variable from the Azure Container Apps service. +See the instructions [here](https://python.langchain.com/v0.2/docs/integrations/tools/azure_dynamic_sessions/#setup). + +We need to install a python package. 
+ +```bash +pip install langchain-azure-dynamic-sessions +``` + +See a [usage example](/docs/integrations/tools/azure_dynamic_sessions). + +```python +from langchain_azure_dynamic_sessions import SessionsPythonREPLTool +``` + + ## Toolkits ### Azure AI Services diff --git a/docs/docs/integrations/providers/langchain_decorators.mdx b/docs/docs/integrations/providers/langchain_decorators.mdx index 1c80bcc2bfb..d719f90b298 100644 --- a/docs/docs/integrations/providers/langchain_decorators.mdx +++ b/docs/docs/integrations/providers/langchain_decorators.mdx @@ -205,7 +205,7 @@ For chat models is very useful to define prompt as a set of message templates... def simulate_conversation(human_input:str, agent_role:str="a pirate"): """ ## System message - - note the `:system` sufix inside the tag + - note the `:system` suffix inside the tag ``` diff --git a/docs/docs/integrations/providers/premai.md b/docs/docs/integrations/providers/premai.md index 7ec2ca5fa84..a15a9c0d40b 100644 --- a/docs/docs/integrations/providers/premai.md +++ b/docs/docs/integrations/providers/premai.md @@ -1,6 +1,6 @@ # PremAI ->[PremAI](https://app.premai.io) is a unified platform that lets you build powerful production-ready GenAI-powered applications with the least effort so that you can focus more on user experience and overall growth. +[PremAI](https://premai.io/) is an all-in-one platform that simplifies the creation of robust, production-ready applications powered by Generative AI. By streamlining the development process, PremAI allows you to concentrate on enhancing user experience and driving overall growth for your application. You can quickly start using our platform [here](https://docs.premai.io/quick-start). ## ChatPremAI @@ -9,36 +9,27 @@ This example goes over how to use LangChain to interact with different chat mode ### Installation and setup -We start by installing langchain and premai-sdk. 
You can type the following command to install: +We start by installing `langchain` and `premai-sdk`. You can type the following command to install: ```bash pip install premai langchain ``` -Before proceeding further, please make sure that you have made an account on PremAI and already started a project. If not, then here's how you can start for free: - -1. Sign in to [PremAI](https://app.premai.io/accounts/login/), if you are coming for the first time and create your API key [here](https://app.premai.io/api_keys/). - -2. Go to [app.premai.io](https://app.premai.io) and this will take you to the project's dashboard. - -3. Create a project and this will generate a project-id (written as ID). This ID will help you to interact with your deployed application. - -4. Head over to LaunchPad (the one with 🚀 icon). And there deploy your model of choice. Your default model will be `gpt-4`. You can also set and fix different generation parameters (like max-tokens, temperature, etc) and also pre-set your system prompt. - -Congratulations on creating your first deployed application on PremAI 🎉 Now we can use langchain to interact with our application. +Before proceeding further, please make sure that you have made an account on PremAI and already created a project. If not, please refer to the [quick start](https://docs.premai.io/introduction) guide to get started with the PremAI platform. Create your first project and grab your API key. ```python from langchain_core.messages import HumanMessage, SystemMessage from langchain_community.chat_models import ChatPremAI ``` -### Setup ChatPrem instance in LangChain +### Setup PremAI client in LangChain -Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error. +Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is `8`. 
But make sure you use your project-id, otherwise it will throw error. -To use langchain with prem, you do not need to pass any model name or set any parameters with our chat client. All of those will use the default model name and parameters of the LaunchPad model. +To use langchain with prem, you do not need to pass any model name or set any parameters with our chat-client. By default it will use the model name and parameters used in the [LaunchPad](https://docs.premai.io/get-started/launchpad). + +> Note: If you change the `model` or any other parameters like `temperature` or `max_tokens` while setting the client, it will override existing default configurations, that was used in LaunchPad. -`NOTE:` If you change the `model_name` or any other parameter like `temperature` while setting the client, it will override existing default configurations. ```python import os @@ -50,21 +41,19 @@ if "PREMAI_API_KEY" not in os.environ: chat = ChatPremAI(project_id=8) ``` -### Calling the Model +### Chat Completions -Now you are all set. We can now start by interacting with our application. `ChatPremAI` supports two methods `invoke` (which is the same as `generate`) and `stream`. +`ChatPremAI` supports two methods: `invoke` (which is the same as `generate`) and `stream`. The first one will give us a static result. Whereas the second one will stream tokens one by one. Here's how you can generate chat-like completions. -### Generation - ```python human_message = HumanMessage(content="Who are you?") chat.invoke([human_message]) ``` -The above looks interesting, right? I set my default launchpad system-prompt as: `Always sound like a pirate` You can also, override the default system prompt if you need to. Here's how you can do it. 
+You can provide system prompt here like this: ```python system_message = SystemMessage(content="You are a friendly assistant.") @@ -82,16 +71,13 @@ chat.invoke( ) ``` +> If you are going to place system prompt here, then it will override your system prompt that was fixed while deploying the application from the platform. -### Important notes: - -Before proceeding further, please note that the current version of ChatPrem does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop) are not supported. - -We will provide support for those two above parameters in later versions. +> Please note that the current version of ChatPremAI does not support parameters: [n](https://platform.openai.com/docs/api-reference/chat/create#chat-create-n) and [stop](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop). ### Streaming -And finally, here's how you do token streaming for dynamic chat-like applications. +In this section, let's see how we can stream tokens using langchain and PremAI. Here's how you do it. ```python import sys @@ -101,7 +87,7 @@ for chunk in chat.stream("hello how are you"): sys.stdout.flush() ``` -Similar to above, if you want to override the system-prompt and the generation parameters, here's how you can do it. +Similar to above, if you want to override the system-prompt and the generation parameters, you need to add the following: ```python import sys @@ -114,47 +100,30 @@ for chunk in chat.stream( sys.stdout.flush() ``` -## Embedding +This will stream tokens one after the other. -In this section, we are going to discuss how we can get access to different embedding models using `PremEmbeddings`. 
Let's start by doing some imports and defining our embedding object +## PremEmbeddings + +In this section we are going to dicuss how we can get access to different embedding model using `PremEmbeddings` with LangChain. Lets start by importing our modules and setting our API Key. ```python -from langchain_community.embeddings import PremEmbeddings -``` - -Once we import our required modules, let's set up our client. For now, let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise, it will throw an error. - - -```python - import os import getpass +from langchain_community.embeddings import PremEmbeddings + if os.environ.get("PREMAI_API_KEY") is None: os.environ["PREMAI_API_KEY"] = getpass.getpass("PremAI API Key:") -# Define a model as a required parameter here since there is no default embedding model +``` + +We support lots of state of the art embedding models. You can view our list of supported LLMs and embedding models [here](https://docs.premai.io/get-started/supported-models). For now let's go for `text-embedding-3-large` model for this example. . + +```python model = "text-embedding-3-large" embedder = PremEmbeddings(project_id=8, model=model) -``` -We have defined our embedding model. We support a lot of embedding models. Here is a table that shows the number of embedding models we support. - - -| Provider | Slug | Context Tokens | -|-------------|------------------------------------------|----------------| -| cohere | embed-english-v3.0 | N/A | -| openai | text-embedding-3-small | 8191 | -| openai | text-embedding-3-large | 8191 | -| openai | text-embedding-ada-002 | 8191 | -| replicate | replicate/all-mpnet-base-v2 | N/A | -| together | togethercomputer/Llama-2-7B-32K-Instruct | N/A | -| mistralai | mistral-embed | 4096 | - -To change the model, you simply need to copy the `slug` and access your embedding model. 
Now let's start using our embedding model with a single query followed by multiple queries (which is also called as a document) - -```python query = "Hello, this is a test query" query_result = embedder.embed_query(query) @@ -162,8 +131,11 @@ query_result = embedder.embed_query(query) print(query_result[:5]) ``` + +Setting `model_name` argument in mandatory for PremAIEmbeddings unlike chat. + -Finally, let's embed a document +Finally, let's embed some sample document ```python documents = [ @@ -178,4 +150,20 @@ doc_result = embedder.embed_documents(documents) # of the first document vector print(doc_result[0][:5]) -``` \ No newline at end of file +``` + +```python +print(f"Dimension of embeddings: {len(query_result)}") +``` +Dimension of embeddings: 3072 + +```python +doc_result[:5] +``` +>Result: +> +>[-0.02129288576543331, + 0.0008162345038726926, + -0.004556538071483374, + 0.02918623760342598, + -0.02547479420900345] \ No newline at end of file diff --git a/docs/docs/integrations/providers/upstage.ipynb b/docs/docs/integrations/providers/upstage.ipynb index 1355f523171..b43bfe163d2 100644 --- a/docs/docs/integrations/providers/upstage.ipynb +++ b/docs/docs/integrations/providers/upstage.ipynb @@ -115,13 +115,13 @@ "source": [ "from langchain_upstage import UpstageEmbeddings\n", "\n", - "embeddings = UpstageEmbeddings()\n", + "embeddings = UpstageEmbeddings(model=\"solar-embedding-1-large\")\n", "doc_result = embeddings.embed_documents(\n", - " [\"Sam is a teacher.\", \"This is another document\"]\n", + " [\"Sung is a professor.\", \"This is another document\"]\n", ")\n", "print(doc_result)\n", "\n", - "query_result = embeddings.embed_query(\"What does Sam do?\")\n", + "query_result = embeddings.embed_query(\"What does Sung do?\")\n", "print(query_result)" ] }, diff --git a/docs/docs/integrations/providers/zep.mdx b/docs/docs/integrations/providers/zep.mdx index 220a35c910e..c7e8c038bd6 100644 --- a/docs/docs/integrations/providers/zep.mdx +++ 
b/docs/docs/integrations/providers/zep.mdx @@ -28,38 +28,68 @@ In addition to Zep Open Source's memory management features, Zep Cloud offers: - **Dialog Classification**: Instantly and accurately classify chat dialog. Understand user intent and emotion, segment users, and more. Route chains based on semantic context, and trigger events. - **Structured Data Extraction**: Quickly extract business data from chat conversations using a schema you define. Understand what your Assistant should ask for next in order to complete its task. -> Interested in Zep Cloud? See [Zep Cloud Installation Guide](https://help.getzep.com/sdks), [Zep Cloud Message History Example](https://help.getzep.com/langchain/examples/messagehistory-example), [Zep Cloud Vector Store Example](https://help.getzep.com/langchain/examples/vectorstore-example) -## Open Source Installation and Setup -> Zep Open Source project: [https://github.com/getzep/zep](https://github.com/getzep/zep) -> -> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/) +## Zep Open Source +Zep offers an open source version with a self-hosted option. +Please refer to the [Zep Open Source](https://github.com/getzep/zep) repo for more information. +You can also find Zep Open Source compatible [Retriever](/docs/integrations/retrievers/zep_memorystore), [Vector Store](/docs/integrations/vectorstores/zep) and [Memory](/docs/integrations/memory/zep_memory) examples -1. Install the Zep service. See the [Zep Quick Start Guide](https://docs.getzep.com/deployment/quickstart/). +## Zep Cloud Installation and Setup -2. Install the Zep Python SDK: +[Zep Cloud Docs](https://help.getzep.com) + +1. 
Install the Zep Cloud SDK: ```bash -pip install zep_python +pip install zep_cloud +``` +or +```bash +poetry add zep_cloud ``` ## Memory -Zep's [Memory API](https://docs.getzep.com/sdk/chat_history/) persists your app's chat history and metadata to a Session, enriches the memory, automatically generates summaries, and enables vector similarity search over historical chat messages and summaries. +Zep's Memory API persists your users' chat history and metadata to a [Session](https://help.getzep.com/chat-history-memory/sessions), enriches the memory, and +enables vector similarity search over historical chat messages and dialog summaries. -There are two approaches to populating your prompt with chat history: +Zep offers several approaches to populating prompts with context from historical conversations. -1. Retrieve the most recent N messages (and potentionally a summary) from a Session and use them to construct your prompt. -2. Search over the Session's chat history for messages that are relevant and use them to construct your prompt. +### Perpetual Memory +This is the default memory type. +Salient facts from the dialog are extracted and stored in a Fact Table. +This is updated in real-time as new messages are added to the Session. +Every time you call the Memory API to get a Memory, Zep returns the Fact Table, the most recent messages (per your Message Window setting), and a summary of the most recent messages prior to the Message Window. +The combination of the Fact Table, summary, and the most recent messages in a prompts provides both factual context and nuance to the LLM. -Both of these approaches may be useful, with the first providing the LLM with context as to the most recent interactions with a human. The second approach enables you to look back further in the chat history and retrieve messages that are relevant to the current conversation in a token-efficient manner. 
+### Summary Retriever Memory +Returns the most recent messages and a summary of past messages relevant to the current conversation, +enabling you to provide your Assistant with helpful context from past conversations +### Message Window Buffer Memory +Returns the most recent N messages from the current conversation. + +Additionally, Zep enables vector similarity searches for Messages or Summaries stored within its system. + +This feature lets you populate prompts with past conversations that are contextually similar to a specific query, +organizing the results by a similarity Score. + +`ZepCloudChatMessageHistory` and `ZepCloudMemory` classes can be imported to interact with Zep Cloud APIs. + +`ZepCloudChatMessageHistory` is compatible with `RunnableWithMessageHistory`. ```python -from langchain.memory import ZepMemory +from langchain_community.chat_message_histories import ZepCloudChatMessageHistory ``` -See a [RAG App Example here](/docs/integrations/memory/zep_memory). +See a [Perpetual Memory Example here](/docs/integrations/memory/zep_cloud_chat_message_history). + +You can use `ZepCloudMemory` together with agents that support Memory. +```python +from langchain.memory import ZepCloudMemory +``` + +See a [Memory RAG Example here](/docs/integrations/memory/zep_memory_cloud). ## Retriever @@ -67,24 +97,24 @@ Zep's Memory Retriever is a LangChain Retriever that enables you to retrieve mes The Retriever supports searching over both individual messages and summaries of conversations. The latter is useful for providing rich, but succinct context to the LLM as to relevant past conversations. -Zep's Memory Retriever supports both similarity search and [Maximum Marginal Relevance (MMR) reranking](https://docs.getzep.com/sdk/search_query/). 
MMR search is useful for ensuring that the retrieved messages are diverse and not too similar to each other +Zep's Memory Retriever supports both similarity search and [Maximum Marginal Relevance (MMR) reranking](https://help.getzep.com/working-with-search#how-zeps-mmr-re-ranking-works). MMR search is useful for ensuring that the retrieved messages are diverse and not too similar to each other -See a [usage example](/docs/integrations/retrievers/zep_memorystore). +See a [usage example](/docs/integrations/retrievers/zep_cloud_memorystore). ```python -from langchain_community.retrievers import ZepRetriever +from langchain_community.retrievers import ZepCloudRetriever ``` ## Vector store -Zep's [Document VectorStore API](https://docs.getzep.com/sdk/documents/) enables you to store and retrieve documents using vector similarity search. Zep doesn't require you to understand +Zep's [Document VectorStore API](https://help.getzep.com/document-collections) enables you to store and retrieve documents using vector similarity search. Zep doesn't require you to understand distance functions, types of embeddings, or indexing best practices. You just pass in your chunked documents, and Zep handles the rest. -Zep supports both similarity search and [Maximum Marginal Relevance (MMR) reranking](https://docs.getzep.com/sdk/search_query/). +Zep supports both similarity search and [Maximum Marginal Relevance (MMR) reranking](https://help.getzep.com/working-with-search#how-zeps-mmr-re-ranking-works). MMR search is useful for ensuring that the retrieved documents are diverse and not too similar to each other. ```python -from langchain_community.vectorstores import ZepVectorStore +from langchain_community.vectorstores import ZepCloudVectorStore ``` -See a [usage example](/docs/integrations/vectorstores/zep). \ No newline at end of file +See a [usage example](/docs/integrations/vectorstores/zep_cloud). 
\ No newline at end of file diff --git a/docs/docs/integrations/retrievers/milvus_hybrid_search.ipynb b/docs/docs/integrations/retrievers/milvus_hybrid_search.ipynb new file mode 100644 index 00000000000..2aac7424758 --- /dev/null +++ b/docs/docs/integrations/retrievers/milvus_hybrid_search.ipynb @@ -0,0 +1,636 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "# Milvus Hybrid Search\n", + "\n", + "> [Milvus](https://milvus.io/docs) is an open-source vector database built to power embedding similarity search and AI applications. Milvus makes unstructured data search more accessible, and provides a consistent user experience regardless of the deployment environment.\n", + "\n", + "This notebook goes over how to use the Milvus Hybrid Search retriever, which combines the strengths of both dense and sparse vector search.\n", + "\n", + "For more reference please go to [Milvus Multi-Vector Search](https://milvus.io/docs/multi-vector-search.md)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "## Prerequisites\n", + "### Install dependencies\n", + "You need to prepare to install the following dependencies\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet pymilvus[model] langchain-milvus langchain-openai" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "Import necessary modules and classes" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, 
+ "outputs": [], + "source": [ + "from pymilvus import (\n", + " Collection,\n", + " CollectionSchema,\n", + " DataType,\n", + " FieldSchema,\n", + " WeightedRanker,\n", + " connections,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import PromptTemplate\n", + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_milvus.retrievers import MilvusCollectionHybridSearchRetriever\n", + "from langchain_milvus.utils.sparse import BM25SparseEmbedding\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "### Start the Milvus service\n", + "\n", + "Please refer to the [Milvus documentation](https://milvus.io/docs/install_standalone-docker.md) to start the Milvus service.\n", + "\n", + "After starting milvus, you need to specify your milvus connection URI.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "CONNECTION_URI = \"http://localhost:19530\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "### Prepare OpenAI API Key\n", + "\n", + "Please refer to the [OpenAI documentation](https://platform.openai.com/account/api-keys) to obtain your OpenAI API key, and set it as an environment variable.\n", + "\n", + "```shell\n", + "export OPENAI_API_KEY=\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Prepare data and Load\n", + "### Prepare dense and sparse embedding functions\n", + "\n", + " Let us fictionalize 10 fake 
descriptions of novels. In actual production, it may be a large amount of text data." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "texts = [\n", + " \"In 'The Whispering Walls' by Ava Moreno, a young journalist named Sophia uncovers a decades-old conspiracy hidden within the crumbling walls of an ancient mansion, where the whispers of the past threaten to destroy her own sanity.\",\n", + " \"In 'The Last Refuge' by Ethan Blackwood, a group of survivors must band together to escape a post-apocalyptic wasteland, where the last remnants of humanity cling to life in a desperate bid for survival.\",\n", + " \"In 'The Memory Thief' by Lila Rose, a charismatic thief with the ability to steal and manipulate memories is hired by a mysterious client to pull off a daring heist, but soon finds themselves trapped in a web of deceit and betrayal.\",\n", + " \"In 'The City of Echoes' by Julian Saint Clair, a brilliant detective must navigate a labyrinthine metropolis where time is currency, and the rich can live forever, but at a terrible cost to the poor.\",\n", + " \"In 'The Starlight Serenade' by Ruby Flynn, a shy astronomer discovers a mysterious melody emanating from a distant star, which leads her on a journey to uncover the secrets of the universe and her own heart.\",\n", + " \"In 'The Shadow Weaver' by Piper Redding, a young orphan discovers she has the ability to weave powerful illusions, but soon finds herself at the center of a deadly game of cat and mouse between rival factions vying for control of the mystical arts.\",\n", + " \"In 'The Lost Expedition' by Caspian Grey, a team of explorers ventures into the heart of the Amazon rainforest in search of a lost city, but soon finds themselves hunted by a ruthless treasure hunter and the treacherous jungle itself.\",\n", + " \"In 'The Clockwork Kingdom' by Augusta Wynter, a brilliant inventor discovers a hidden world of clockwork machines and ancient 
magic, where a rebellion is brewing against the tyrannical ruler of the land.\",\n", + " \"In 'The Phantom Pilgrim' by Rowan Welles, a charismatic smuggler is hired by a mysterious organization to transport a valuable artifact across a war-torn continent, but soon finds themselves pursued by deadly assassins and rival factions.\",\n", + " \"In 'The Dreamwalker's Journey' by Lyra Snow, a young dreamwalker discovers she has the ability to enter people's dreams, but soon finds herself trapped in a surreal world of nightmares and illusions, where the boundaries between reality and fantasy blur.\",\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use the [OpenAI Embedding](https://platform.openai.com/docs/guides/embeddings) to generate dense vectors, and the [BM25 algorithm](https://en.wikipedia.org/wiki/Okapi_BM25) to generate sparse vectors.\n", + "\n", + "Initialize dense embedding function and get dimension" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1536" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dense_embedding_func = OpenAIEmbeddings()\n", + "dense_dim = len(dense_embedding_func.embed_query(texts[1]))\n", + "dense_dim" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Initialize sparse embedding function.\n", + "\n", + "Note that the output of sparse embedding is a set of sparse vectors, which represents the index and weight of the keywords of the input text." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{0: 0.4270424944042204,\n", + " 21: 1.845826690498331,\n", + " 22: 1.845826690498331,\n", + " 23: 1.845826690498331,\n", + " 24: 1.845826690498331,\n", + " 25: 1.845826690498331,\n", + " 26: 1.845826690498331,\n", + " 27: 1.2237754316221157,\n", + " 28: 1.845826690498331,\n", + " 29: 1.845826690498331,\n", + " 30: 1.845826690498331,\n", + " 31: 1.845826690498331,\n", + " 32: 1.845826690498331,\n", + " 33: 1.845826690498331,\n", + " 34: 1.845826690498331,\n", + " 35: 1.845826690498331,\n", + " 36: 1.845826690498331,\n", + " 37: 1.845826690498331,\n", + " 38: 1.845826690498331,\n", + " 39: 1.845826690498331}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sparse_embedding_func = BM25SparseEmbedding(corpus=texts)\n", + "sparse_embedding_func.embed_query(texts[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Milvus Collection and load data\n", + "\n", + "Initialize connection URI and establish connection" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "connections.connect(uri=CONNECTION_URI)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define field names and their data types" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "pk_field = \"doc_id\"\n", + "dense_field = \"dense_vector\"\n", + "sparse_field = \"sparse_vector\"\n", + "text_field = \"text\"\n", + "fields = [\n", + " FieldSchema(\n", + " name=pk_field,\n", + " dtype=DataType.VARCHAR,\n", + " is_primary=True,\n", + " auto_id=True,\n", + " max_length=100,\n", + " ),\n", + " FieldSchema(name=dense_field, dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n", + " FieldSchema(name=sparse_field, dtype=DataType.SPARSE_FLOAT_VECTOR),\n", 
+ " FieldSchema(name=text_field, dtype=DataType.VARCHAR, max_length=65_535),\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a collection with the defined schema" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "schema = CollectionSchema(fields=fields, enable_dynamic_field=False)\n", + "collection = Collection(\n", + " name=\"IntroductionToTheNovels\", schema=schema, consistency_level=\"Strong\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define index for dense and sparse vectors" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "dense_index = {\"index_type\": \"FLAT\", \"metric_type\": \"IP\"}\n", + "collection.create_index(\"dense_vector\", dense_index)\n", + "sparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\n", + "collection.create_index(\"sparse_vector\", sparse_index)\n", + "collection.flush()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Insert entities into the collection and load the collection" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "entities = []\n", + "for text in texts:\n", + " entity = {\n", + " dense_field: dense_embedding_func.embed_documents([text])[0],\n", + " sparse_field: sparse_embedding_func.embed_documents([text])[0],\n", + " text_field: text,\n", + " }\n", + " entities.append(entity)\n", + "collection.insert(entities)\n", + "collection.load()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Build RAG chain with Retriever\n", + "### Create the Retriever\n", + "\n", + "Define search parameters for sparse and dense fields, and create a retriever" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + 
"sparse_search_params = {\"metric_type\": \"IP\"}\n", + "dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n", + "retriever = MilvusCollectionHybridSearchRetriever(\n", + " collection=collection,\n", + " rerank=WeightedRanker(0.5, 0.5),\n", + " anns_fields=[dense_field, sparse_field],\n", + " field_embeddings=[dense_embedding_func, sparse_embedding_func],\n", + " field_search_params=[dense_search_params, sparse_search_params],\n", + " top_k=3,\n", + " text_field=text_field,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "In the input parameters of this Retriever, we use a dense embedding and a sparse embedding to perform hybrid search on the two fields of this Collection, and use WeightedRanker for reranking. Finally, 3 top-K Documents will be returned." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content=\"In 'The Lost Expedition' by Caspian Grey, a team of explorers ventures into the heart of the Amazon rainforest in search of a lost city, but soon finds themselves hunted by a ruthless treasure hunter and the treacherous jungle itself.\", metadata={'doc_id': '449281835035545843'}),\n", + " Document(page_content=\"In 'The Phantom Pilgrim' by Rowan Welles, a charismatic smuggler is hired by a mysterious organization to transport a valuable artifact across a war-torn continent, but soon finds themselves pursued by deadly assassins and rival factions.\", metadata={'doc_id': '449281835035545845'}),\n", + " Document(page_content=\"In 'The Dreamwalker's Journey' by Lyra Snow, a young dreamwalker discovers she has the ability to enter people's dreams, but soon finds herself trapped in a surreal world of nightmares and illusions, where the boundaries between reality and fantasy blur.\", metadata={'doc_id': '449281835035545846'})]" + ] + }, + 
"execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "retriever.invoke(\"What are the story about ventures?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Build the RAG chain\n", + "\n", + "Initialize ChatOpenAI and define a prompt template" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "llm = ChatOpenAI()\n", + "\n", + "PROMPT_TEMPLATE = \"\"\"\n", + "Human: You are an AI assistant, and provides answers to questions by using fact based and statistical information when possible.\n", + "Use the following pieces of information to provide a concise answer to the question enclosed in tags.\n", + "\n", + "\n", + "{context}\n", + "\n", + "\n", + "\n", + "{question}\n", + "\n", + "\n", + "Assistant:\"\"\"\n", + "\n", + "prompt = PromptTemplate(\n", + " template=PROMPT_TEMPLATE, input_variables=[\"context\", \"question\"]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "Define a function for formatting documents" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "def format_docs(docs):\n", + " return \"\\n\\n\".join(doc.page_content for doc in docs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "Define a chain using the retriever and other components" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "rag_chain = (\n", + " {\"context\": retriever | 
format_docs, \"question\": RunnablePassthrough()}\n", + " | prompt\n", + " | llm\n", + " | StrOutputParser()\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "Perform a query using the defined chain" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "\"Lila Rose has written 'The Memory Thief,' which follows a charismatic thief with the ability to steal and manipulate memories as they navigate a daring heist and a web of deceit and betrayal.\"" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "rag_chain.invoke(\"What novels has Lila written and what are their contents?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "Drop the collection" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + }, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "collection.drop()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb index c26e04d0fd2..ff44f6c6d95 
100644 --- a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb @@ -162,12 +162,14 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_openai import OpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", " name=\"genre\",\n", - " description=\"The genres of the movie\",\n", + " description=\"The genres of the movie. \"\n", + " \"It only supports equal and contain comparisons. \"\n", + " \"Here are some examples: genre = [' A '], genre = [' A ', 'B'], contain (genre, 'A')\",\n", " type=\"list[string]\",\n", " ),\n", " # If you want to include length of a list, just define it as a new column\n", @@ -193,7 +195,7 @@ " ),\n", "]\n", "document_content_description = \"Brief summary of a movie\"\n", - "llm = OpenAI(temperature=0)\n", + "llm = ChatOpenAI(temperature=0, model_name=\"gpt-4o\")\n", "retriever = SelfQueryRetriever.from_llm(\n", " llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n", ")" diff --git a/docs/docs/integrations/retrievers/zep_cloud_memorystore.ipynb b/docs/docs/integrations/retrievers/zep_cloud_memorystore.ipynb new file mode 100644 index 00000000000..6b0c1c674ea --- /dev/null +++ b/docs/docs/integrations/retrievers/zep_cloud_memorystore.ipynb @@ -0,0 +1,470 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "# Zep Cloud\n", + "## Retriever Example for [Zep Cloud](https://docs.getzep.com/)\n", + "\n", + "> Recall, understand, and extract data from chat histories. 
Power personalized AI experiences.\n", + "\n", + "> [Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", + "\n", + "> See [Zep Cloud Installation Guide](https://help.getzep.com/sdks) and more [Zep Cloud Langchain Examples](https://github.com/getzep/zep-python/tree/main/examples)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Retriever Example\n", + "\n", + "This notebook demonstrates how to search historical chat message histories using the [Zep Long-term Memory Store](https://www.getzep.com/).\n", + "\n", + "We'll demonstrate:\n", + "\n", + "1. Adding conversation history to the Zep memory store.\n", + "2. Vector search over the conversation history: \n", + " 1. With a similarity search over chat messages\n", + " 2. Using maximal marginal relevance re-ranking of a chat message search\n", + " 3. Filtering a search using metadata filters\n", + " 4. A similarity search over summaries of the chat messages\n", + " 5. 
Using maximal marginal relevance re-ranking of a summary search\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import getpass\n", + "import time\n", + "from uuid import uuid4\n", + "\n", + "from langchain_community.memory.zep_cloud_memory import ZepCloudMemory\n", + "from langchain_community.retrievers import ZepCloudRetriever\n", + "from langchain_core.messages import AIMessage, HumanMessage\n", + "\n", + "# Provide your Zep API key.\n", + "zep_api_key = getpass.getpass()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initialize the Zep Chat Message History Class and add a chat message history to the memory store\n", + "\n", + "**NOTE:** Unlike other Retrievers, the content returned by the Zep Retriever is session/user specific. A `session_id` is required when instantiating the Retriever." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "session_id = str(uuid4()) # This is a unique identifier for the user/session\n", + "\n", + "# Initialize the Zep Memory Class\n", + "zep_memory = ZepCloudMemory(session_id=session_id, api_key=zep_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "# Preload some messages into the memory. The default message window is 4 messages. 
We want to push beyond this to demonstrate auto-summarization.\n", + "test_history = [\n", + " {\"role\": \"human\", \"role_type\": \"user\", \"content\": \"Who was Octavia Butler?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American\"\n", + " \" science fiction author.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"Which books of hers were made into movies?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"The most well-known adaptation of Octavia Butler's work is the FX series\"\n", + " \" Kindred, based on her novel of the same name.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"role_type\": \"user\", \"content\": \"Who were her contemporaries?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"Octavia Butler's contemporaries included Ursula K. Le Guin, Samuel R.\"\n", + " \" Delany, and Joanna Russ.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"role_type\": \"user\", \"content\": \"What awards did she win?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"Octavia Butler won the Hugo Award, the Nebula Award, and the MacArthur\"\n", + " \" Fellowship.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"Which other women sci-fi writers might I want to read?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": \"You might want to read Ursula K. 
Le Guin or Joanna Russ.\",\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": (\n", + " \"Write a short synopsis of Butler's book, Parable of the Sower. What is it\"\n", + " \" about?\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"Parable of the Sower is a science fiction novel by Octavia Butler,\"\n", + " \" published in 1993. It follows the story of Lauren Olamina, a young woman\"\n", + " \" living in a dystopian future where society has collapsed due to\"\n", + " \" environmental disasters, poverty, and violence.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"What is the setting of the book?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"The book is set in a dystopian future in the 2020s, where society has\"\n", + " \" collapsed due to climate change and economic crises.\"\n", + " ),\n", + " },\n", + " {\"role\": \"human\", \"role_type\": \"user\", \"content\": \"Who is the protagonist?\"},\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"The protagonist of the book is Lauren Olamina, a young woman who possesses\"\n", + " \" 'hyperempathy', the ability to feel pain and other sensations she\"\n", + " \" witnesses.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"What is the main theme of the book?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"The main theme of the book is survival in the face of drastic societal\"\n", + " \" change and collapse. 
It also explores themes of adaptability, community,\"\n", + " \" and the human capacity for change.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"What is the 'Parable of the Sower'?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"The 'Parable of the Sower' is a biblical parable that Butler uses as a\"\n", + " \" metaphor in the book. In the parable, a sower scatters seeds, some of\"\n", + " \" which fall on fertile ground and grow, while others fall on rocky ground\"\n", + " \" or among thorns and fail to grow. The parable is used to illustrate the\"\n", + " \" importance of receptivity and preparedness in the face of change.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"What is Butler's writing style like?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"role_type\": \"assistant\",\n", + " \"content\": (\n", + " \"Butler's writing style is known for its clarity, directness, and\"\n", + " \" psychological insight. 
Her narratives often involve complex, diverse\"\n", + " \" characters and explore themes of race, gender, and power.\"\n", + " ),\n", + " },\n", + " {\n", + " \"role\": \"human\",\n", + " \"role_type\": \"user\",\n", + " \"content\": \"What other books has she written?\",\n", + " },\n", + " {\n", + " \"role\": \"ai\",\n", + " \"content\": (\n", + " \"In addition to 'Parable of the Sower', Butler has written several other\"\n", + " \" notable works, including 'Kindred', 'Dawn', and 'Parable of the Talents'.\"\n", + " ),\n", + " },\n", + "]\n", + "\n", + "for msg in test_history:\n", + " zep_memory.chat_memory.add_message(\n", + " HumanMessage(content=msg[\"content\"])\n", + " if msg[\"role\"] == \"human\"\n", + " else AIMessage(content=msg[\"content\"])\n", + " )\n", + "\n", + "time.sleep(\n", + " 10\n", + ") # Wait for the messages to be embedded and summarized, this happens asynchronously." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Use the Zep Retriever to vector search over the Zep memory\n", + "\n", + "Zep provides native vector search over historical conversation memory. Embedding happens automatically.\n", + "\n", + "NOTE: Embedding of messages occurs asynchronously, so the first query may not return results. Subsequent queries will return results as the embeddings are generated." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T14:32:06.613100Z", + "start_time": "2024-05-10T14:32:06.369301Z" + }, + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content=\"What is the 'Parable of the Sower'?\", metadata={'score': 0.9333381652832031, 'uuid': 'bebc441c-a32d-44a1-ae61-968e7b3d4956', 'created_at': '2024-05-10T05:02:01.857627Z', 'token_count': 11, 'role': 'human'}),\n", + " Document(page_content=\"The 'Parable of the Sower' is a biblical parable that Butler uses as a metaphor in the book. 
In the parable, a sower scatters seeds, some of which fall on fertile ground and grow, while others fall on rocky ground or among thorns and fail to grow. The parable is used to illustrate the importance of receptivity and preparedness in the face of change.\", metadata={'score': 0.8757256865501404, 'uuid': '193c60d8-2b7b-4eb1-a4be-c2d8afd92991', 'created_at': '2024-05-10T05:02:01.97174Z', 'token_count': 82, 'role': 'ai'}),\n", + " Document(page_content=\"Write a short synopsis of Butler's book, Parable of the Sower. What is it about?\", metadata={'score': 0.8641344904899597, 'uuid': 'fc78901d-a625-4530-ba63-1ae3e3b11683', 'created_at': '2024-05-10T05:02:00.942994Z', 'token_count': 21, 'role': 'human'}),\n", + " Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler, published in 1993. It follows the story of Lauren Olamina, a young woman living in a dystopian future where society has collapsed due to environmental disasters, poverty, and violence.', metadata={'score': 0.8581685125827789, 'uuid': '91f2cda4-276e-446d-96bf-07d34e5af616', 'created_at': '2024-05-10T05:02:01.05577Z', 'token_count': 54, 'role': 'ai'}),\n", + " Document(page_content=\"In addition to 'Parable of the Sower', Butler has written several other notable works, including 'Kindred', 'Dawn', and 'Parable of the Talents'.\", metadata={'score': 0.8076582252979279, 'uuid': 'e3994519-9a90-410c-b14c-2c652f6d184f', 'created_at': '2024-05-10T05:02:02.401682Z', 'token_count': 37, 'role': 'ai'})]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "zep_retriever = ZepCloudRetriever(\n", + " api_key=zep_api_key,\n", + " session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever\n", + " top_k=5,\n", + ")\n", + "\n", + "await zep_retriever.ainvoke(\"Who wrote Parable of the Sower?\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We 
can also use the Zep sync API to retrieve results:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T14:31:37.611570Z", + "start_time": "2024-05-10T14:31:37.298903Z" + }, + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler set in a dystopian future in the 2020s. The story follows Lauren Olamina, a young woman living in a society that has collapsed due to environmental disasters, poverty, and violence. The novel explores themes of societal breakdown, the struggle for survival, and the search for a better future.', metadata={'score': 0.8473024368286133, 'uuid': 'e4689f8e-33be-4a59-a9c2-e5ef5dd70f74', 'created_at': '2024-05-10T05:02:02.713123Z', 'token_count': 76})]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "zep_retriever.invoke(\"Who wrote Parable of the Sower?\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reranking using MMR (Maximal Marginal Relevance)\n", + "\n", + "Zep has native, SIMD-accelerated support for reranking results using MMR. This is useful for removing redundancy in results." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "zep_retriever = ZepCloudRetriever(\n", + " api_key=zep_api_key,\n", + " session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever\n", + " top_k=5,\n", + " search_type=\"mmr\",\n", + " mmr_lambda=0.5,\n", + ")\n", + "\n", + "await zep_retriever.ainvoke(\"Who wrote Parable of the Sower?\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Using metadata filters to refine search results\n", + "\n", + "Zep supports filtering results by metadata. 
This is useful for filtering results by entity type, or other metadata.\n", + "\n", + "More information here: https://help.getzep.com/document-collections#searching-a-collection-with-hybrid-vector-search" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "filter = {\"where\": {\"jsonpath\": '$[*] ? (@.baz == \"qux\")'}}\n", + "\n", + "await zep_retriever.ainvoke(\n", + " \"Who wrote Parable of the Sower?\", config={\"metadata\": filter}\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Searching over Summaries with MMR Reranking\n", + "\n", + "Zep automatically generates summaries of chat messages. These summaries can be searched over using the Zep Retriever. Since a summary is a distillation of a conversation, they're more likely to match your search query and offer rich, succinct context to the LLM.\n", + "\n", + "Successive summaries may include similar content, with Zep's similarity search returning the highest matching results but with little diversity.\n", + "MMR re-ranks the results to ensure that the summaries you populate into your prompt are both relevant and each offers additional information to the LLM." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T14:32:56.877960Z", + "start_time": "2024-05-10T14:32:56.517360Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content='Parable of the Sower is a science fiction novel by Octavia Butler set in a dystopian future in the 2020s. The story follows Lauren Olamina, a young woman living in a society that has collapsed due to environmental disasters, poverty, and violence. 
The novel explores themes of societal breakdown, the struggle for survival, and the search for a better future.', metadata={'score': 0.8473024368286133, 'uuid': 'e4689f8e-33be-4a59-a9c2-e5ef5dd70f74', 'created_at': '2024-05-10T05:02:02.713123Z', 'token_count': 76}),\n", + " Document(page_content='The \\'Parable of the Sower\\' refers to a new religious belief system that the protagonist, Lauren Olamina, develops over the course of the novel. As her community disintegrates due to climate change, economic collapse, and social unrest, Lauren comes to believe that humanity must adapt and \"shape God\" in order to survive. The \\'Parable of the Sower\\' is the foundational text of this new religion, which Lauren calls \"Earthseed\", that emphasizes the inevitability of change and the need for humanity to take an active role in shaping its own future. This parable is a central thematic element of the novel, representing the protagonist\\'s search for meaning and purpose in the face of societal upheaval.', metadata={'score': 0.8466987311840057, 'uuid': '1f1a44eb-ebd8-4617-ac14-0281099bd770', 'created_at': '2024-05-10T05:02:07.541073Z', 'token_count': 146}),\n", + " Document(page_content='The dialog discusses the central themes of Octavia Butler\\'s acclaimed science fiction novel \"Parable of the Sower.\" The main theme is survival in the face of drastic societal collapse, and the importance of adaptability, community, and the human capacity for change. 
The \"Parable of the Sower,\" a biblical parable, serves as a metaphorical framework for the novel, illustrating the need for receptivity and preparedness when confronting transformative upheaval.', metadata={'score': 0.8283970355987549, 'uuid': '4158a750-3ccd-45ce-ab88-fed5ba68b755', 'created_at': '2024-05-10T05:02:06.510068Z', 'token_count': 91})]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "zep_retriever = ZepCloudRetriever(\n", + " api_key=zep_api_key,\n", + " session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever\n", + " top_k=3,\n", + " search_scope=\"summary\",\n", + " search_type=\"mmr\",\n", + " mmr_lambda=0.5,\n", + ")\n", + "\n", + "await zep_retriever.ainvoke(\"Who wrote Parable of the Sower?\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/docs/integrations/retrievers/zep_memorystore.ipynb b/docs/docs/integrations/retrievers/zep_memorystore.ipynb index 7cb2126423a..b572d4d62bb 100644 --- a/docs/docs/integrations/retrievers/zep_memorystore.ipynb +++ b/docs/docs/integrations/retrievers/zep_memorystore.ipynb @@ -6,7 +6,7 @@ "collapsed": false }, "source": [ - "# Zep\n", + "# Zep Open Source\n", "## Retriever Example for [Zep](https://docs.getzep.com/)\n", "\n", "> Recall, understand, and extract data from chat histories. 
Power personalized AI experiences.\n", diff --git a/docs/docs/integrations/retrievers/zilliz_cloud_pipeline.ipynb b/docs/docs/integrations/retrievers/zilliz_cloud_pipeline.ipynb new file mode 100644 index 00000000000..bfbf6ff9234 --- /dev/null +++ b/docs/docs/integrations/retrievers/zilliz_cloud_pipeline.ipynb @@ -0,0 +1,222 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Zilliz Cloud Pipeline\n", + "\n", + "> [Zilliz Cloud Pipelines](https://docs.zilliz.com/docs/pipelines) transform your unstructured data to a searchable vector collection, chaining up the embedding, ingestion, search, and deletion of your data.\n", + "> \n", + "> Zilliz Cloud Pipelines are available in the Zilliz Cloud Console and via RestFul APIs.\n", + "\n", + "This notebook demonstrates how to prepare Zilliz Cloud Pipelines and use the them via a LangChain Retriever." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prepare Zilliz Cloud Pipelines\n", + "\n", + "To get pipelines ready for LangChain Retriever, you need to create and configure the services in Zilliz Cloud." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**1. Set up Database**\n", + "\n", + "- [Register with Zilliz Cloud](https://docs.zilliz.com/docs/register-with-zilliz-cloud)\n", + "- [Create a cluster](https://docs.zilliz.com/docs/create-cluster)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**2. 
Create Pipelines**\n", + "\n", + "- [Document ingestion, search, deletion](https://docs.zilliz.com/docs/pipelines-doc-data)\n", + "- [Text ingestion, search, deletion](https://docs.zilliz.com/docs/pipelines-text-data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Use LangChain Retriever" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "vscode": { + "languageId": "shellscript" + } + }, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-milvus" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_milvus import ZillizCloudPipelineRetriever\n", + "\n", + "retriever = ZillizCloudPipelineRetriever(\n", + " pipeline_ids={\n", + " \"ingestion\": \"\", # skip this line if you do NOT need to add documents\n", + " \"search\": \"\", # skip this line if you do NOT need to get relevant documents\n", + " \"deletion\": \"\", # skip this line if you do NOT need to delete documents\n", + " },\n", + " token=\"\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add documents\n", + "\n", + "To add documents, you can use the method `add_texts` or `add_doc_url`, which inserts documents from a list of texts or a presigned/public url with corresponding metadata into the store." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- if using a **text ingestion pipeline**, you can use the method `add_texts`, which inserts a batch of texts with the corresponding metadata into the Zilliz Cloud storage.\n", + "\n", + " **Arguments:**\n", + " - `texts`: A list of text strings.\n", + " - `metadata`: A key-value dictionary of metadata will be inserted as preserved fields required by ingestion pipeline. 
Defaults to None.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# retriever.add_texts(\n", + "# texts = [\"example text 1e\", \"example text 2\"],\n", + "# metadata={\"\": \"\"} # skip this line if no preserved field is required by the ingestion pipeline\n", + "# )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- if using a **document ingestion pipeline**, you can use the method `add_doc_url`, which inserts a document from url with the corresponding metadata into the Zilliz Cloud storage.\n", + "\n", + " **Arguments:**\n", + " - `doc_url`: A document url.\n", + " - `metadata`: A key-value dictionary of metadata will be inserted as preserved fields required by ingestion pipeline. Defaults to None.\n", + "\n", + "The following example works with a document ingestion pipeline, which requires milvus version as metadata. We will use an [example document](https://publicdataset.zillizcloud.com/milvus_doc.md) describing how to delete entities in Milvus v2.3.x. " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'token_usage': 1247, 'doc_name': 'milvus_doc.md', 'num_chunks': 6}" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "retriever.add_doc_url(\n", + " doc_url=\"https://publicdataset.zillizcloud.com/milvus_doc.md\",\n", + " metadata={\"version\": \"v2.3.x\"},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get relevant documents\n", + "\n", + "To query the retriever, you can use the method `get_relevant_documents`, which returns a list of LangChain Document objects.\n", + "\n", + "**Arguments:**\n", + "- `query`: String to find relevant documents for.\n", + "- `top_k`: The number of results. Defaults to 10.\n", + "- `offset`: The number of records to skip in the search result. 
Defaults to 0.\n", + "- `output_fields`: The extra fields to present in output.\n", + "- `filter`: The Milvus expression to filter search results. Defaults to \"\".\n", + "- `run_manager`: The callbacks handler to use." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content='# Delete Entities\\nThis topic describes how to delete entities in Milvus. \\nMilvus supports deleting entities by primary key or complex boolean expressions. Deleting entities by primary key is much faster and lighter than deleting them by complex boolean expressions. This is because Milvus executes queries first when deleting data by complex boolean expressions. \\nDeleted entities can still be retrieved immediately after the deletion if the consistency level is set lower than Strong.\\nEntities deleted beyond the pre-specified span of time for Time Travel cannot be retrieved again.\\nFrequent deletion operations will impact the system performance. \\nBefore deleting entities by comlpex boolean expressions, make sure the collection has been loaded.\\nDeleting entities by complex boolean expressions is not an atomic operation. Therefore, if it fails halfway through, some data may still be deleted.\\nDeleting entities by complex boolean expressions is supported only when the consistency is set to Bounded. For details, see Consistency.\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\nPrepare the boolean expression that filters the entities to delete. \\nMilvus supports deleting entities by primary key or complex boolean expressions. 
For more information on expression rules and supported operators, see Boolean Expression Rules.', metadata={'id': 448986959321277978, 'distance': 0.7871403694152832}),\n", + " Document(page_content='# Delete Entities\\n## Prepare boolean expression\\n### Simple boolean expression\\nUse a simple expression to filter data with primary key values of 0 and 1: \\n```python\\nexpr = \"book_id in [0,1]\"\\n```\\\\\\n\\\\\\n# Delete Entities\\n## Prepare boolean expression\\n### Complex boolean expression\\nTo filter entities that meet specific conditions, define complex boolean expressions. \\nFilter entities whose word_count is greater than or equal to 11000: \\n```python\\nexpr = \"word_count >= 11000\"\\n``` \\nFilter entities whose book_name is not Unknown: \\n```python\\nexpr = \"book_name != Unknown\"\\n``` \\nFilter entities whose primary key values are greater than 5 and word_count is smaller than or equal to 9999: \\n```python\\nexpr = \"book_id > 5 && word_count <= 9999\"\\n```', metadata={'id': 448986959321277979, 'distance': 0.7775762677192688}),\n", + " Document(page_content='# Delete Entities\\n## Delete entities\\nDelete the entities with the boolean expression you created. Milvus returns the ID list of the deleted entities.\\n```python\\nfrom pymilvus import Collection\\ncollection = Collection(\"book\") # Get an existing collection.\\ncollection.delete(expr)\\n``` \\nParameter\\tDescription\\nexpr\\tBoolean expression that specifies the entities to delete.\\npartition_name (optional)\\tName of the partition to delete entities from.\\\\\\n\\\\\\n# Upsert Entities\\nThis topic describes how to upsert entities in Milvus. \\nUpserting is a combination of insert and delete operations. In the context of a Milvus vector database, an upsert is a data-level operation that will overwrite an existing entity if a specified field already exists in a collection, and insert a new entity if the specified value doesn’t already exist. 
\\nThe following example upserts 3,000 rows of randomly generated data as the example data. When performing upsert operations, it\\'s important to note that the operation may compromise performance. This is because the operation involves deleting data during execution.', metadata={'id': 448986959321277980, 'distance': 0.680284857749939}),\n", + " Document(page_content='# Upsert Entities\\n## Flush data\\nWhen data is upserted into Milvus it is updated and inserted into segments. Segments have to reach a certain size to be sealed and indexed. Unsealed segments will be searched brute force. In order to avoid this with any remainder data, it is best to call flush(). The flush() call will seal any remaining segments and send them for indexing. It is important to only call this method at the end of an upsert session. Calling it too often will cause fragmented data that will need to be cleaned later on.\\\\\\n\\\\\\n# Upsert Entities\\n## Limits\\nUpdating primary key fields is not supported by upsert().\\nupsert() is not applicable and an error can occur if autoID is set to True for primary key fields.', metadata={'id': 448986959321277983, 'distance': 0.5672488212585449}),\n", + " Document(page_content='# Upsert Entities\\n## Prepare data\\nFirst, prepare the data to upsert. The type of data to upsert must match the schema of the collection, otherwise Milvus will raise an exception. \\nMilvus supports default values for scalar fields, excluding a primary key field. This indicates that some fields can be left empty during data inserts or upserts. For more information, refer to Create a Collection. 
\\n```python\\n# Generate data to upsert\\n\\nimport random\\nnb = 3000\\ndim = 8\\nvectors = [[random.random() for _ in range(dim)] for _ in range(nb)]\\ndata = [\\n[i for i in range(nb)],\\n[str(i) for i in range(nb)],\\n[i for i in range(10000, 10000+nb)],\\nvectors,\\n[str(\"dy\"*i) for i in range(nb)]\\n]\\n```', metadata={'id': 448986959321277981, 'distance': 0.5107149481773376}),\n", + " Document(page_content='# Upsert Entities\\n## Upsert data\\nUpsert the data to the collection. \\n```python\\nfrom pymilvus import Collection\\ncollection = Collection(\"book\") # Get an existing collection.\\nmr = collection.upsert(data)\\n``` \\nParameter\\tDescription\\ndata\\tData to upsert into Milvus.\\npartition_name (optional)\\tName of the partition to upsert data into.\\ntimeout (optional)\\tAn optional duration of time in seconds to allow for the RPC. If it is set to None, the client keeps waiting until the server responds or error occurs.\\nAfter upserting entities into a collection that has previously been indexed, you do not need to re-index the collection, as Milvus will automatically create an index for the newly upserted data. 
For more information, refer to Can indexes be created after inserting vectors?', metadata={'id': 448986959321277982, 'distance': 0.4341375529766083})]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "retriever.get_relevant_documents(\n", + " \"Can users delete entities by complex boolean expressions?\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "develop", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/docs/integrations/stores/cassandra.ipynb b/docs/docs/integrations/stores/cassandra.ipynb new file mode 100644 index 00000000000..bd9413da773 --- /dev/null +++ b/docs/docs/integrations/stores/cassandra.ipynb @@ -0,0 +1,228 @@ +{ + "cells": [ + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Cassandra\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cassandra\n", + "\n", + "[Cassandra](https://cassandra.apache.org/) is a NoSQL, row-oriented, highly scalable and highly available database.\n", + "\n", + "`CassandraByteStore` needs the `cassio` package to be installed:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet cassio" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The Store takes the following parameters:\n", + "\n", + "* table: The table where to store the data.\n", + "* session: (Optional) The cassandra driver 
session. If not provided, the cassio resolved session will be used.\n", + "* keyspace: (Optional) The keyspace of the table. If not provided, the cassio resolved keyspace will be used.\n", + "* setup_mode: (Optional) The mode used to create the Cassandra table (SYNC, ASYNC or OFF). Defaults to SYNC." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CassandraByteStore\n", + "\n", + "The `CassandraByteStore` is an implementation of `ByteStore` that stores the data in your Cassandra instance.\n", + "The store keys must be strings and will be mapped to the `row_id` column of the Cassandra table.\n", + "The store `bytes` values are mapped to the `body_blob` column of the Cassandra table." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.storage import CassandraByteStore" + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Init from a cassandra driver Session\n", + "\n", + "You need to create a `cassandra.cluster.Session` object, as described in the [Cassandra driver documentation](https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/cluster/#module-cassandra.cluster). The details vary (e.g. 
with network settings and authentication), but this might be something like:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from cassandra.cluster import Cluster\n", + "\n", + "cluster = Cluster()\n", + "session = cluster.connect()" + ] + }, + { + "cell_type": "markdown", + "source": [ + "You need to provide the name of an existing keyspace of the Cassandra instance:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "CASSANDRA_KEYSPACE = input(\"CASSANDRA_KEYSPACE = \")" + ] + }, + { + "cell_type": "markdown", + "source": [ + "Creating the store:" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[b'v1', b'v2']\n" + ] + } + ], + "source": [ + "store = CassandraByteStore(\n", + " table=\"my_store\",\n", + " session=session,\n", + " keyspace=CASSANDRA_KEYSPACE,\n", + ")\n", + "\n", + "store.mset([(\"k1\", b\"v1\"), (\"k2\", b\"v2\")])\n", + "print(store.mget([\"k1\", \"k2\"]))" + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Init from cassio\n", + "\n", + "It's also possible to use cassio to configure the session and keyspace." 
+ ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "import cassio\n", + "\n", + "cassio.init(contact_points=\"127.0.0.1\", keyspace=CASSANDRA_KEYSPACE)\n", + "\n", + "store = CassandraByteStore(\n", + " table=\"my_store\",\n", + ")\n", + "\n", + "store.mset([(\"k1\", b\"v1\"), (\"k2\", b\"v2\")])\n", + "print(store.mget([\"k1\", \"k2\"]))" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "### Usage with CacheBackedEmbeddings\n", + "\n", + "You may use the `CassandraByteStore` in conjunction with a [`CacheBackedEmbeddings`](/docs/how_to/caching_embeddings) to cache the result of embeddings computations.\n" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "from langchain.embeddings import CacheBackedEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", + "\n", + "cassio.init(contact_points=\"127.0.0.1\", keyspace=CASSANDRA_KEYSPACE)\n", + "\n", + "store = CassandraByteStore(\n", + " table=\"my_store\",\n", + ")\n", + "\n", + "embeddings = CacheBackedEmbeddings.from_bytes_store(\n", + " underlying_embeddings=OpenAIEmbeddings(), document_embedding_cache=store\n", + ")" + ], + "metadata": { + "collapsed": false + } + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/docs/integrations/text_embedding/clova.ipynb b/docs/docs/integrations/text_embedding/clova.ipynb new file mode 100644 index 00000000000..73004e8a7a3 --- /dev/null +++ 
b/docs/docs/integrations/text_embedding/clova.ipynb @@ -0,0 +1,86 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Clova Embeddings\n", + "[Clova](https://api.ncloud-docs.com/docs/ai-naver-clovastudio-summary) offers an embeddings service\n", + "\n", + "This example goes over how to use LangChain to interact with Clova inference for text embedding.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"CLOVA_EMB_API_KEY\"] = \"\"\n", + "os.environ[\"CLOVA_EMB_APIGW_API_KEY\"] = \"\"\n", + "os.environ[\"CLOVA_EMB_APP_ID\"] = \"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_community.embeddings import ClovaEmbeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "embeddings = ClovaEmbeddings()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "query_text = \"This is a test query.\"\n", + "query_result = embeddings.embed_query(query_text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "document_text = [\"This is a test doc1.\", \"This is a test doc2.\"]\n", + "document_result = embeddings.embed_documents(document_text)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/docs/integrations/text_embedding/premai.ipynb 
b/docs/docs/integrations/text_embedding/premai.ipynb index d8bf54fd43f..6c509e8fd1e 100644 --- a/docs/docs/integrations/text_embedding/premai.ipynb +++ b/docs/docs/integrations/text_embedding/premai.ipynb @@ -6,25 +6,26 @@ "source": [ "# PremAI\n", "\n", - ">[PremAI](https://app.premai.io) is an unified platform that let's you build powerful production-ready GenAI powered applications with least effort, so that you can focus more on user experience and overall growth. In this section we are going to dicuss how we can get access to different embedding model using `PremAIEmbeddings`\n", + "[PremAI](https://premai.io/) is an all-in-one platform that simplifies the creation of robust, production-ready applications powered by Generative AI. By streamlining the development process, PremAI allows you to concentrate on enhancing user experience and driving overall growth for your application. You can quickly start using our platform [here](https://docs.premai.io/quick-start).\n", "\n", - "## Installation and Setup\n", + "### Installation and setup\n", "\n", - "We start by installing langchain and premai-sdk. You can type the following command to install:\n", + "We start by installing `langchain` and `premai-sdk`. You can type the following command to install:\n", "\n", "```bash\n", "pip install premai langchain\n", "```\n", "\n", - "Before proceeding further, please make sure that you have made an account on Prem and already started a project. If not, then here's how you can start for free:\n", + "Before proceeding further, please make sure that you have made an account on PremAI and already created a project. If not, please refer to the [quick start](https://docs.premai.io/introduction) guide to get started with the PremAI platform. Create your first project and grab your API key." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## PremEmbeddings\n", "\n", - "1. 
Sign in to [PremAI](https://app.premai.io/accounts/login/), if you are coming for the first time and create your API key [here](https://app.premai.io/api_keys/).\n", - "\n", - "2. Go to [app.premai.io](https://app.premai.io) and this will take you to the project's dashboard. \n", - "\n", - "3. Create a project and this will generate a project-id (written as ID). This ID will help you to interact with your deployed application. \n", - "\n", - "Congratulations on creating your first deployed application on Prem 🎉 Now we can use langchain to interact with our application. " + "In this section we are going to discuss how we can get access to different embedding models using `PremEmbeddings` with LangChain. Let's start by importing our modules and setting our API Key. " ] }, { @@ -42,7 +43,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once we imported our required modules, let's setup our client. For now let's assume that our `project_id` is 8. But make sure you use your project-id, otherwise it will throw error.\n" + "Once we imported our required modules, let's set up our client. For now let's assume that our `project_id` is `8`. But make sure you use your project-id, otherwise it will throw an error.\n", + "\n", + "> Note: Setting the `model_name` argument is mandatory for PremAIEmbeddings unlike [ChatPremAI](https://python.langchain.com/v0.1/docs/integrations/chat/premai/)." ] }, { @@ -72,20 +75,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We have defined our embedding model. We support a lot of embedding models. Here is a table that shows the number of embedding models we support. 
\n", - "\n", - "\n", - "| Provider | Slug | Context Tokens |\n", - "|-------------|------------------------------------------|----------------|\n", - "| cohere | embed-english-v3.0 | N/A |\n", - "| openai | text-embedding-3-small | 8191 |\n", - "| openai | text-embedding-3-large | 8191 |\n", - "| openai | text-embedding-ada-002 | 8191 |\n", - "| replicate | replicate/all-mpnet-base-v2 | N/A |\n", - "| together | togethercomputer/Llama-2-7B-32K-Instruct | N/A |\n", - "| mistralai | mistral-embed | 4096 |\n", - "\n", - "To change the model, you simply need to copy the `slug` and access your embedding model. Now let's start using our embedding model with a single query followed by multiple queries (which is also called as a document)" + "We support lots of state of the art embedding models. You can view our list of supported LLMs and embedding models [here](https://docs.premai.io/get-started/supported-models). For now let's go for `text-embedding-3-large` model for this example." ] }, { diff --git a/docs/docs/integrations/text_embedding/upstage.ipynb b/docs/docs/integrations/text_embedding/upstage.ipynb index 6f2452b9785..5736dd5f532 100644 --- a/docs/docs/integrations/text_embedding/upstage.ipynb +++ b/docs/docs/integrations/text_embedding/upstage.ipynb @@ -80,7 +80,7 @@ "source": [ "from langchain_upstage import UpstageEmbeddings\n", "\n", - "embeddings = UpstageEmbeddings()" + "embeddings = UpstageEmbeddings(model=\"solar-embedding-1-large\")" ] }, { @@ -101,7 +101,7 @@ "outputs": [], "source": [ "doc_result = embeddings.embed_documents(\n", - " [\"Sam is a teacher.\", \"This is another document\"]\n", + " [\"Sung is a professor.\", \"This is another document\"]\n", ")\n", "print(doc_result)" ] @@ -123,7 +123,7 @@ }, "outputs": [], "source": [ - "query_result = embeddings.embed_query(\"What does Sam do?\")\n", + "query_result = embeddings.embed_query(\"What does Sung do?\")\n", "print(query_result)" ] }, @@ -184,7 +184,7 @@ "\n", "vectorstore = 
DocArrayInMemorySearch.from_texts(\n", " [\"harrison worked at kensho\", \"bears like to eat honey\"],\n", - " embedding=UpstageEmbeddings(),\n", + " embedding=UpstageEmbeddings(model=\"solar-embedding-1-large\"),\n", ")\n", "retriever = vectorstore.as_retriever()\n", "docs = retriever.invoke(\"Where did Harrison work?\")\n", diff --git a/docs/docs/integrations/tools/apify.ipynb b/docs/docs/integrations/tools/apify.ipynb index 76bf7ef164a..22a22a2fc2f 100644 --- a/docs/docs/integrations/tools/apify.ipynb +++ b/docs/docs/integrations/tools/apify.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet apify-client langchain-openai langchain" + "%pip install --upgrade --quiet apify-client langchain-community langchain-openai langchain" ] }, { diff --git a/docs/docs/integrations/tools/arxiv.ipynb b/docs/docs/integrations/tools/arxiv.ipynb index bb206a44da7..09a685ef936 100644 --- a/docs/docs/integrations/tools/arxiv.ipynb +++ b/docs/docs/integrations/tools/arxiv.ipynb @@ -24,7 +24,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet arxiv" + "%pip install --upgrade --quiet langchain-community arxiv" ] }, { diff --git a/docs/docs/integrations/tools/awslambda.ipynb b/docs/docs/integrations/tools/awslambda.ipynb index e2c14d9bd22..08a7ed4e692 100644 --- a/docs/docs/integrations/tools/awslambda.ipynb +++ b/docs/docs/integrations/tools/awslambda.ipynb @@ -32,7 +32,8 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet boto3 > /dev/null" + "%pip install --upgrade --quiet boto3 > /dev/null\n", + "%pip install --upgrade --quiet langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/azure_dynamic_sessions.ipynb b/docs/docs/integrations/tools/azure_dynamic_sessions.ipynb index 9f70a30c015..4b41a6a1c27 100644 --- a/docs/docs/integrations/tools/azure_dynamic_sessions.ipynb +++ b/docs/docs/integrations/tools/azure_dynamic_sessions.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [ { - 
"name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ " ········\n" @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -qU langchain-azure-dynamic-sessions langchain-openai langchainhub langchain" + "%pip install -qU langchain-azure-dynamic-sessions langchain-openai langchainhub langchain langchain-community" ] }, { @@ -253,7 +253,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAksAAAHFCAYAAADi7703AABztElEQVR4Ae2dB3gUVdfH/0lIoSV0Qgm9E3rooqKCBUSxgKIovIBiQ0UsvBbEgh1QFEQsSFNEsYI0FUUp0nvvvYcWSN3vnss7+202bZPsZmdm//d5lpm5c+fec35nNnu45dwgh0pgIgESIAESIAESIAESyJRAcKa5zCQBEiABEiABEiABEtAE6CzxRSABEiABEiABEiCBbAjQWcoGDm+RAAmQAAmQAAmQAJ0lvgMkQAIkQAIkQAIkkA0BOkvZwOEtEiABEiABEiABEqCzxHeABEiABEiABEiABLIhQGcpGzi8RQIkQAIkQAIkQAJ0lvgOkAAJkAAJkAAJkEA2BOgsZQOHt0iABEiABEiABEiAzhLfARIwCYGJEyciKCjI+SlUqBAqV66Mvn374uDBg04pFy5cqMvIMbdp8eLFePnllxEfH5/bR3MsP336dDRs2BCFCxfW8q1ZsybHZ7IqIDIKi/ykSZMmoWzZsjh37lyuqjl9+jRKlCiBH374wePnfvvtN8TFxaFo0aJa7tw863EjBVBw9OjRuO2221C9enWtx9VXX+1xq5s2bdLv1p49ezx+xtOCL7zwArp27YpKlSppufr06ePpoyxHAl4hQGfJKxhZCQl4j8AXX3yBJUuWYP78+RgwYAC++uordOjQARcuXMh3I+IsDR8+3OvO0vHjx9G7d2/UrFkTc+bM0fLXqVMnz/L2799f15HXChISEvDf//4Xzz77LIoXL56rakqWLIknn3wSTz/9NJKSknJ8VnaM6tGjB0JDQ/HTTz9pua+66qocnzNjgY8//hh79+7FNddcox3N3MgozpK8W75wlkaNGoWTJ0+iW7duCAsLy41YLEsCXiFQyCu1sBISIAGvEYiNjdW9FFJhx44dkZqaildffVX3dNxzzz1ea8ebFW3btg3Jycm499574Q1HQXrU5JPX9OWXX+ofV3G68pIGDhyI1157Dd9++y169eqVbRWHDh3CqVOn0L17d1x77bXZlhUnrkiRItmW8edNcXiCgy//H1reQ7Mk6R005Jo8ebJZxKIcAUSAPUsBZGyqak0Cbdq00YLL//izS9Kr0bZtW/1jLL0pnTp1Stc7I0Nb0lsiyRhmkaGunIbzcqpXhkSuuOIKXW/Pnj1zHL4Rh2HIkCFahoiICJQqVUo7h9KDZqTMhuGqVaumh2Kk56p58+Z6uK9evXr4/PPPjcecx3HjxuHmm2/Ww2lG5tdff61l+/DDD40sfRw2bBhCQkJ0T55xo3z58pqf9LRkl0ROw6mTXizhKXJKMnRYtWoV7rjjDkiPlfS8Sbp06RKGDh2qGUhPiQwvPfLIIxl6/Aydf/nlFzRr1kzrXL9+fci1JBm6lWsZ/mvVqhVWrFih8/P6j+GQ5PZ5kePOO+/Uj4mDbwwnS76RxE5NmjSBYXNxLjdv3mzczvaYV7myrZQ3SSA3BFQXMhMJkIAJCKjhN4f67jqWL1+eTpr3339f53/yySc6/48//tDXcjTS1KlTdV7nzp0dar6MQ80fcrRo0cKhf
ogdixYt0sX279/veOyxx3S5mTNnOtRQn/6cOXPGqCbD0ZN6d+zY4fjoo490vSNGjNB1bty4MUNdRsaDDz7oUL0rjpEjRzpEB/XD73jzzTcdY8aMMYo4lAOj63NmqJOqVas6lGPiaNCggUPNR3LMnTvXoX6gdbk///zTWVT0FI5jx4515hknqsdIMzEYq7lGDvVD7FBzYowizuNbb72l76k5TM489xNpS1hKe8JWmCrnSBczdBC5lSPlUMOq2jZpaWmO66+/3qHmpDlefPFFx7x58xzvvvuuQzk8DuUQOZQj5WzG0Fn18jiUM+mYPXu2o3Xr1g415Od46aWXHO3bt9ftf//99w417OlQTp5DOaPO5/NzouafOVQvoUdVHDt2zCG2Fw7yLhjvluRLMu7dfffdjlmzZmn71ahRwxEVFeVQvZIetWEUEk7333+/cckjCRQIARRIK2yEBEggRwKGs7R06VKHGtJyqKEH7UioScoO1VPkOHLkiK5DHAz5UZKjJDVM56hYsaKjUaNG+lxnqn/k+XLlyjnatWtnZDneeecd/ezu3budeVmd5KZeQ6YZM2ZkVZ0zX374b731Vud1ZieGo+F6TxwH1SvhUD1szuyLFy86VM+UQxwwI4mjKHyEo3sSR0QcEtWz5lBDTtq5EIcgJSXFvah2bqSeX3/9NcM91wxhKeWErWsydBCnxjWpnjFd/u2333bN1g6u1GM4xXJTdFYT5h0HDhxwllUT5/XzFSpUcKh5bM58cZLledUT6MzLz0lunCVpR2wv7RvvpdG2OJuiw0033WRk6eO+ffsc4eHhDjXMmS4/pws6SzkR4n1fEOAwnPp2M5GAmQjIsJtMFpahNFkBFB0dDfWDDRkayixt3boVMm9GJli7DlcUK1YMt99+O5TTABn6ym3yVb0yXCT6PPfcc3oIUDk8HovWtGlTVKlSxVlehnRkIrnrEKWwkKQcRWc540T9OOObb77R85lkKE/9UdUT6GUYzj0Zz7uuRHQv48m12MA1/f777/rSfUWXDGPJcJqsrHNNorMM0xlJht0kyUo11/lPRr4rC+MZ16NyDOH6EQa+TLJYQWzsrm9MTIyeSO6ury9lYd0kkFcCdJbySo7PkYCPCMiSdzVMhNWrV2snaN26dVDDLVm2JquEJKmehgxlVI8T1LAPZDl8bpOv6v3ggw/0KjVZXi/zW2TOkuppwvbt23MUsXTp0hnKiAPk6nAZ5+JIZZZq1aqlVxfKvCGZMJ8ZN3nOeN6oL7O6PMlzr1+4SlgICWvgmmSejzjGBnfjnvBxTcZqsKzyRa/skjjirh+ZDO/LZOjjzkHalPfTuO9LGVg3CeSXAJ2l/BLk8yTgZQLSQyAxe6RHIbMfGPfmDAfi8OHD7re0syW9TTK5OLfJV/VK74ksMd+yZQvU0CJkMrb0fsmEbG+kMmXK6GpkhVpm6dNPP4WaN6MnRMtk72XLlmVWTK9wkxtGfZkW8iBTnCDXJFylZ0fCLbgm6eERHvltz7XOzM7FEXf9eIt7Zm1JXk7vka/1zUou5pNAbgjQWcoNLZYlARMSqFu3rh6mmTZtmh5WMkSUuEzfffedc4Wc5EsvjCRPektyU6+uNA//yNCiDM+oib+QYb+8DBe6Nysr5CTt3LnT/RbWr1+PQYMG4b777oOa+I7GjRtDVvBl1vO2a9cu/byaUJ6hnvxkGOEFpkyZkq4asZXYzLif7qYXL8QRd/0Yzkx+m8jq3ZIVmhKo1F1fNQ8LMiTpa33zqxefJwEhUIgYSIAErE1Aeo7UZGE9pCRznNRkZyQmJkJNONZL0dVKM6eCahK4Plcr7KBWFOnhGHGKMgvcmJt6nQ14cKJWc+m5WOKoSI+XLB+X2DlG2AMPqsi2iNQvP87SWyVBDI0kjogEj5SwCWqlnA5uKPOXZO6SREmXYUHXJM+LI2Ewc72Xn3MJ6aBWw+mhyLNnz+ohVhlqVRPCdXgAmXvmryShB4ygkiKb9HZJrClJL
Vu2hJpwnqVoRlwmNUFdv08yjCmshaFa9aeDhIqTKo6xDL1J76KUEb1zSmq1o7MnTuKOybwsQy41QT/DkGZO9fE+CeSagPoyMJEACZiAgLEazljWnpVIxsoz91VHshpKlpWrHyC9DF39j93xzz//ZKhGxffRq+eUMyQzezOsXnJ/wJN6DZk8WQ2nJnY7VM+GQzlKejWULCFXEbMdJ06ccDZtrCRzZqgTWRnWpUsX1yx9LqvZ5OOalMOhQwy45qmAmTpkgXtYA2MVl4oS7Swuy/ulPQkHkFPKaTWcGm7LUIXq2dPhBKQNCQOghlsdDz30kENWjrmmrHQWu6m4TK5FHVnJka5QDheyJF/qzuwj72dOSW2Xolcaqgnzug7XZ9Twp0M5yDp0g4QMuOWWWxzutsiqfrFvZjJJnrx7TCTgawJB0oB64ZhIgARIwDYEpIdEekKkd0h6mnKbZIWWilkF9WMOY1gvt3WwPAmQgH0I0Fmyjy2pCQmQgAsBmYskQ29GtGuXWzmeyio9WTU3YcKEHMuyAAmQgP0JcIK3/W1MDUkgIAm89957undJ9hXLTZLJ3mrYB6+//npuHmNZEiABGxNgz5KNjUvVSIAESIAESIAE8k+APUv5Z8gaSIAESIAESIAEbEyAzpKNjUvVSIAESIAESIAE8k+AzlL+GbIGEiABEiABEiABGxNgUEovGFf23pLNOyWwn/vWBl6onlWQAAmQAAmQAAn4gIBET5JFILJPoQTizSrRWcqKTC7yxVGSHbSZSIAESIAESIAErEdg//79qFy5cpaC01nKEo3nN4ytIgR2ZGSk5w/mUDI5ORnz5s3TwfFkl3A7JrvraHf95J20u47Uz/p/eWhDa9vQl/aTbX2ks8P4Hc+KFJ2lrMjkIt8YehNHydvOUpEiRXSddnaW7KyjfMntrJ98TeyuI/XLxR9DkxalDU1qGA/FKgj7Gb/jWYmU9QBdVk8wnwRIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQKiIDD4cCW+CCkpTkKqMWMzdBZysiEOSRAAiRAAiRAAn4mkKqco1/WHUK3j5Zg3OYQ/L71uN8kKuS3ltkwCZAACZAACZAACbgRSElNw09rD+GjP3Zg5/EL+m54iAMnzie5lSy4SzpLBceaLZEACZAACZAACWRBQJykH9Ycwpjft2PvyQRdKqpwKO5vWwUVzm3FHS0rZ/Gk77PpLPmeMVsgARIgARIgARLIgoAMt/2sepLe/207dp+43JNUumgY+neogXvbVEFECDB79tYsni6YbDpLBcOZrZAACZAACZAACbgQkAnbs9YfxugF25zDbaWUk/TglTXQu21VFAm77KIkJye7POWfUzpL/uHOVkmABEiABEggIAmIkzR34xHlJG3H1qPnNIMSRULxgHKS7m9bDUXDzeeamE+igHx1qDQJkAAJkAAJ2JuAhACYv+koRiknafPhs1rZyIhCGKCG2/q0r4biEaGmBUBnybSmoWAkQAIkQAIkYA8Cy3adxJtztmD1vnitUDHVe/SfK6qjn/rIJG6zJzpLZrcQ5SMBEiABEiABixKQHqS3lZP0x/9iJBUODUFf1YskQ24lioRZRis6S5YxFQUlARIgARIgAWsQ2H8qAaPmb8P3aw5Cjb6hUHAQ7moVg0HX1ka54hHWUMJFSjpLLjB4SgIkQAIkQAIkkHcCJ88n4kMVTHLq0n1IUnGTJHVtXAFDOtdFtTJF8
16xn5+ks+RnA7B5EiABEiABErA6gYSkFHy2aDfG/7UL5xNTtDpX1CqDZ2+oh0aVo6yuHugsWd6EVIAESIAESIAE/ENAwgB8v/og3pm7FUfOXtJCxFaK1E5Sh9pl/SOUD1qls+QDqKySBEiABEiABOxO4N/dp/DarE1Yd+CMVrVyycJ4RvUkdW1UAcFqjpKdEp0lO1mTupAACZAACZCAjwnsPXkBb/66Bb9uOKJbkjAAj3SspVe5RajVbnZMwVZTauzYsahevToiIiLQokULLFq0KEsV+vTpg6CgoAyfhg0bOp+ZOHFihvvyzKVLl7sTnQV5QgIkQAIkQAIBTODMxWSMmL0ZnUb+pR0l6Ty6p3UVLHz6ajx0dU3Y1VESk1uqZ2n69Ol44oknIA5T+/btMX78eNx4443YtGkTqlSpkuEVfv/99/Hmm28681NSUtCkSRPceeedzjw5iYyMxNat6TfpE2eMiQRIgARIgAQCnUCKWtX21b/7dOTtUxeSNI4OtcvghS4NUDe6eEDgsZSzNHLkSPTr1w/9+/fXxhk9ejTmzp2LcePG4Y033shgsKioKMjHSD/88ANOnz6Nvn37Gln6KD1J0dHR6fJ4QQIkQAIkQAKBTkAibw/7aSO2HLm8h1utcsXwfJf6uLpOWT0qEyh8LOMsJSUlYeXKlXjuuefS2aZz585YvHhxurysLj777DNcd911qFq1aroi58+f13mpqalo2rQpXn31VTRr1ixdGdeLxMREyMdIZ89e3uNGdkb25u7IRl3G0WjPTkdDN+NoJ91EF0Mv42g3/QJBR8N2xtFuNjT0Mo5204/vaN4sKivb3pqzDb+sP6IrKKG2JHn82pq4K64yCoUEQ0ZqCioZ76Zx9Ga7ntYZpDa2U7E1zZ8OHTqESpUq4Z9//kG7du2cAo8YMQJffvllhmE0Z4H/nRw+fBgxMTGYNm0aevTo4by9dOlS7NixA40aNYI4PTJ0N3v2bKxduxa1a9d2lnM9efnllzF8+HDXLH0udRcpUiRDPjNIgARIgARIwAoEUlQcyT8OB2HegWAkpak5v3CgXXkHusSkoaj5t3DLNeKEhAT06tULZ86c0VNysqrAMj1LhgIyZOaaxNdzz3O9b5zLRO4SJUrg1ltvNbL0sU2bNpCPkWQuVPPmzTFmzBh88MEHRna649ChQzF48GBnnjhZ4ohJL5fMf/JWEo93/vz56NSpE0JDbfiWKlB219Hu+sm7bncdqZ+3/qL5rx7a0DP2C7cdx+uzt2LPyQT9QPMqJfBSl3poWNF7v2ueSZK+lC/tZ4wMpW8x45VlnKUyZcogJCQER45c7hI0VDl27BjKly9vXGZ6FIfq888/R+/evREWlv3GfcHBwWjZsiW2b9+eaV2SGR4erj/uBcSh8YVT46t63eX357XddbS7fvLu2F1H6ufPvxDeaZs2zJyjhAJ49ZdNWLD5mC5Qplg4/ntTPXRvVsmjzojMa/V+ri/sJ3V6koI9KWSGMuLkSKgA6WlxTXLtOiznes84//PPP/VQm0wOzymJY7VmzRpUqFAhp6K8TwIkQAIkQAKWJZCYkorRC7ah06i/tKMkm932v6I6/hhyFW5rXtlUjpK/IVumZ0lAydCX9A7FxcWhbdu2+OSTT7Bv3z4MHDhQc5ThsYMHD2LSpEnpuMrE7tatWyM2NjZdvlzI3CMZhpP5SdIdJ0Nv4ix99NFHGcoygwRIgARIgATsQGDxjhN44YcN2HXiglanfa3SGN6tIWqVC4xQALm1oaWcpZ49e+LkyZN45ZVXIBO2xfmRydjG6jbJE+fJNcmkre+++05P3HbNN87j4+PxwAMP6OE9CTMgq+D++usvtGrVyijCIwmQAAmQAAnYgsDxc4k6sKTs5yapbPFwvNS1Abo2rsCepGwsbClnSfR4+OGH9ScznWQSt3sSB0hmu2eVRo0aBfkwkQAJkAAJkIBdCciGt18t34e31DYlZy+lKMcI6N2mKp7qXBdRK
iwAU/YELOcsZa8O75IACZAACZAACbgS2Hz4LP77/Xqs3hevs2V124jujdAkpoRrMZ5nQ4DOUjZweIsESIAESIAErErgQmIK3v9tOz77ezdSVc9S0bAQ3ZN0X9uqOrCkVfXyh9x0lvxBnW2SAAmQAAmQgA8J/L7lKF74fgMOnbmkW7kxNhrDbm6I6KgIH7Zq36rpLNnXttSMBEiABEggwAicPJ+I4T9vwk9rD2nNK5csjFduaYhr6mUfjzDAMOVaXTpLuUbGB0iABEiABEjAXARk47If1x5WEbi34HRCMlTIJPTvUANPXlcHhdXwG1P+CNBZyh8/Pk0CJEACJEACfiVwWA21fbIlGJuWrtdy1IsujrfvaIzGlUv4VS47NU5nyU7WpC4kQAIkQAIBQ0DCAUxdthdvqnAAF5KCERoShEHX1MaDV9VEWCHLbNBhCXvRWbKEmSgkCZAACZAACfw/gZ3Hz2Pod+vx755TOrN6cQfG9mmH+pVK/n8hnnmNAJ0lr6FkRSRAAiRAAiTgWwLJqWn45K9dOiRAUkoaiqj5SEM61UbJkxvUViXFfNt4ANdOZymAjU/VSYAESIAErENg65FzeGrGGmw4eFYLfVWdsni9eyzKFwtVW39tsI4iFpSUzpIFjUaRSYAESIAEAodAiupNGi+9SQu2I0mdy/Ykw25ugO7NKun93JKTkwMHhp80pbPkJ/BslgRIgARIgARyIrDjmPQmrcPa/fG66HX1y+mtSspFMrhkTuy8eZ/Okjdpsi4SIAESIAES8AIB2Z7ks7934d152yBzk4pHFMLLKgL3bc0v9yZ5oQlWkQsCdJZyAYtFSYAESIAESMDXBHafuIAhM9Zi5d7TuimZm/Tm7Y1QIaqwr5tm/VkQoLOUBRhmkwAJkAAJkEBBEpC4SRMX78Hbc7fgUnIaioUXwotd66NHXIyem1SQsrCt9AToLKXnwSsSIAESIAESKHAC+04mYMi3a/Hv7stxk9rXKo23bm+MyiWLFLgsbDAjATpLGZkwhwRIgARIgAQKhIBDber2zYr9eEVtfnshKVXHTRp6U33c27oKe5MKxAKeNUJnyTNOLEUCJEACJEACXiVw8nwihs5cj3mbjup6W1UrhXfvbIIqpdmb5FXQXqiMzpIXILIKEiABEiABEsgNgd+3HMUz367HCeUwyZ5uT3WuiwEdaiAkOCg31bBsARGgs1RAoNkMCZAACZAACSQkpeD1WZvVBrj7NIw65YthVM+maFgxinBMTIDOkomNQ9FIgARIgATsQ2CNCiz55PQ1kNAAkv7TvjqeuaEuIkJD7KOkTTWhs2RTw1ItEiABEiABcxCQ7Uo++mMnPvh9OyTYZLSKvv1ejyZoX6uMOQSkFDkSoLOUIyIWIAESIAESIIG8EdijepGe/GYNVu+L1xXc3KQiXrslFlFFQvNWIZ/yCwE6S37BzkZJgARIgATsTEBCAny78gCG/bQRCSokgGxX8tqtsbilaSU7q21b3egs2da0VIwESIAESMAfBM5eSsbz32/Az2sP6ebb1Cilht2aolIJblfiD3t4o006S96gyDpIgARIgARIQBFYte80Bn21GgdOX9RhAAZ3qoOBV9VkSACLvx10lixuQIpPAiRAAiTgfwIycXvcwh0YteDyJO6YUoXx/l3N0LxKSf8LRwnyTYDOUr4RsgISIAESIIFAJnD4zEUdEmDprsv7unWTSdzdYxEZwUncdnkv6CzZxZLUgwRIgARIoMAJzN14BM9+tw7xCcl6X7dX1Eq325tX4r5uBW4J3zZIZ8m3fFk7CZAACZCADQlcSk7Fa7M2YcrSy5G4G1WKUsNuTVGjbDEbakuV6CzxHSABEiABEiCBXBDYeuQcHvtqFbYdPa+feuDKGhii9nYLKxSci1pY1EoE6CxZyVqUlQRIgARIwG8EJHbSV//ux/CfNyIxJQ1lioVjpIrEfWWdsn6TiQ0XDAE6SwXDma2QAAmQAAlYmMA5FTvpvy6xk
65SDpJsWSIOE5P9CdBZsr+NqSEJkAAJkEA+CGw8dAaPTlutN8ANCQ7CM9fXxYAONRCszpkCgwCdpcCwM7UkARIgARLIJQEZdpu6bB9e+WUTktSwW8WoCIzp1QwtqpbKZU0sbnUCdJasbkHKTwIkQAIk4HUCMuw2dOZ6/LLusK772nrl8O6dTVCyaJjX22KF5idAZ8n8NqKEJEACJEACBUhgw0EZdluFPScTUEgNtT17Qz3071CdsZMK0AZma4rOktksQnlIgARIgAT8QkCG3aaoYbdXf1bDbqlpeuNbGXbjliV+MYepGqWzZCpzUBgSIAESIAF/EDgrw27frces9ZeH3a6rf3nYrUQRDrv5wx5ma9NyEbTGjh2L6tWrIyIiAi1atMCiRYuyZLpw4ULdbRoUFJTuuGXLlnTPfPfdd2jQoAHCw8P18fvvv093nxckQAIkQAL2JSDDbjeP+Vs7SjLs9kKX+phwXxzoKNnX5rnVzFLO0vTp0/HEE0/g+eefx+rVq9GhQwfceOON2LdvX7Z6b926FYcPH3Z+ateu7Sy/ZMkS9OzZE71798batWv1sUePHli2bJmzDE9IgARIgATsSWD68n24bdxi7FXzkyqVKIwZA9uq+Uk1OD/JnubOs1aWcpZGjhyJfv36oX///qhfvz5Gjx6NmJgYjBs3LlsA5cqVQ3R0tPMTEhLiLC91dOrUCUOHDkW9evX08dprr9V1OwvxhARIgARIwFYEZG+3p2esVZvgrtdhAWTYbfagDmhWpaSt9KQy3iFgmTlLSUlJWLlyJZ577rl0mnfu3BmLFy9Ol+d+0axZM1y6dEkPsb3wwgvo2LGjs4j0LD355JPOazm5/vrrs3WWEhMTIR8jnT17Vp8mJydDPt5KRl3G0Vv1mqkeQzfjaCbZvCGLoZdx9EadZqvD0M04mk2+/Mpj6GUc81uf2Z439DKOZpPPG/IYuhnHvacS8OhXa7FF7fEmcSWfvLYWHlCr3YJV94FRxhvtFlQdhszGsaDaLah2DL2Mozfb9bROyzhLJ06cQGpqKsqXL5+Ok1wfOXIkXZ5xUaFCBXzyySd6bpM4N5MnT4b0GslcpiuvvFIXk2dzU6c89MYbb2D48OFGM87jvHnzUKRIEee1t07mz5/vrapMW4/ddbS7fvJi2V1H6mfaPx8eCyY2XH8qCFN3BONiahCKFXLg/jppqHJhC+bMST+X1eNKTVSQ72jujZGQkODRQ5ZxlgxtZLK2a5Klnu55xv26detCPkZq27Yt9u/fj3fffdfpLMk99+ezq1PKy5Dd4MGD5VQn6VmS4UDp5YqMjDSy830Uj1defhkmDA0NzXd9ZqzA7jraXT95p+yuI/Uz41+O3MkkNpwzbz42hdTAp1v36YebVymB93s2RnRkRO4qM2FpvqN5N4oxMpRTDZZxlsqUKQOZa+Tei3Ts2LEMPUPZKd2mTRtMmTLFWUTmMuW2Tlk1Jx/3JA6NL5waX9XrLr8/r+2uo931k3fH7jpSP3/+hchf2yfOJ2LcpmBsP3vZUfpP++oYelM9hIaocTcbJb6juTemMPMkWeZNCQsL08Np7t2Mct2uXTtPdNVlZBWdDM8ZSXqb3OuU4bTc1GnUxSMJkAAJkIC5CKzYcwq3jF2qHKVgFA0LwYcqyORLNzewnaNkLur2k8YyPUuCXoa+ZIl/XFwcxMmR+UgSNmDgwIHaMjI8dvDgQUyaNElfy0q3atWqoWHDhpAJ4tKjJDGV5GOkxx9/XA/JvfXWW7jlllvw448/YsGCBfj777+NIjySAAmQAAlYjIBMp/js791489ctSElzoHxhByb2b436lUpaTBOKawYClnKWJB7SyZMn8corr+iYSbGxsZg9ezaqVq2qWUosJdeYS+IgDRkyRDtQhQsX1k7TrFmzcNNNNznZSw/S119/DVkl9+KLL6JmzZqQeE6tW7d2luEJCZAACZCAdQjIJrjPfrcOs9dfXvzTtVE0rix8ALXKFbOOE
pTUVAQs5SwJuYcfflh/MqM4ceLEdNnPPPMM5JNTuuOOOyAfJhIgARIgAWsT2HHsHB6YvBK7jl9QQ20SjbsB7o6riF9/PWBtxSi9XwlYzlnyKy02TgIkQAIkYFoCczYcwVPfrMGFpFS9yu2je5qjRdWSloydZFrIASoYnaUANTzVJgESIAG7EEhVc5JGzd+GD//YoVVqXb0UxFEqUyzjqmW76Ew9CpYAnaWC5c3WSIAESIAEvEjgTEIyHp++Ggu3Hte12jUsgBeRsao8EKCzlAdofIQESIAESMD/BLYcOYsH1fwk2QQ3IjQYb97WGLc2q+R/wSiB7QjQWbKdSakQCZAACdifwM9rD+GZb9fhotoQt3LJwhjfuwUaVoyyv+LU0C8E6Cz5BTsbJQESIAESyAuBlNQ0vD13Kz75a5d+/IpaZTDm7mYoWTQsL9XxGRLwiACdJY8wsRAJkAAJkIC/CZy6kITHvlqFf3ac1KIMvKomnr6+LkKC0+8Z6m852b79CNBZsp9NqREJkAAJ2I7AhoNn9Pykg/EXUURtW/L2HY3RtXFF2+lJhcxJgM6SOe1CqUiABEiABP5HYOaqAxg6cz0SU9JQtXQRfNI7DnWji5MPCRQYATpLBYaaDZEACZAACeSGgMxPGjF7Cz7/Z7d+rGPdshjdsxmiini2U3xu2mJZEsiOAJ2l7OjwHgmQAAmQgF8IxCck4dFpq/H3jhO6/ceuqYUnr6uDYM5P8os9Ar1ROkuB/gZQfxIgARIwGYFtR89hwKQVOn5S4dAQjOzRBDc2qmAyKSlOIBGgsxRI1qauJEACJGByAvM3HcUTX6/W+7tVKlEYE+6LQ4OKkSaXmuLZnQCdJbtbmPqRAAmQgAUIOBwOfKT2dntP7fGmTiH7u41V+7uV5v5uFrCe/UWks2R/G1NDEiABEjA1gYSkFDw9Yx1mrT+s5ezdpipeurkBQkOCTS03hQscAnSWAsfW1JQESIAETEfgwOkEPDBpJTYdPotCavL2K7fEolfrKqaTkwIFNgE6S4Ftf2pPAiRAAn4j8O/uU3hoykqcVJG5S6vtSsbd2wKt1PAbEwmYjQCdJbNZhPKQAAmQQAAQmLpsL4b9uBEpaQ40qBCJT+5roTbELRIAmlNFKxKgs2RFq1FmEiABErAogWQVaHL4zxsxZek+rUGXxhXwjtq6pEgYf44satKAEJtvZ0CYmUqSAAmQgP8JnDyfiIenrsIyNfwmSTbBffjqmggK4ka4/rcOJciOAJ2l7OjwHgmQAAmQgFcIbD1yDv2+XI4Dpy+iqNoId/RdzdCpQXmv1M1KSMDXBOgs+Zow6ycBEiCBACfw2+ajGPTV5UCTVUoVwaf3x6FOeW6EG+CvhaXUp7NkKXNRWBIgARKwDgEJNPnZ37vx+uzNOtBkmxqlMO6eFiipVr4xkYCVCNBZspK1KCsJkAAJWIRAUkoaXvhhPb5ZcUBLfHerGAzvFouwQgw0aRETUkwXAnSWXGDwlARIgARIIP8ETqm4SQNV/CSJo6TiTOL5Lg3wn/bVOJE7/2hZg58I0FnyE3g2SwIkQAJ2JLD9qEzkXoF9pxJQLLwQxtzdDB3rlbOjqtQpgAjQWQogY1NVEiABEvAlgYVbj+GxaatxLjEFMaUK47P7W3Iity+Bs+4CI0BnqcBQsyESIAESsCcBmcg9cfEevPrLJqiA3GhVrRQ+7t0CpTiR254GD0Ct6CwFoNGpMgmQAAl4i4BE5H5JbVvy1b+XI3Lf2aIyXusei/BCId5qgvWQgN8J0FnyuwkoAAmQAAlYk0B8QpLaCHcVluw6qSZvA0NvrIcBHWpwIrc1zUmpsyFAZykbOLxFAiRAAiSQOYGdx8+j38Tl2HMyQUfk/kBN5L62PiNyZ06LuVYnQGfJ6hak/CRAAiRQwAQWbT+u93g7dykFlUqoidx94lAvOrKApWBzJFBwBOgsFRxrtkQCJEAClicweelevPzTRqSqmdwtqpbEeDWRu0yxc
MvrRQVIIDsCdJayo8N7JEACJEACmoA4R6/P2ozP/9mtr29rVgkjbmuEiFBO5OYrYn8CdJbsb2NqSAIkQAL5InBBxU16/OvVWLD5mK5nSOc6eKRjLU7kzhdVPmwlAnSWrGQtykoCJEACBUzgyJlLKiL3cmw8dFbv6zayRxN0bVyxgKVgcyTgXwJ0lvzLn62TAAmQgGkJbDh4RjtKR88morQKMDnh/jg0r1LStPJSMBLwFQE6S74iy3pJgARIwMIEFmw6ikFq6C0hKRW1yhXDF31aqi1MilhYI4pOAnknQGcp7+z4JAmQAAnYjoBsXfLFP3vw2qzLW5dcUasMPrqnOaIKh9pOVypEAp4SCPa0oFnKjR07FtWrV0dERARatGiBRYsWZSnazJkz0alTJ5QtWxaRkZFo27Yt5s6dm678xIkT9STFIBV+1vVz6dKldOV4QQIkQAJ2J5Citi4ZpsICvPK/Pd7ubhWDL/q2pKNkd8NTvxwJWMpZmj59Op544gk8//zzWL16NTp06IAbb7wR+/Zd3pPIXdu//vpLO0uzZ8/GypUr0bFjR9x88836Wdey4kgdPnw43UecMSYSIAESCBQCEmCy/6QVmLRkr/qP4+WtS0Z0b4TQEEv9TASKuahnAROw1DDcyJEj0a9fP/Tv319jGj16tO4pGjduHN54440M6OS+axoxYgR+/PFH/Pzzz2jWrJnzlvQoRUdHO695QgIkQAKBROBUInD3p/9i69HzKm5SMEb3bIobYisEEgLqSgLZErCMs5SUlKR7h5577rl0CnXu3BmLFy9Ol5fVRVpaGs6dO4dSpUqlK3L+/HlUrVoVqampaNq0KV599dV0zlS6wuoiMTFRf4z8s2fP6tPk5GTIx1vJqMs4eqteM9Vj6GYczSSbN2Qx9DKO3qjTbHUYuhlHs8mXX3kMvYxjfusz2/Or957EqPUhOJt8HmWLheHje5qhceUor/4t87fOhu2Mo7/l8Xb7hl7G0dv1+7s+Qy/j6E15PK0zSE3mc3izYV/VdejQIVSqVAn//PMP2rVr52xGeou+/PJLbN261ZmX1ck777yDN998E5s3b0a5cuV0saVLl2LHjh1o1KgRxOl5//33IcN2a9euRe3atTOt6uWXX8bw4cMz3Js2bRqKFOFqkQxgmEECJGBKAutOBWHS9mAkpwWhQhEHHqiXilLcucSUtqJQviGQkJCAXr164cyZM3puc1atWKZnyVBAhsxck/h67nmu943zr776CuLkyDCc4SjJvTZt2uiPUa59+/Zo3rw5xowZgw8++MDITnccOnQoBg8e7MwTJysmJgbSyyXzn7yVxOOdP3++nncVGmrPlSh219Hu+sm7bncd7aif/N387J+9+HzpNsh/l+uXSMOXD16FksUKe+vPl6nqsaMNXQFTP1cauTs3RoZyesoyzlKZMmUQEhKCI0eOpNPp2LFjKF++fLo89wuZGC5znWbMmIHrrrvO/Xa66+DgYLRs2RLbt29Pl+96ER4eDvm4J3FofOHU+Kped/n9eW13He2un7w7dtfRLvolqxVvshHutGX79Ff+HrXirUXwbu0oiY52TnaxYVY2on5Zkck639N3PjjrKsx1JywsTIcKkJ4W1yTXrsNyrvfkXHqU+vTpAxki69Kli/vtDNfyP641a9agQgVObswAhxkkQAKWJnD2UjL+M3G5dpSkk/7Frg0wrGs9hKTvsLe0jhSeBHxBwDI9S6K8DH317t0bcXFxOmbSJ598osMGDBw4ULOR4bGDBw9i0qRJ+locpfvuu0/PQ5LhNqNXqnDhwoiKitJlZO6R3JP5SdIdJ0Nv4ix99NFH+j7/IQESIAE7EDgUfxF9vvgX29SKtyJhIfjgrma4rkF5W03ktoOdqIM5CVjKWerZsydOnjyJV155RcdEio2N1ZOxZSWbJImV5Bpzafz48UhJScEjjzyiP4YJ7r//fkgwSknx8fF44IEHtCMlDpSEFJD4TK1atdL3+Q8Jk
AAJWJ3AJrUJbt+J/0L2eCtXPByfq61LYitd/g+j1XWj/CRQEAQs5SwJkIcfflh/MoNjOEDGvYULFxqnWR5HjRoF+TCRAAmQgB0JLNp+HA9NWYXziSmoU17t8da3FSqVsOdEbjvajzqZg4DlnCVzYKMUJEACJGB+At+uPIDnvluHlDQH2tQohfG947h1ifnNRglNSIDOkgmNQpFIgARIID8EZKHKmN93YOT8bbqaW5pWxNt3NEZ4oZD8VMtnSSBgCdBZCljTU3ESIAE7EpDQAC/+sAFfL9+v1Xvo6pp4unNdBAdzyZsd7U2dCoYAnaWC4cxWSIAESMDnBC6oeUkPT12FP7cdh/hGw7s1RO+21XzeLhsgAbsToLNkdwtTPxIggYAgcOzcJR1DacPBs3oz3DF3N0cnFRqAiQRIIP8E6CzlnyFrIAESIAG/Ethx7Bzu/3w5DqpYSqWLhuHT++PQrEpJv8rExknATgToLNnJmtSFBEgg4Aj8u/sUBkxagTMXk1GtdBF8+Z9WqFq6aMBxoMIk4EsCdJZ8SZd1kwAJkIAPCfyy7hAGT1+LJDWpu1mVEvj0vjiULpZx30ofisCqSSAgCNBZCggzU0kSIAE7EZDQAJ8u2o3XZ2/WanVWc5PeV9uXFFbbmDCRAAl4nwCdJe8zZY0kQAIk4DMCqSrA5Ku/bMLExXt0G33aVdMb4oYwNIDPmLNiEqCzxHeABEiABCxC4FJyKh7/ejXmbjyqJX7+pvro36E6goIYQ8kiJqSYFiVAZ8mihqPYJEACgUXg1IUk9PtyOVbvi0dYSDDe69EENzepGFgQqC0J+IkAnSU/gWezJEACJOApgb0nL6DPF8ux+8QFREYUwgQ1kbt1jdKePs5yJEAC+SRAZymfAPk4CZAACfiSwJr98eg3cTlOqp6lSiUKq9AALVGrXHFfNsm6SYAE3AjQWXIDwksSIAESMAuB+ZuO4rGvVuFSchpiK0Xi8/tbolxkhFnEoxwkEDAE6CwFjKmpKAmQgJUITF6yB8N+2gi1+A1X1SmLsfc0R9Fw/sm2kg0pq30I8JtnH1tSExIgARsQSFPe0dtzt+LjP3dqbXrGxeC17rEIVZO6mUiABPxDgM6Sf7izVRIgARLIQCAxJRVPz1iHn9Ye0vcGd6qDx66pxdAAGUgxgwQKlgCdpYLlzdZIgARIIFMCsrfbg5NXYOmuUyikAky+eXtj3NGicqZlmUkCJFCwBOgsFSxvtkYCJEACGQgcjL+IPp//i+3HzqOYmpc07t7m6FC7bIZyzCABEvAPATpL/uHOVkmABEhAE9h46Az6qhhKx84lonxkOL7o0woNKkaSDgmQgIkI0FkykTEoCgmQQGAR+GvbcTw0ZSUuJKWiTvlimNi3FSqqWEpMJEAC5iJAZ8lc9qA0JEACAUJgxor9GDpzPVLU6re2Khr3x71bIKpwaIBoTzVJwFoE6CxZy16UlgRIwOIEHA4H3v9tO0Yv2K41ubVpRbx1R2OEFwqxuGYUnwTsS4DOkn1tS81IgARMRiA5NQ3Pf78e36w4oCV7+OqaGNK5LoLV6jcmEiAB8xKgs2Re21AyEiABGxE4n5iCh6eugsxTEt/olVticW+bqjbSkKqQgH0J0Fmyr22pGQmQgEkIHDt7CX3VZrgbD51F4dAQjLm7Ga5rUN4k0lEMEiCBnAjk2lmS8fY///wTixYtwp49e5CQkICyZcuiWTP15b/uOsTExOTUJu+TAAmQQMAQ2H70HPqo0AASS6l00TB83qclmsSUCBj9qSgJ2IGAx5sNXbx4ESNGjNDO0I033ohZs2YhPj4eISEh2LFjB4YNG4bq1avjpptuwtKlS+3AhjqQAAmQQL4ILN11ErePW6wdpeplimLmw+3oKOWLKB8mAf8Q8LhnqU6dOmjdujU+/vhjXH/99QgNzbjEde/evZg2bRp69uyJF154AQMGDPCPVmyVBEiABPxMQPZ3G/LNWiSpSd3Nq5TAp/e3RCnVs
8REAiRgPQIeO0u//vorYmNjs9WwatWqGDp0KJ566imI48REAiRAAoFGQKYqfPLXLrzx6xat+g0NozH6rqaIUHOVmEiABKxJwGNnKSdHyVX9sLAw1K5d2zWL5yRAAiRgewKpKsDk8J83YtKSy/9Z7Nu+Gl7o0gAhDA1ge9tTQXsT8HjOkiuGF198Eampqa5Z+vzMmTO4++67M+QzgwRIgATsTuCi2rJkoNq6xHCUXuhSH8NubkhHye6Gp34BQSBPztKkSZPQvn177Ny50wlp4cKFaNSokV4h58zkCQmQAAkEAIGT5xNx94SlmL/pKMIKBeOjXs3Rv0ONANCcKpJAYBDIk7O0bt06VKtWDU2bNsWECRPw9NNPo3PnzujTpw/+/vvvwCBHLUmABEhAEdhz4oJe8bZmf7ze221q/9bo0rgC2ZAACdiIgMdzllx1joqKwtdff43nn38eDz74IAoVKgSZAH7ttde6FuM5CZAACdiawKp9p9H/yxU4dSEJlUsWxsS+rVCrXDFb60zlSCAQCeSpZ0lAjRkzBqNGjdJzlGrUqIFBgwZh7dq1gciQOpMACQQggXkbj6CXGnoTR6lRpSgdQ4mOUgC+CFQ5IAjkyVmSoJTDhw+HzF2aOnUqVq9ejSuvvBJt2rTB22+/HRDgqCQJkEDgEpi0ZA8eVJO5LyWnoWPdsvj6gTYoVzwicIFQcxKwOYE8OUspKSmQeUt33HGHxlO4cGGMGzcO3377re5tsjkzqkcCJBCgBNJUaIA3Zm/GSz9uhAqnhLtbxWDCfXEoGp6nGQ0BSpFqk4D1COTJWZo/fz4qVqyYQdsuXbpg/fr1GfK9mTF27Fi9rUpERARatGih96jLrn7Zx07KSXkZLpQI5O7pu+++Q4MGDRAeHq6P33//vXsRXpMACQQ4gcSUNDw+fQ3Gq4CTkoZ0roMR3RuhUEie/owGOE2qTwLWIuD1b3mZMmU0AYli6+00ffp0PPHEE3piuQz9dejQATIkuG/fvkyb2r17t96rTspJ+f/+9796bpU4R0ZasmSJ3p6ld+/ees6VHHv06IFly5YZRXgkARIIcAIJKUDfL1fiZ7WFSSEVYHJkjyZ49JraCAoKCnAyVJ8EAoOAx85S/fr19b5vSUlJ2ZLZvn07HnroIbz11lvZlsvLzZEjR6Jfv37o378/RJ7Ro0frjX1lCDCzJL1IVapU0eWkvDz3n//8B++++66zuNTRqVMnvU1LvXr19FFW9Uk+EwmQAAkcjL+I0RtCsHzPaRRTw22y4u225pUJhgRIIIAIeDzQ/tFHH+HZZ5/FI488omMqxcXF6aE4Gd46ffo0Nm3apGMsyfHRRx/Fww8/7FWM4qStXLkSzz33XLp6Jb7T4sWL0+UZF9JrJPddk2wC/NlnnyE5OVlvBixlnnzySdcieqPg7JylxMREyMdIZ8+e1adSp3y8lYy6jKO36jVTPYZuxtFMsnlDFkMv4+iNOs1Wh6GbcTSbfPmRZ+Ohs+g/eRVOXAxC+eLh+PS+5qgXXdyr3/P8yOeNZw27GUdv1Gm2OgzdjKPZ5MuvPIZexjG/9ZnteUMv4+hN+Tyt02Nn6ZprrsHy5cu1YyLDYdOmTdPRui9evAgZemvWrBnuu+8+3HvvvShRooQ3ddF1nThxQm+xUr58+XR1y/WRI0fS5RkXkp9ZeZmgLvVVqFBBP5tZmazqlLrfeOMNvRrQaMc4zps3D0WKFDEuvXaUOWJ2T3bX0e76yftpNx03xwfhi63BSEwLQoXCDgysfQG7Vi3C5RlL9vtG2s1+mVnI7jpSv8ysnn1eQkJC9gX+d9djZ8morV27dpCPv5L7HAGZG+We5yqb+z1jLpVrvuu5PJtTnUOHDsXgwYOdzUjPUkxMjO7FioyMdObn90Q8Xnn5ZZgwNDQ0v9WZ8nm762h3/eSlsqOOM1YexIRlmyAb47auVgLdy55Atxvt+T20o/3c/9jZXUfq525xz6+NkaGcnsi1s5RThb66L
71XISEhGXqRjh07lqH3yJAhOjo60/IScbx06dK6WFZl3HubjDrlKKvm5OOexKHxhVPjq3rd5ffntd11tLt+8u7YQUf5j9LoBdvx/m/b9dehe7NKeK1bfSyYN8cW+mX3HbeD/bLTT+7ZXUfql9MbkPG+MPMk5dlZ+u233yAfcVbS0tLStfX555+nu/bGRVhYmA4BID0t3bt3d1Yp17fccovz2vWkbdu2+Pnnn12zIENlMt/KACRlpA7XeUtSxp+9Z+kE5gUJkECBEEhOTcN/Z67HjJUHdHuPdKypwgPUhQzbM5EACQQ2gTw5SxK9+5VXXtFOh8z7cR/G8hVSGfqSpf3i7IiT88knn+iwAQMHDtRNyvDYwYMHdWRxyZD8Dz/8UA+ZDRgwADKZWyZ3f/XVV04RH3/8cR19XFbvidP1448/YsGCBdwQ2EmIJyRgfwLnLiXj4amrsGj7CajIAHjt1kbo1bqK/RWnhiRAAh4RyJOzJEvyJ06cqB0Xj1rxUqGePXvi5MmT2lE7fPgwYmNjMXv2bFStWlW3IHmuMZeqV6+u70uvkazmk0CaH3zwAW6//XanRNKDJJsCv/DCC3jxxRdRs2ZNyAT21q1bO8vwhARIwL4Ejp69hD5fLMfmw2dRODQEH93TDNfUS7+QxL7aUzMSIAFPCOTJWZJl/P4appKQBFmFJRAHzj1dddVVWLVqlXt2umvZtsXYuiXdDV6QAAnYmsC2o+fQ5/N/cejMJZQpFobP+7RE48olbK0zlSMBEsg9AY+DUrpWLcEdJXQAEwmQAAlYlcCSnSdx+7jF2lGqUaYoZj7Uno6SVY1JuUnAxwTy1LN06dIlPV9I5vY0btzYOVnakFUibTORAAmQgFkJ/LjmIJ6esQ5JalJ3XNWSejPckkXDzCou5SIBEvAzgTw5S+vWrUPTpk216Bs2bEinQkFN9k7XKC9IgARIwAMCEhrg4z934a05W3TpG2OjMapnU0SouUpMJEACJJAVgTw5S3/88UdW9TGfBEiABExJQAJMDvtpA6Ys3afl63dFdTx/U30Ey/I3JhIgARLIhkCenKVs6uMtEiABEjAdgYtJqXjsq9VYsPmoCnUCvNClAcRZYiIBEiABTwh47CzddtttOlyAbOch59mlmTNnZneb90iABEigwAicOJ+Ifl+uwNr98QgrFIz31bDbjY0qFFj7bIgESMD6BDx2lqKiopzBJ+WciQRIgATMTmD3iQu4X4UG2HcqASWKhOLT++IQV62U2cWmfCRAAiYj4LGz9MUXXzhFHzt2rN7ipGjRojpvz549+OGHH1C/fn1cf/31znI8IQESIAF/EVi59zT6f7kcpxOSEVOqMCb2bYWaZYv5Sxy2SwIkYGECeYqzJNuCTJ48WasdHx+PNm3a4L333sOtt96KcePGWRgHRScBErADgTkbjqDXhKXaUWpcOUrHUKKjZAfLUgcS8A+BPDlLEhG7Q4cOWuJvv/0W5cuXx969e/WebLKdCBMJkAAJ+IvAxH9246GpK5GYkqa2LSmHrx9og7LFw/0lDtslARKwAQGPh+FcdU1ISEDx4sV11rx58/SE7+DgYN3DJE4TEwmQAAkUNIE0FRrgjV83Y8Ki3bpp2Qj3lW4NUSgkT/8nLGjx2R4JkICJCeTpr0itWrX0HKX9+/dj7ty56Ny5s1bx2LFjkNVyTCRAAiRQkAQuJadi0NernY7S09fXxeu3xtJRKkgjsC0SsDGBPDlLL730EoYMGYJq1aqhdevWaNu2rUYkvUzNmjWzMS6qRgIkYDYC8QlJ6P3ZMvyy7jBCQ4JURO4meKRjLefqXbPJS3lIgASsRyBPw3B33HEHrrjiChw+fBhNmjRxan3ttdeie/fuzmuekAAJkIAvCexXIQHu/+Jf7Dp+AcXDC+Hj3i3QvlYZXzbJukmABAKQQJ6cJeEUHR2tP67MWrVq5XrJcxIgARLwGYF1B+Lxn4nLceJ8EipERejQAHWjL8+l9FmjrJgES
CAgCeTZWQpIWlSaBEjAFAR+U9uWPDptNS6quUr1K0Tiiz4tEa0cJiYSIAES8AUBOku+oMo6SYAEfEZg8tK9GPbjBqjFb7iyTll81KsZikeE+qw9VkwCJEACdJb4DpAACViCgIQGeGvuFoz/c5eWt0dcZbzevZGa1J2ndSqW0JlCkgAJmIMAnSVz2IFSkAAJZEMgMSUVQ2asw89rD+lSgzvVwWPXcMVbNsh4iwRIwIsE6Cx5ESarIgES8D6BM2pvtwGTV+Df3adQKDgIb97eGHe0qOz9hlgjCZAACWRBgM5SFmCYTQIk4H8CEhqgr1rxtuPYeR0aYNy9LXBF7TL+F4wSkAAJBBQBOksBZW4qSwLWIbD+wBntKJ04n4joSBUa4D8tUS+aOwRYx4KUlATsQ4DOkn1sSU1IwDYEft9yFI9MvRwaoJ6KnfRF35YqllJh2+hHRUiABKxFgM6StexFaUnA9gSmLduHF35Yr0MDdFBDbmPvac7QALa3OhUkAXMToLNkbvtQOhIIGAISGuDdeVsxduFOrbNM4n7jNoYGCJgXgIqSgIkJ0FkysXEoGgkECgEJDfDMt+vw45rLoQGeuK42Hr+2NjfDDZQXgHqSgMkJ0FkyuYEoHgnYnYCEBnhAhQZY9r/QANKbdGdcjN3Vpn4kQAIWIkBnyULGoqgkYDcCB04noM8Xl0MDFAsvpOcnyRYmTCRAAiRgJgJ0lsxkDcpCAgFEYMPBy6EBjp+7HBrgc7UZboOKDA0QQK8AVSUByxCgs2QZU1FQErAPgT+2HlOhAVYhISlVxU5iaAD7WJaakIA9CdBZsqddqRUJmJaAhAZ48ccNSFWr39rXKg2Jyh0ZEWpaeSkYCZAACdBZ4jtAAiRQIAQkNMBbc7dg/J+7dHu3Na+EN29rjLBCwQXSPhshARIggbwSoLOUV3J8jgRIwGMCl5JTMfibNZi9/oh+hqEBPEbHgiRAAiYgQGfJBEagCCRgZwKyt9uASSuwel88QkOC8PYdjdG9WWU7q0zdSIAEbEaAzpLNDEp1SMBMBHYcO6c3w91/6iKiCodifO8WaFOjtJlEpCwkQAIkkCMBOks5ImIBEiCBvBBYvPMEBk5eibOXUlClVBG9GW7NssXyUhWfIQESIAG/EqCz5Ff8bJwE7Eng25UHMHTmOiSnOtC8SglMuC8OpYuF21NZakUCJGB7AnSWbG9iKkgCBUfA4XBg1ILt+OC37brRLo0r4L07myAiNKTghGBLJEACJOBlAnSWvAyU1ZFAoBJITEnD09+twQ//2wz34atrYkjnuggODgpUJNSbBEjAJgQsE+Dk9OnT6N27N6KiovRHzuPj47M0Q3JyMp599lk0atQIRYsWRcWKFXHffffh0KHLu5obD1599dV6Z/OgoCDn8a677jJu80gCJOABgQvJQJ+JK7SjFKKcozfVZrjP3FCPjpIH7FiEBEjA/AQs4yz16tULa9aswZw5c/RHzsVhyiolJCRg1apVePHFF/Vx5syZ2LZtG7p165bhkQEDBuDw4cPOz/jx4zOUYQYJkEDmBPaeTMCoDSFYsTcexdVmuBP7tsRdrapkXpi5JEACJGBBApYYhtu8ebN2kJYuXYrWrVtrzBMmTEDbtm2xdetW1K1bNwN66YGaP39+uvwxY8agVatW2LdvH6pU+f8/5kWKFEF0dHS6srwgARLImcCKPad0DKXTl4JQMSpCrXhrhbpqrzcmEiABErATAUs4S0uWLNFDb4ajJAZo06aNzlu8eHGmzlJmRjpz5oweaitRokS621OnTsWUKVNQvnx53HjjjRg2bBiKF8/6D35iYiLkY6SzZ8/qUxn6k4+3klGXcfRWvWaqx9DNOJpJNm/IYuhlHL1Rp1nqmKWicT8zcwOS1FylmKIOTOnXHBVLRnj1O2AGXQ3bGUczyORNGQy9jKM36zZLXYZuxtEscnlLDkMv4+ites1Sj6GXcfSmXJ7WGaRWrzi82bAv6hoxYgQmTpyoh
9Fc669Tpw769u2LoUOHumZnen7p0iVcccUVqFevnnaMjELSQ1W9enXds7RhwwZdV61atTL0Shnl5fjyyy9j+PDhrln6fNq0aZBeKiYSsDMB+Ysx/2AQZu2/vMKtUck09K6dhnAueLOz2akbCdiSgEzZkWk+0pkSGRmZpY5+7VnKyulwlXb58uX6UiZguyfx8zLLdy8nnqNM2k5LS8PYsWPT3Zb5SkaKjY1F7dq1ERcXp+c5NW/e3LiV7ijO2eDBg5150rMUExODzp07Zwvb+YCHJyK3DCV26tQJoaH23JXd7jraTb/k1DQM+3mzcpQO6re4b7uqGHxNdfz+2wLbvqd2s6H7nx+76yf62l1H6uf+Vnt+bYwM5fSEX52lRx99VDsx2QlZrVo1rFu3DkePHs1Q7Pjx43roLMMNlwx5iXr06IHdu3fj999/z9GZEQdJHJPt27cjK2cpPDwc8nFP8pwvnBpf1esuvz+v7a6jHfQ7czEZD09djX92nIREA3i5W0Pc17aac9jNDjpm9x2gftnRscY92tAadspKSl/YT+r0JPnVWSpTpgzkk1OSidzSRfbvv//qCdpSftmyZTqvXbt2WT5uOEri+Pzxxx8oXTrnPak2btyo//hXqFAhy3p5gwQCjcA+teLtP18ux45j51EkLAQf9mqGa+qVDzQM1JcESCBACQRbQe/69evjhhtugAyZyYo4+ch5165d003ulvlI33//vVYpJSUFd9xxB1asWAGZwJ2amoojR47oT1JSki6zc+dOvPLKK7rMnj17MHv2bNx5551o1qwZ2rdvbwU0lJEEfE5guVrxduvYf7SjVD4yHN882JaOks+pswESIAEzEfBrz1JuQIjDM2jQID0vSJ6TeEkffvhhuiokjID0QEk6cOAAfvrpJ33etGlTfTT+kV4mCUYZFhaG3377De+//z7Onz+v5x116dJFr4YLCeFsVYMXj4FL4Du9x9t6JKm5So0qRek93qJViAAmEiABEggkApZxlkqVKpVuFVtmRnJd2CdznVyvMysvk7L//PPPzG4xjwQCmkBamgMj52/Dh3/s0BxuaBiNUT2borAagmMiARIggUAjYBlnKdAMQ31JwF8ELial4qkZazBbxVGSxD3e/GUJtksCJGAWAnSWzGIJykECJiBw7Owl9J+0AusOnEFoiOzx1hi3t6hsAskoAgmQAAn4jwCdJf+xZ8skYCoCGw6e0VuXHD5zCSWLhGJ87zi0ql7KVDJSGBIgARLwBwE6S/6gzjZJwGQE5m08gse/XoOLyamoWbYoPu/TElVLFzWZlBSHBEiABPxDgM6Sf7izVRIwBQFZBPHJX7vw5pwtakEE0KF2GRVDqTmiCnsWqM0USlAIEiABEvAxATpLPgbM6knArARkA9wXfliPb1Yc0CLe26YKXr65IQqFWCL8mlmxUi4SIAEbEqCzZEOjUiUSyInA6QtJGDhlJZbtPqW3LnmpawPc366aR3st5lQ375MACZCA3QjQWbKbRakPCeRAYOfx8+g3cTn2qC1MioUXwhi1dUnHuuVyeIq3SYAESCBwCdBZClzbU/MAJPDPjhN4SPUonb2UgkolCuuJ3HWjiwcgCapMAiRAAp4ToLPkOSuWJAFLE5i6bC+G/bgRKSo6d/MqJfDJfXEoUyzc0jpReBIgARIoCAJ0lgqCMtsgAT8SSFb7ur36yyZMWrJXS3FL04p46/bGiAjl1iV+NAubJgESsBABOksWMhZFJYHcEohPSMLDU1dh8c6T+tGnr6+rty8JCgrKbVUsTwIkQAIBS4DOUsCanorbncCOY+fQ78sV2KsmchdRG+COVhvhdlYb4jKRAAmQAAnkjgCdpdzxYmkSsASBP7Ycw2Nfrcb5xBRULlkYn94fh3rRkZaQnUKSAAmQgNkI0Fkym0UoDwnkg4BE5J6waBfe+PVyRG7Z223cPc1RmhO580GVj5IACQQ6ATpLgf4GUH/bELik9nX77/frMXPVQa3T3
a2qYHi3hggrxIjctjEyFSEBEvALATpLfsHORknAuwSOnbuEByevxOp98QgJDoJE5L6vbVVG5PYuZtZGAiQQoAToLAWo4am2fQhsOHgGAyatwOEzl/QGuGPVsFv7WmXsoyA1IQESIAE/E6Cz5GcDsHkSyA+BX9YdwpAZa3EpOQ01yxZVE7lbonqZovmpks+SAAmQAAm4EaCz5AaElyRgBQJpKgr36AXb8MHvO7S4HeuWxft3N0NkRKgVxKeMJEACJGApAnSWLGUuCksCwAUVDuCpb9ZizsYjGscDV9bAszfU03OVyIcESIAESMD7BOgseZ8payQBnxHYc+ICHpi8AtuOnkdYSDBG3NYId7So7LP2WDEJkAAJkABAZ4lvAQlYhMDCrccwSAWaPHspBeWKh+Pj3i3UhrglLSI9xSQBEiAB6xKgs2Rd21HyACEggSY//nMX3p57OdBk8yol8PG9LVAuMiJACFBNEiABEvAvATpL/uXP1kkgWwIJSSl4+tt1mLXusC4ngSZf7tYA4YVCsn2ON0mABEiABLxHgM6S91iyJhLwKoF9agNcmZ+05cg5hIYEKSepIe5pXdWrbbAyEiABEiCBnAnQWcqZEUuQQIET+Hv7CTz61SrEJySjjNrX7eN7myOuWqkCl4MNkgAJkAAJcII33wESMBUBmZ/06aLdaiPczVChlNAkpgTGq/lJ0VGcn2QqQ1EYEiCBgCLAnqWAMjeVNTOBi0mpeG7mOvy45pAW804VEuDVW2MREcr5SWa2G2UjARKwPwE6S/a3MTW0AIEDpxP0RrgbD51FIdkI9+YG6N2GG+FawHQUkQRIIAAI0FkKACNTRXMTWLxTzU+athqnLiShdNEwfKQ2wm1To7S5haZ0JEACJBBABOgsBZCxqaq5CMj8pM//2YMRszcjVU1Qiq0UifG941CpRGFzCUppSIAESCDACdBZCvAXgOr7h4Ds7/bsd+vwy//iJ3VvVglvqK1LOD/JP/ZgqyRAAiSQHQE6S9nR4T0S8AGBncfPY+Dkldh+7Lyen/R8l/ro064agoKCfNAaqyQBEiABEsgvATpL+SXI50kgFwTmbDiCITPW4rzqWZL93caq+UmMn5QLgCxKAiRAAn4gQGfJD9DZZOARSElNw3sLtqg93nZq5VtVL4UPezVTDhPjJwXe20CNSYAErEaAzpLVLEZ5LUfgXDLQ98uVWLr7tJa9/xXV8eyN9dQWJsGW04UCkwAJkEAgEqCzFIhWp84FRmD1/ni8sy4EZ5JOo0hYCN6+ozG6Nq5YYO2zIRIgARIggfwTsMx/bU+fPo3evXsjKipKf+Q8Pj4+WwJ9+vTRk2Zl4qzxadOmTbpnEhMT8dhjj6FMmTIoWrQounXrhgMHDqQrwwsSyC0BCQsweele3PPZcuUoBaFGmSL48ZH2dJRyC5LlSYAESMAEBCzjLPXq1Qtr1qzBnDlz9EfOxWHKKd1www04fPiw8zN79ux0jzzxxBP4/vvv8fXXX+Pvv//G+fPn0bVrV6SmpqYrxwsS8JSAbFvylJrE/eIPG5Cc6kCTUmn49sE2qF2+uKdVsBwJkAAJkICJCFhiGG7z5s3aQVq6dClat26t8U2YMAFt27bF1q1bUbdu3SyRhoeHIzo6OtP7Z86cwWeffYbJkyfjuuuu02WmTJmCmJgYLFiwANdff32mzzGTBLIisPfkBb1tyZYj56B2LcGQzrVR8cxmFI+wxFctK7WYTwIkQAIBTcASPUtLlizRQ2+GoyQWk+E0GZJbvHhxtgZcuHAhypUrhzp16mDAgAE4duyYs/zKlSuRnJyMzp07O/MqVqyI2NjYHOt1PsATEvgfgXkbj6DrmL8hjpJsWzKlf2sMUJO5GT6JrwgJkAAJWJuAJf67e+TIEe3wuKMWJ0juZZVuvPFG3HnnnahatSp2796NF198Eddccw3ESZIeJ3k2LCwMJUuWTFdF+fLls61X5jnJx0hnz57Vp+J4ycdbyajLOHqrX
jPVY+hmHM0km6eyJKuwAO/O247PF+/VjzSNicIHPZugQlSE832wsn45cTB0M445lbfafUMv42g1+XOS19DLOOZU3or3Dd2MoxV1yE5mQy/jmF1ZK94z9DKO3tTB0zr96iy9/PLLGD58eLZ6L1++XN/PLLqxTKLNLN+osGfPnsap7i2Ki4vTjtOsWbNw2223Oe+5n+RU7xtvvJGp3PPmzUORIkXcq8v39fz58/Ndh9krsKqOp5XPPHFbCPacV2NuKl1dIQ03VzyJ1f/8jtUu0K2qn4sKOZ7aXUfql+MrYPoCtKHpTZStgL6wX0JCQrZtGjf96iw9+uijuOuuuwxZMj1Wq1YN69atw9GjRzPcP378OKQXyNNUoUIF7Sxt375dPyJzmZKSkiAr7Vx7l2Sorl27dllWO3ToUAwePNh5X3qWZJ6TDOdFRkY68/N7Ih6vvBydOnVCaGhofqsz5fNW1vGv7Sfw8rfrcTohWc9JerN7Q3RukP59tLJ+nr4wdteR+nn6Jpi3HG1oXtt4Ipkv7WeMDOUkh1+dJVmuL5+ckkzklsnY//77L1q1aqWLL1u2TOdl59S413vy5Ens378f4jRJatGihXZCxCHp0aOHzpOVcxs2bMDbb7+trzP7R4bw5OOexKHxhVPjq3rd5ffntZV0lGjcoxdsx4d/7NDIYitFYmyvFqhSOuteRSvpl9f3wO46Ur+8vhnmeY42NI8t8iKJL+wndXqSgj0p5O8y9evXh4QAkAnasiJOPnIuS/xdV8LVq1dPhwEQeSUEwJAhQyCTw/fs2QOZ6H3zzTdr56x79+5aJZkg3q9fPzz11FP47bffsHr1atx7771o1KiRc3Wcv3Vn++YicOzsJdz72TKno9S7TVV8O7Bdto6SuTSgNCRAAiRAArkl4NeepdwIO3XqVAwaNMi5ck2CR3744YfpqpAwAtIDJSkkJATr16/HpEmTdPBK6U3q2LEjpk+fjuLF/z/ezahRo1CoUCHds3Tx4kVce+21mDhxon4+XeW8CHgCi3eewKCv1uDE+UQUVdG437i9Mbo1qRjwXAiABEiABOxOwDLOUqlSpSAxkLJLMjHbSIULF8bcuXONyyyPERERGDNmjP5kWYg3AppAWpoDH6kht1ELtkGdol50cXx0T3PULFssoLlQeRIgARIIFAKWcZYCxSDU01wEpBdp8Ddr8de241qwnnExeLlbQxRWPUtMJEACJEACgUGAzlJg2Jla5oHA4h0n8MT0NTh2LhERocF47dZGuKNF5TzUxEdIgARIgASsTIDOkpWtR9l9QsBY7fbRwh2Qkd3a5Yrhw17NUVcNvzGRAAmQAAkEHgE6S4Fnc2qcDYGD8Rfx+FersWLvaV3q7lYxeKkrh92yQcZbJEACJGB7AnSWbG9iKugpgTkbjuDZ79bhzEUVZDK8kFrt1ghdG3O1m6f8WI4ESIAE7EqAzpJdLUu9PCZwKTkVI2ZvxqQle/UzTWJK4MO7myGmVNZBJj2unAVJgARIgAQsT4DOkuVNSAXyQ2DHsfN4dNoqbDlyTlfz4JU18FTnuggrZIl4rflRnc+SAAmQAAl4SIDOkoegWMxeBCQm14yVBzDsx424qHqWShcNw8ieTXFVnbL2UpTakAAJkAAJ5JsAnaV8I2QFViNw7lIyXvhhA35cc0iL3r5WaYzq0RTlIiOspgrlJQESIAESKAACdJYKADKbMA+BlWqV2xPTV2P/qYsICQ7C4E518NBVNRGszplIgARIgARIIDMCdJYyo8I82xGQ2Eljft+hN8BNVXuWVCpRGB/c3RQtqpayna5UiARIgARIwLsE6Cx5lydrMyGBfScTdG/Sqn3xWrpbm1bEK7fGIjIi1ITSUiQSIAESIAGzEaCzZDaLUB6vEZBJ3DNXHcRLP27AhaRUHTvpte6xuKVpJa+1wYpIgARIgATsT4DOkv1tHJAanklIxn9/WI9Z6w5r/VtVK6VWuzVB5ZKMnRSQLwSVJgESIIF8E
KCzlA94fNScBJbsPInB36zB4TOXUEhN3H5STeIeqCZxy4RuJhIgARIgARLILQE6S7klxvKmJZCUkoaR87dh/F879Qa41csUxWgVO0kicjORAAmQAAmQQF4J0FnKKzk+ZyoCW1UEbulN2njorJbrrpYxeLFrAxRVe7wxkQAJkAAJkEB+CPCXJD/0+KzfCUgYgE8X7cJ787YhSYUHKFEkFG/e1hg3xEb7XTYKQAIkQAIkYA8CdJbsYceA1EJCAjw1Yw2W7zmt9b+mXjnlKDViJO6AfBuoNAmQAAn4jgCdJd+xZc0+IiAhAab9uw+vz9qMBBUSoGhYCF66uQF6xMUgKIiTuH2EndWSAAmQQMASoLMUsKa3puJHz17CM9+uw5/bjmsFWlcvhXfvbIKYUgwJYE2LUmoSIAESMD8BOkvmtxEl/B+Bn9YewotqA9wzF5MRVigYz1xfF/9pX537uvENIQESIAES8CkBOks+xcvKvUHg9IUkvKCicBsBJhtVisLIHk1Qu3xxb1TPOkiABEiABEggWwJ0lrLFw5v+JjBnwxG8oHqTTpxP1EElH7umFh7pWAuhIcH+Fo3tkwAJkAAJBAgBOksBYmirqXlSOUcv/bTR2ZtUq1wx3ZvUuHIJq6lCeUmABEiABCxOgM6SxQ1oN/Flpdsvaj+3YcpROqWG32SLkgevrIFB19ZGRGiI3dSlPiRAAiRAAhYgQGfJAkYKFBGPnbukJ3DP3XhUq1wvujjeuaMJGlWOChQE1JMESIAESMCEBOgsmdAogSaS6kzCD2sO4bXZW/VKN9n89lE1N+nhq2vpVW+BxoP6kgAJkAAJmIsAnSVz2SPgpDmi4iZ9siUYm5Zu0LrHVorUvUn1K0QGHAsqTAIkQAIkYE4CdJbMaRfbSyVzk75ZsR+v/rIZ5xOD1eq2IDxxXR08oOYncaWb7c1PBUmABEjAUgToLFnKXPYQdtfx8/jv9+uxdNcprVDVYg6M69sODSqVtIeC1IIESIAESMBWBOgs2cqc5lYmKSUN4//ciTF/7ICcR4QG43E1Nyn6zCbUVqEBmEiABEiABEjAjAToLJnRKjaUacWeUxg6cz22HzuvtbuyTlm8fmssoouHYvbsTTbUmCqRAAmQAAnYhQCdJbtY0qR6yD5ub8/ZgqnL9mkJSxcNw0s3N0C3JhURFBSE5ORkk0pOsUiABEiABEjgMgE6S3wTfEJAJnD/qrYqkeCSx88l6jZ6xsVg6E31UKJImE/aZKUkQAIkQAIk4AsCdJZ8QTXA6zwUfxEvqY1vF2w+pknUKFMUr3dvhLY1Swc4GapPAiRAAiRgRQJ0lqxoNZPKnJyahon/7MHoBdtwISlVhwN4SAWWfPjqmtyqxKQ2o1gkQAIkQAI5E6CzlDMjlvCAwNJdJ3Vv0raj53XpuKol8cZtjVC7fHEPnmYREiABEiABEjAvATpL5rWNJSST/dzemL0F368+qOUtpSZwP3dDPdzRojKC1bYlTCRAAiRAAiRgdQLBVlHg9OnT6N27N6KiovRHzuPj47MVX1ZbZfZ55513nM9dffXVGcrcddddzvs8yZxAihpy++Kf3bj23T+1o6RQ457WVfD7U1ehR8sYOkqZY2MuCZAACZCABQlYpmepV69eOHDgAObMmaMxP/DAA9p5+vnnn7PEfvjw4XT3fv31V/Tr1w+33357uvwBAwbglVdeceYVLlzYec6TjARW7j2FF37YiM2Hz+qbjStH4dVbYtEkpkTGwswhARIgARIgAYsTsISztHnzZu0kLV26FK1bt9bIJ0yYgLZt22Lr1q2oW7dupmaIjo5Ol//jjz+iY8eOqFGjRrr8IkWKwL1sugK80AROnE/EW79uwYyVB/R1VOFQPHNDXdzVsgpCOOTGt4QESIAESMCmBCwxDLdkyRI99GY4SmKLNm3a6LzFixd7ZJqjR49i1qxZumfJ/YGpU6eiTJkyaNiwIYYMGYJz5865Fwnoa9maZMJfu9DxnYVOR
0liJsmQ2z2tq9JRCui3g8qTAAmQgP0JWKJn6ciRIyhXrlwGa0ie3PMkffnllyhevDhuu+22dMXvueceVK9eXfcsbdiwAUOHDsXatWsxf/78dOVcLxITEyEfI509e3k4SqJRezMitVGXcTTaK6ijBJZcuO0ERvy6FXtOJuhmYytG4qUu9dCsSgl9nV/ZjOeNY0HpVlDtGHoZx4JqtyDbMXQzjgXZdkG0ZehlHAuizYJsw9DLOBZk2wXVlqGbcSyodguqHUMv41hQ7RZUO4ZextGb7XpaZ5D6QXR4s+Hc1PXyyy9j+PDh2T6yfPlyzJs3D+LsyJCba6pdu7buKXruuedcszM9r1evHjp16oQxY8Zket/IXLlyJeLi4iDH5s2bG9npjlnJPW3aNMiQnh3SEeUb/bA3GJvjg7U6xUMd6FolDa3KOsARNztYmDqQAAmQAAkkJCRA5kSfOXMGkZGRWQLxa8/So48+ipxWnlWrVg3r1q2DDKO5p+PHj6N8+fLu2RmuFy1apB2t6dOnZ7jnniEOUmhoKLZv356lsyS9T4MHD3Y+Kj1LMTEx6Ny5c7awnQ94eCIer/RwiZMnMhVEkr3cPvxjJ6as34+UNIcOLNmnbVU8dFUNFI/w/uviDx0LgqPRht31Ez3triP1M95m6x5pQ+vaTiT3pf2MkaGcCHn/1y+nFl3uyzwh+eSUZCK3eH3//vsvWrVqpYsvW7ZM57Vr1y6nx/HZZ5+hRYsWaNKkSY5lN27cqA1ToUKFLMuGh4dDPu5JHBpfODW+qtdVfgkFMH3Ffrw3bxtOXUjStzo1KI/nb6qPamq7El+ngtDR1zpkV7/d9RPd7a4j9cvuDbfGPdrQGnbKSkpf2E/q9CT51VnyREApU79+fdxwww2QJf7jx4/Xj0nogK5du6ZbCSdDbW+88Qa6d+/urFq8xhkzZuC9995z5hknO3fuhEzuvummm7TTtmnTJjz11FNo1qwZ2rdvbxSz9VFGYf/YegxvqlVuRvTt2uWK4aWbG6BD7bK21p3KkQAJkAAJkIAnBCzhLIki4tQMGjRID3XJdbdu3fDhhx/KqTPJnCbpgXJNX3/9NcQhuPvuu12z9XlYWBh+++03vP/++zh//rweSuvSpQuGDRuGkJCQDOXtlrHh4Bm8PmszlqitSiSVKBKKJ66tjXvbVEWhkMtzleymM/UhARIgARIggdwSsIyzVKpUKUyZMiVb/TKbqy49UPLJLMk8oz///DOzW7bOO3A6Ae/O3Yof1hzSeoYVCkbfdtXwcMdakNhJTCRAAiRAAiRAAv9PwDLO0v+LzLO8EpDJ22P/2IEvFu+BxE6S1L1ZJTzVuQ4ql7THKr68suFzJEACJEACJJAVATpLWZGxUX5iSiqmLN2HMb9vR3xCstasXc3S+K+avB1bKcpGmlIVEiABEiABEvA+ATpL3mdqmhplhdvM1Qfx/oLtOBh/Ucslk7fFSbq6blm9gbBphKUgJEACJEACJGBSAnSWTGqY/IiVpuIj/brhCN6bvxW7jl/QVZWPDMcT19XBnS0qc/J2fuDyWRIgARIggYAjQGfJRiaXCe4Ltx7HO2ry9qbDZ7VmJdUKt4evroXeKrBkRKj9V/jZyJxUhQRIgARIwCQE6CyZxBD5FWOZWv4vTtKKvad1VcXCC6F/h+rod0V1FXmbK9zyy5fPkwAJkAAJBC4BOksWt/3qfacxSs1J+mvbca1JuAoD0EeFARh4VU2ULBpmce0oPgmQAAmQAAn4nwCdJf/bIE8SrNx7CqOVk7Ro+wn9fCG1u+1drWLw2DW1UT4yIk918iESIAESIAESIIGMBOgsZWRi6hwZbvtAhQD4Z8flqNshykmSWEmDlJNUpTRjJZnaeBSOBEiABEjAkgToLFnAbDJxe/HOEzoEwLLdp7TE0pN0Z1xlPXk7phSdJAuYkSKSAAmQAAlYlACdJRMbTpykrfFBmPzZcjVxO15LGhoShB5xMXjo6pqMum1i21E0E
iABEiAB+xCgs2RiWw6avg5zNsty/3iEqY1tZU6STNyuWKKwiaWmaCRAAiRAAiRgLwJ0lkxsz7iqJfDbpiPo1aaq6kmqjegoTtw2sbkoGgmQAAmQgE0J0FkysWF7qjlJYUc34u6b6iE0lLGSTGwqikYCJEACJGBjAsE21s3yqknE7SiGSrK8HakACZAACZCAtQnQWbK2/Sg9CZAACZAACZCAjwnQWfIxYFZPAiRAAiRAAiRgbQJ0lqxtP0pPAiRAAiRAAiTgYwJ0lnwMmNWTAAmQAAmQAAlYmwCdJWvbj9KTAAmQAAmQAAn4mACdJR8DZvUkQAIkQAIkQALWJkBnydr2o/QkQAIkQAIkQAI+JkBnyceAWT0JkAAJkAAJkIC1CdBZsrb9KD0JkAAJkAAJkICPCdBZ8jFgVk8CJEACJEACJGBtAnSWrG0/Sk8CJEACJEACJOBjAnSWfAyY1ZMACZAACZAACVibAJ0la9uP0pMACZAACZAACfiYQCEf1x8Q1TscDq3n2bNnvapvcnIyEhISIPWGhoZ6tW6zVGZ3He2un7xHdteR+pnlr0Xe5aAN887ODE/60n7G77bxO56VvnSWsiKTi/xz587p0jExMbl4ikVJgARIgARIgATMQEB+x6OiorIUJUh5U5e7RbIswhs5EUhLS8OhQ4dQvHhxBAUF5VTc4/vi8YoDtn//fkRGRnr8nJUK2l1Hu+sn75rddaR+VvqLkrmstGHmXKyS60v7iQskjlLFihURHJz1zCT2LHnhbRHAlStX9kJNmVchjpJdnSVDY7vraHf9xI5215H6Gd9W6x5pQ+vaTiT3lf2y61EyiGXtRhkleCQBEiABEiABEiCBACZAZymAjU/VSYAESIAESIAEciZAZylnRn4rER4ejmHDhkGOdk1219Hu+sl7aXcdqZ/1//rQhta2oRnsxwne1n6HKD0JkAAJkAAJkICPCbBnyceAWT0JkAAJkAAJkIC1CdBZsrb9KD0JkAAJkAAJkICPCdBZ8jFgVk8CJEACJEACJGBtAnSWrG0/Sk8CJEACJEACJOBjAnSWfAw4p+pff/11tGvXDkWKFEGJEiVyKq7vS8TRl19+WUccLVy4MK6++mps3Lgx3bOJiYl47LHHUKZMGRQtWhTdunXDgQMH0pUpiIvTp0+jd+/eOoy8BP6S8/j4+GyblijomX3eeecd53Ois3uZu+66y3m/oE7yol+fPn0yyN6mTZt0IpvFfiJUbnWUfZyeffZZNGrUSL97Ehn3vvvu01HuXZX0lw3Hjh2L6tWrIyIiAi1atMCiRYtcxcpw/ueff+pyUr5GjRr4+OOPM5T57rvv0KBBA70yUI7ff/99hjIFmZEbHWfOnIlOnTqhbNmyOuhf27ZtMXfu3HTiTpw4McM7K9+/S5cupStXUBe50W/hwoWZyr5ly5Z04prJhrnRL7O/J2Kbhg0bOvUzk/3++usv3Hzzzfr3S+T84YcfnHJmdWKK76Bsd8LkPwIvvfSSY+TIkY7Bgwc7lDPhkSBvvvmmQ22t4lBfbsf69esdPXv2dFSoUMGhQsI7nx84cKCjUqVKjvnz5ztWrVrl6Nixo6NJkyaOlJQUZ5mCOLnhhhscsbGxjsWLF+uPnHft2jXbpg8fPuxw/Xz++ecO9aVy7Ny50/ncVVdd5RgwYEC6csoJc94vqJO86Hf//fc75DlXHU+ePJlOZLPYT4TKrY5ih+uuu84xffp0h/pBcixZssTRunVrh3JM0unoDxt+/fXXDrUptWPChAmOTZs2OR5//HGH+s+EY+/evelkMy527drlUP+R0eWkvDwnz3/77bdGEf1eh4SEOEaMGOHYvHmzPhYqVMixdOlSZ5mCPMmtjsLgrbfecvz777+Obdu2OYYOHap1lL8bRvriiy8cKnpyundW3l9/pNzq98cff8iWXo6tW7emk9/1b6H8fTKLDXOrn3zfXP+WqO2xHKVKlXKosDNO85jJfrNnz3Y8//zz+vdL7
KL+Y+GUM7MTs3wHkZlwzCt4AvIye+IsqX3oHNHR0Q5xmIyk/nenn1X/49VZ8uWRP+jypTPSwYMHHWpbFsecOXOMLJ8f5cdFvgyuPxrywyl58iPqabrlllsc11xzTbri8kMrf+T9mfKqnzhLolNWySz2E/nyqqO7bvJDLHZ3dUr8YcNWrVo5xBF1TfXq1XM899xzrlnO82eeecYh913Tgw8+6FA9gc6sHj16aIfSmaFOrr/+eofq6XTNKrDz3OqYmWCqd8wxfPhw5y1P/z45H/DhSW71M5wl1UOapVRmsmFu9XNXSpwP+c/lnj17nLfMZD+nUOrEE2fJLN9BDsMpa1kp7d69G0eOHEHnzp2dYkvALvXDA/W/I523cuVKyFCIaxkZClG9Os4yzod9eKIcIz38pnoVnK3IcJNyCj2W4+jRo5g1axb69evnrMM4mTp1qh5mlO7mIUOG6M0QjXsFccyPfjI0UK5cOdSpUweqhwzHjh1zimwW+4lA+dHRqZA6OXPmjB4KcR9qLkgbJiUlQdi6fi9ERrk2vjuuMsu56O9eXjlCWLFihf6OZVcmqzrd2/DmdV50dG9fNgaXjUVV70S6W+fPn0fVqlX1PpiqdxirV69Od78gLvKjX7NmzaB64HHttddCOVDpxM3KzgVtw/zoZyj02WefQfXsalsZeXI0g/1c5fH0PCvbFPR3kBvpemoxk5QTR0lS+fLl00kk1+p/7TpPyoSFhaFkyZIZyhjPp7vhowtpSxwC9yR5nsrx5ZdfQg054rbbbktXzT333KPnnaheNmzYsAFq6ABr166FGnZMV86XF3nV78Ybb8Sdd96p/5iJ8/viiy9C9ZzpH3JxfKVeM9hP2OVVR1fuMq9F9dygV69e6TaELmgbnjhxAqmpqZl+d7J6HyU/s++aGsKB1Cc/vlmVyapOVzbePs+Lju4yvPfee7hw4QJUb4vzlupdg8x7kXlosgP8+++/j/bt2+vvXO3atZ3lfH2SF/3ERp988omedyZzASdPnqwdJvkPy5VXXqlFNosN86KfK3M1HIdff/0V06ZNc82GWeyXTigPL7KyTUF/B+kseWiw3BSTydeqCzvbR5YvX464uLhsy2R3UybGuSbp0XTPc70v556UcX8ms2tP9ZNnM5MpN3Ko+UqQH1WZXOuapDfGSNJjJn+whaeaZ4HmzZsbt/J09LV+ao6ZUy6RXeSW/7FLD5q7U+gsqE5yw831uczOfa2j0ab0cMrEe+mtkEmrrsmXNnRtx/3c/Z3MiWtm5aVO13zXc7mXU51Sxpcpr/J89dVXkHfjxx9/TPcfHekRdl2EII6SfM/GjBmDDz74wJeqZFp3bvSrW7cu5GMkmcCu5vXg3XffdTpLci83dRp1+eqYV1nEoZXe21tvvTWdaGazXzrhPLjIjIc85prvei73vP0dpLMkVL2cHn30Uf0DkV211apVy+52lvekJ0WSeNvyPyYjyTCO8T9gKSPdubKKybV3ScrIyrv8Jk/1W7duHWQYzT0dP37cKav7PddrWaWkJmVCTRR2zc70XP5wq3la2L59e76dpYLSz1BE7CjOksguydf2kzYKQkdxlKR3QnrPfv/993S9SiKDe/KmDd3rlmtZGaom8ervjut91++Oa76ciy3ce4ikvJrAjdKlS+viWZUxvo/udfryOi86GvLI90yGu2fMmKGHcYz8zI5q/iNatmzpfGczK+OLvPzo5yqPOA9TpkxxZpnFhvnRT5wD+c+lrDiWnunskr/sl51MWd3LyjYF/h1UgJlMQMDTCXjGBG9ZvWIk1bWc6QRvWY1kpEOHDvltgveyZcsMMfRkb/Wl8GiCt0yEdl9B5azI7URWBUq9aomp2x3fXRqTn/OqnyGZ6np3qOE3hxpy1FnGBG9/20+EyauOyll3qP/dOtR8ModyLgxVsz0WhA1l8uxDDz2UTo769etnO8Fb7rsmmSCufmydWTI5WA2tOq/lRFYQ+nOCd250FHnVsI1D9
d7muDJJykqSv0OqR9TRt2/fyxkF+G9ubZiZaLfffrteIWzcM5MN86qfMZFdvkc5JX/az1U2+Zud02o4meBthu+gdFUx+ZGArA5SEyX1ypNixYrpc7lWEyydUqkuZIeKheK8lpVwapK0zpMvxt13351p6IDKlSs7FixYoEMHyGoyf4UOaNy4sV4+ribqOdSchwyhA9z1E0XVhGC9ZHvcuHFOvY2THTt2aF5qKNOhei0cavhKr1hSEzj9EhohN/qJXZ966im93Fxklz9walhAh3lwD/1gBvsJc/nhz42OqkfJoeJ6OUT+NWvWpFvWLI69JH/Z0FiWrSbBakfwiSee0KEDjJVDsipO/c9cyyj/GMuWn3zySV1ennMPHfDPP//oZefyvZTQAXI0Q+gAT3UUR0nk/eijj9LZSpx2I6mhOb2SVsJ3yN8ncZLkGdf/KBhlfX3MrQ1HjRqlf5AlLIKa36gdY/mRltArRjKTDXOrn6HDvffeq0N0GNeuRzPZT/4GyjskH7GDhM6Rc2OlrFm/g3SWXN8oP5xL74m8MO4f+RE1ktyTnicjyf8KJIaG6p7UPRJqkqKOt2Tcl+PFixcdaqhFx9tQgSu1g7Jv3z7XIgVyLvGD1JwjHRdKYkPJufsSXnf9RLDx48c7RG7XP9iGwKKH6CyxRFR3s6NmzZqOQYMGOdxjFRnlfXnMrX4JCQkOtbrKoQIA6h/dKlWqOOQdcLeNWewn7HKroziB7u+zcW281/60oTgFathTvztq6C9db6TYQkIauCY1Edghjri8a2r43JGZA6+Grhzi9IsjJaEGXH+IXesqqPPc6Cj6GvZxPQoLI4lTKe+qMJB3V95htVLMuF3gx9zoJ73w8jdCes7UtATHFVdcof+D5S60mWyYG/1ED/k7KX8v1UR2d7X0tZnsJ38DXN8z49x438z6HQwSkkpYJhIgARIgARIgARIggUwIBGeSxywSIAESIAESIAESIIH/EaCzxFeBBEiABEiABEiABLIhQGcpGzi8RQIkQAIkQAIkQAJ0lvgOkAAJkAAJkAAJkEA2BOgsZQOHt0iABEiABEiABEiAzhLfARIgARIgARIgARLIhgCdpWzg8BYJkAAJkAAJkAAJ0FniO0ACJEACJEACJEAC2RCgs5QNHN4iARIgARIgARIgATpLfAdIgARIgARIgARIIBsCdJaygcNbJEACgUng+PHjUHsvYsSIEU4AatNYqL3RMG/ePGceT0iABAKDAPeGCww7U0sSIIFcEpg9ezZuvfVWqA1joTbHhdpMF126dMHo0aNzWROLkwAJWJ0AnSWrW5DykwAJ+IzAI488ggULFqBly5ZYu3Ytli9fDrV7vc/aY8UkQALmJEBnyZx2oVQkQAImIHDx4kXExsZi//79WLFiBRo3bmwCqSgCCZBAQRPgnKWCJs72SIAELENg165dOHToENLS0rB3717LyE1BSYAEvEuAPUve5cnaSIAEbEIgKSkJrVq1QtOmTfWcpZEjR2L9+vUoX768TTSkGiRAAp4SoLPkKSmWIwESCCgCTz/9NL799ls9V6lYsWLo2LEjihcvjl9++SWgOFBZEiABgMNwfAtIgARIwI3AwoUL9aq3yZMnIzIyEsHBwZDzv//+G+PGjXMrzUsSIAG7E2DPkt0tTP1IgARIgARIgATyRYA9S/nCx4dJgARIgARIgATsToDOkt0tTP1IgARIgARIgATyRYDOUr7w8WESIAESIAESIAG7E6CzZHcLUz8SIAESIAESIIF8EaCzlC98fJgESIAESIAESMDuBOgs2d3C1I8ESIAESIAESCBfBOgs5QsfHyYBEiABEiABErA7ATpLdrcw9SMBEiABEiABEsgXATpL+cLHh0mABEiABEiABOxOgM6S3S1M/UiABEiABEiABPJFgM5SvvDxYRIgARIgARIgAbsToLNkdwtTPxIgARIgARIggXwR+D9mO+wPsVBycwAAAABJRU5ErkJggg==\n", 
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAksAAAHFCAYAAADi7703AABztElEQVR4Ae2dB3gUVdfH/0lIoSV0Qgm9E3rooqKCBUSxgKIovIBiQ0UsvBbEgh1QFEQsSFNEsYI0FUUp0nvvvYcWSN3vnss7+202bZPsZmdm//d5lpm5c+fec35nNnu45dwgh0pgIgESIAESIAESIAESyJRAcKa5zCQBEiABEiABEiABEtAE6CzxRSABEiABEiABEiCBbAjQWcoGDm+RAAmQAAmQAAmQAJ0lvgMkQAIkQAIkQAIkkA0BOkvZwOEtEiABEiABEiABEqCzxHeABEiABEiABEiABLIhQGcpGzi8RQIkQAIkQAIkQAJ0lvgOkAAJkAAJkAAJkEA2BOgsZQOHt0iABEiABEiABEiAzhLfARIwCYGJEyciKCjI+SlUqBAqV66Mvn374uDBg04pFy5cqMvIMbdp8eLFePnllxEfH5/bR3MsP336dDRs2BCFCxfW8q1ZsybHZ7IqIDIKi/ykSZMmoWzZsjh37lyuqjl9+jRKlCiBH374wePnfvvtN8TFxaFo0aJa7tw863EjBVBw9OjRuO2221C9enWtx9VXX+1xq5s2bdLv1p49ezx+xtOCL7zwArp27YpKlSppufr06ePpoyxHAl4hQGfJKxhZCQl4j8AXX3yBJUuWYP78+RgwYAC++uordOjQARcuXMh3I+IsDR8+3OvO0vHjx9G7d2/UrFkTc+bM0fLXqVMnz/L2799f15HXChISEvDf//4Xzz77LIoXL56rakqWLIknn3wSTz/9NJKSknJ8VnaM6tGjB0JDQ/HTTz9pua+66qocnzNjgY8//hh79+7FNddcox3N3MgozpK8W75wlkaNGoWTJ0+iW7duCAsLy41YLEsCXiFQyCu1sBISIAGvEYiNjdW9FFJhx44dkZqaildffVX3dNxzzz1ea8ebFW3btg3Jycm499574Q1HQXrU5JPX9OWXX+ofV3G68pIGDhyI1157Dd9++y169eqVbRWHDh3CqVOn0L17d1x77bXZlhUnrkiRItmW8edNcXiCgy//H1reQ7Mk6R005Jo8ebJZxKIcAUSAPUsBZGyqak0Cbdq00YLL//izS9Kr0bZtW/1jLL0pnTp1Stc7I0Nb0lsiyRhmkaGunIbzcqpXhkSuuOIKXW/Pnj1zHL4Rh2HIkCFahoiICJQqVUo7h9KDZqTMhuGqVaumh2Kk56p58+Z6uK9evXr4/PPPjcecx3HjxuHmm2/Ww2lG5tdff61l+/DDD40sfRw2bBhCQkJ0T55xo3z58pqf9LRkl0ROw6mTXizhKXJKMnRYtWoV7rjjDkiPlfS8Sbp06RKGDh2qGUhPiQwvPfLIIxl6/Aydf/nlFzRr1kzrXL9+fci1JBm6lWsZ/mvVqhVWrFih8/P6j+GQ5PZ5kePOO+/Uj4mDbwwnS76RxE5NmjSBYXNxLjdv3mzczvaYV7myrZQ3SSA3BFQXMhMJkIAJCKjhN4f67jqWL1+eTpr3339f53/yySc6/48//tDXcjTS1KlTdV7nzp0dar6MQ80fcrRo0cKhfogdixYt0sX279/veOyxx3S5mTNnOtRQn/6cOXPGqCbD0ZN6d+zY4fjoo490vSNGjNB1bty4MUNdRsaDDz7oUL0rjpEjRzpEB/XD73jzzTcdY8aMMYo4lAOj63NmqJOqVas6lGPiaNCggUPNR3LMnTvXoX6gdbk///zTWVT0FI5jx4515hknqsdIMzEYq7lGDvVD7FBzYowizuNbb72l76k5TM489xNpS1hKe8JWmCrnSBczdBC5lSPlUMOq2jZpaWmO66+/3qHmpDlefPFFx7x58xzvvvuuQzk8DuUQOZQj5WzG0Fn18jiUM+mYPXu2o3Xr1g415Od46aWXHO3bt9ftf//99w417OlQTp5DOaPO5/NzouafOVQvoUdVHDt2zCG2Fw7yLhjvluRLMu7dfffd
jlmzZmn71ahRwxEVFeVQvZIetWEUEk7333+/cckjCRQIARRIK2yEBEggRwKGs7R06VKHGtJyqKEH7UioScoO1VPkOHLkiK5DHAz5UZKjJDVM56hYsaKjUaNG+lxnqn/k+XLlyjnatWtnZDneeecd/ezu3budeVmd5KZeQ6YZM2ZkVZ0zX374b731Vud1ZieGo+F6TxwH1SvhUD1szuyLFy86VM+UQxwwI4mjKHyEo3sSR0QcEtWz5lBDTtq5EIcgJSXFvah2bqSeX3/9NcM91wxhKeWErWsydBCnxjWpnjFd/u2333bN1g6u1GM4xXJTdFYT5h0HDhxwllUT5/XzFSpUcKh5bM58cZLledUT6MzLz0lunCVpR2wv7RvvpdG2OJuiw0033WRk6eO+ffsc4eHhDjXMmS4/pws6SzkR4n1fEOAwnPp2M5GAmQjIsJtMFpahNFkBFB0dDfWDDRkayixt3boVMm9GJli7DlcUK1YMt99+O5TTABn6ym3yVb0yXCT6PPfcc3oIUDk8HovWtGlTVKlSxVlehnRkIrnrEKWwkKQcRWc540T9OOObb77R85lkKE/9UdUT6GUYzj0Zz7uuRHQv48m12MA1/f777/rSfUWXDGPJcJqsrHNNorMM0xlJht0kyUo11/lPRr4rC+MZ16NyDOH6EQa+TLJYQWzsrm9MTIyeSO6ury9lYd0kkFcCdJbySo7PkYCPCMiSdzVMhNWrV2snaN26dVDDLVm2JquEJKmehgxlVI8T1LAPZDl8bpOv6v3ggw/0KjVZXi/zW2TOkuppwvbt23MUsXTp0hnKiAPk6nAZ5+JIZZZq1aqlVxfKvCGZMJ8ZN3nOeN6oL7O6PMlzr1+4SlgICWvgmmSejzjGBnfjnvBxTcZqsKzyRa/skjjirh+ZDO/LZOjjzkHalPfTuO9LGVg3CeSXAJ2l/BLk8yTgZQLSQyAxe6RHIbMfGPfmDAfi8OHD7re0syW9TTK5OLfJV/VK74ksMd+yZQvU0CJkMrb0fsmEbG+kMmXK6GpkhVpm6dNPP4WaN6MnRMtk72XLlmVWTK9wkxtGfZkW8iBTnCDXJFylZ0fCLbgm6eERHvltz7XOzM7FEXf9eIt7Zm1JXk7vka/1zUou5pNAbgjQWcoNLZYlARMSqFu3rh6mmTZtmh5WMkSUuEzfffedc4Wc5EsvjCRPektyU6+uNA//yNCiDM+oib+QYb+8DBe6Nysr5CTt3LnT/RbWr1+PQYMG4b777oOa+I7GjRtDVvBl1vO2a9cu/byaUJ6hnvxkGOEFpkyZkq4asZXYzLif7qYXL8QRd/0Yzkx+m8jq3ZIVmhKo1F1fNQ8LMiTpa33zqxefJwEhUIgYSIAErE1Aeo7UZGE9pCRznNRkZyQmJkJNONZL0dVKM6eCahK4Plcr7KBWFOnhGHGKMgvcmJt6nQ14cKJWc+m5WOKoSI+XLB+X2DlG2AMPqsi2iNQvP87SWyVBDI0kjogEj5SwCWqlnA5uKPOXZO6SREmXYUHXJM+LI2Ewc72Xn3MJ6aBWw+mhyLNnz+ohVhlqVRPCdXgAmXvmryShB4ygkiKb9HZJrClJLVu2hJpwnqVoRlwmNUFdv08yjCmshaFa9aeDhIqTKo6xDL1J76KUEb1zSmq1o7MnTuKOybwsQy41QT/DkGZO9fE+CeSagPoyMJEACZiAgLEazljWnpVIxsoz91VHshpKlpWrHyC9DF39j93xzz//ZKhGxffRq+eUMyQzezOsXnJ/wJN6DZk8WQ2nJnY7VM+GQzlKejWULCFXEbMdJ06ccDZtrCRzZqgTWRnWpUsX1yx9LqvZ5OOalMOhQwy45qmAmTpkgXtYA2MVl4oS7Swuy/ulPQkHkFPKaTWcGm7LUIXq2dPhBKQNCQOghlsdDz30kENWjrmmrHQWu6m4TK5FHVnJka5QDheyJF/qzuwj72dOSW2Xolcaqgnzug7XZ9Twp0M5yDp0g4QMuOWWWxzutsiq
frFvZjJJnrx7TCTgawJB0oB64ZhIgARIwDYEpIdEekKkd0h6mnKbZIWWilkF9WMOY1gvt3WwPAmQgH0I0Fmyjy2pCQmQgAsBmYskQ29GtGuXWzmeyio9WTU3YcKEHMuyAAmQgP0JcIK3/W1MDUkgIAm89957undJ9hXLTZLJ3mrYB6+//npuHmNZEiABGxNgz5KNjUvVSIAESIAESIAE8k+APUv5Z8gaSIAESIAESIAEbEyAzpKNjUvVSIAESIAESIAE8k+AzlL+GbIGEiABEiABEiABGxNgUEovGFf23pLNOyWwn/vWBl6onlWQAAmQAAmQAAn4gIBET5JFILJPoQTizSrRWcqKTC7yxVGSHbSZSIAESIAESIAErEdg//79qFy5cpaC01nKEo3nN4ytIgR2ZGSk5w/mUDI5ORnz5s3TwfFkl3A7JrvraHf95J20u47Uz/p/eWhDa9vQl/aTbX2ks8P4Hc+KFJ2lrMjkIt8YehNHydvOUpEiRXSddnaW7KyjfMntrJ98TeyuI/XLxR9DkxalDU1qGA/FKgj7Gb/jWYmU9QBdVk8wnwRIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQCiACdpQAyNlUlARIgARIgARLIPQE6S7lnxidIgARIgARIgAQKiIDD4cCW+CCkpTkKqMWMzdBZysiEOSRAAiRAAiRAAn4mkKqco1/WHUK3j5Zg3OYQ/L71uN8kKuS3ltkwCZAACZAACZAACbgRSElNw09rD+GjP3Zg5/EL+m54iAMnzie5lSy4SzpLBceaLZEACZAACZAACWRBQJykH9Ycwpjft2PvyQRdKqpwKO5vWwUVzm3FHS0rZ/Gk77PpLPmeMVsgARIgARIgARLIgoAMt/2sepLe/207dp+43JNUumgY+neogXvbVEFECDB79tYsni6YbDpLBcOZrZAACZAACZAACbgQkAnbs9YfxugF25zDbaWUk/TglTXQu21VFAm77KIkJye7POWfUzpL/uHOVkmABEiABEggIAmIkzR34xHlJG3H1qPnNIMSRULxgHKS7m9bDUXDzeeamE+igHx1qDQJkAAJkAAJ2JuAhACYv+koRiknafPhs1rZyIhCGKCG2/q0r4biEaGmBUBnybSmoWAkQAIkQAIkYA8Cy3adxJtztmD1vnitUDHVe/SfK6qjn/rIJG6zJzpLZrcQ5SMBEiABEiABixKQHqS3lZP0x/9iJBUODUFf1YskQ24lioRZRis6S5YxFQUlARIgARIgAWsQ2H8qAaPmb8P3aw5Cjb6hUHAQ7moVg0HX1ka54hHWUMJFSjpLLjB4SgIkQAIkQAIkkHcCJ88n4kMVTHLq0n1IUnGTJHVtXAFDOtdFtTJF816xn5+ks+RnA7B5EiABEiABErA6gYSkFHy2aDfG/7UL5xNTtDpX1CqDZ2+oh0aVo6yuHugsWd6EVIAESIAESIAE/ENAwgB8v/og3pm7FUfOXtJCxFaK1E5Sh9pl/SOUD1qls+QDqKySBEiABEiABOxO4N/dp/DarE1Yd+CMVrVyycJ4RvUkdW1UAcFqjpKdEp0lO1mTupAACZAACZCAjwnsPXkBb/66Bb9uOKJbkjAAj3SspVe5RajVbnZMwVZTauzYsahevToiIiLQokULLFq0KEsV+vTpg6CgoAyfhg0bOp+ZOHFihvvyzKVLl7sTnQV5QgIkQAIkQAIBTODMxWSMmL0ZnUb+pR0l6Ty6p3UVLHz6ajx0dU3Y1VESk1uqZ2n69Ol44oknIA5T+/btMX78
eNx4443YtGkTqlSpkuEVfv/99/Hmm28681NSUtCkSRPceeedzjw5iYyMxNat6TfpE2eMiQRIgARIgAQCnUCKWtX21b/7dOTtUxeSNI4OtcvghS4NUDe6eEDgsZSzNHLkSPTr1w/9+/fXxhk9ejTmzp2LcePG4Y033shgsKioKMjHSD/88ANOnz6Nvn37Gln6KD1J0dHR6fJ4QQIkQAIkQAKBTkAibw/7aSO2HLm8h1utcsXwfJf6uLpOWT0qEyh8LOMsJSUlYeXKlXjuuefS2aZz585YvHhxurysLj777DNcd911qFq1aroi58+f13mpqalo2rQpXn31VTRr1ixdGdeLxMREyMdIZ89e3uNGdkb25u7IRl3G0WjPTkdDN+NoJ91EF0Mv42g3/QJBR8N2xtFuNjT0Mo5204/vaN4sKivb3pqzDb+sP6IrKKG2JHn82pq4K64yCoUEQ0ZqCioZ76Zx9Ga7ntYZpDa2U7E1zZ8OHTqESpUq4Z9//kG7du2cAo8YMQJffvllhmE0Z4H/nRw+fBgxMTGYNm0aevTo4by9dOlS7NixA40aNYI4PTJ0N3v2bKxduxa1a9d2lnM9efnllzF8+HDXLH0udRcpUiRDPjNIgARIgARIwAoEUlQcyT8OB2HegWAkpak5v3CgXXkHusSkoaj5t3DLNeKEhAT06tULZ86c0VNysqrAMj1LhgIyZOaaxNdzz3O9b5zLRO4SJUrg1ltvNbL0sU2bNpCPkWQuVPPmzTFmzBh88MEHRna649ChQzF48GBnnjhZ4ohJL5fMf/JWEo93/vz56NSpE0JDbfiWKlB219Hu+sm7bncdqZ+3/qL5rx7a0DP2C7cdx+uzt2LPyQT9QPMqJfBSl3poWNF7v2ueSZK+lC/tZ4wMpW8x45VlnKUyZcogJCQER45c7hI0VDl27BjKly9vXGZ6FIfq888/R+/evREWlv3GfcHBwWjZsiW2b9+eaV2SGR4erj/uBcSh8YVT46t63eX357XddbS7fvLu2F1H6ufPvxDeaZs2zJyjhAJ49ZdNWLD5mC5Qplg4/ntTPXRvVsmjzojMa/V+ri/sJ3V6koI9KWSGMuLkSKgA6WlxTXLtOiznes84//PPP/VQm0wOzymJY7VmzRpUqFAhp6K8TwIkQAIkQAKWJZCYkorRC7ah06i/tKMkm932v6I6/hhyFW5rXtlUjpK/IVumZ0lAydCX9A7FxcWhbdu2+OSTT7Bv3z4MHDhQc5ThsYMHD2LSpEnpuMrE7tatWyM2NjZdvlzI3CMZhpP5SdIdJ0Nv4ix99NFHGcoygwRIgARIgATsQGDxjhN44YcN2HXiglanfa3SGN6tIWqVC4xQALm1oaWcpZ49e+LkyZN45ZVXIBO2xfmRydjG6jbJE+fJNcmkre+++05P3HbNN87j4+PxwAMP6OE9CTMgq+D++usvtGrVyijCIwmQAAmQAAnYgsDxc4k6sKTs5yapbPFwvNS1Abo2rsCepGwsbClnSfR4+OGH9ScznWQSt3sSB0hmu2eVRo0aBfkwkQAJkAAJkIBdCciGt18t34e31DYlZy+lKMcI6N2mKp7qXBdRKiwAU/YELOcsZa8O75IACZAACZAACbgS2Hz4LP77/Xqs3hevs2V124jujdAkpoRrMZ5nQ4DOUjZweIsESIAESIAErErgQmIK3v9tOz77ezdSVc9S0bAQ3ZN0X9uqOrCkVfXyh9x0lvxBnW2SAAmQAAmQgA8J/L7lKF74fgMOnbmkW7kxNhrDbm6I6KgIH7Zq36rpLNnXttSMBEiABEggwAicPJ+I4T9vwk9rD2nNK5csjFduaYhr6mUfjzDAMOVaXTpLuUbGB0iABEiABEjAXARk47If1x5WEbi34HRCMlTIJPTvUANPXlcHhdXwG1P+CNBZyh8/Pk0CJEACJEACfiVwWA21fbIlGJuWrtdy1IsujrfvaIzGlUv4VS47NU5nyU7WpC4kQAIkQAIBQ0DCAUxd
thdvqnAAF5KCERoShEHX1MaDV9VEWCHLbNBhCXvRWbKEmSgkCZAACZAACfw/gZ3Hz2Pod+vx755TOrN6cQfG9mmH+pVK/n8hnnmNAJ0lr6FkRSRAAiRAAiTgWwLJqWn45K9dOiRAUkoaiqj5SEM61UbJkxvUViXFfNt4ANdOZymAjU/VSYAESIAErENg65FzeGrGGmw4eFYLfVWdsni9eyzKFwtVW39tsI4iFpSUzpIFjUaRSYAESIAEAodAiupNGi+9SQu2I0mdy/Ykw25ugO7NKun93JKTkwMHhp80pbPkJ/BslgRIgARIgARyIrDjmPQmrcPa/fG66HX1y+mtSspFMrhkTuy8eZ/Okjdpsi4SIAESIAES8AIB2Z7ks7934d152yBzk4pHFMLLKgL3bc0v9yZ5oQlWkQsCdJZyAYtFSYAESIAESMDXBHafuIAhM9Zi5d7TuimZm/Tm7Y1QIaqwr5tm/VkQoLOUBRhmkwAJkAAJkEBBEpC4SRMX78Hbc7fgUnIaioUXwotd66NHXIyem1SQsrCt9AToLKXnwSsSIAESIAESKHAC+04mYMi3a/Hv7stxk9rXKo23bm+MyiWLFLgsbDAjATpLGZkwhwRIgARIgAQKhIBDber2zYr9eEVtfnshKVXHTRp6U33c27oKe5MKxAKeNUJnyTNOLEUCJEACJEACXiVw8nwihs5cj3mbjup6W1UrhXfvbIIqpdmb5FXQXqiMzpIXILIKEiABEiABEsgNgd+3HMUz367HCeUwyZ5uT3WuiwEdaiAkOCg31bBsARGgs1RAoNkMCZAACZAACSQkpeD1WZvVBrj7NIw65YthVM+maFgxinBMTIDOkomNQ9FIgARIgATsQ2CNCiz55PQ1kNAAkv7TvjqeuaEuIkJD7KOkTTWhs2RTw1ItEiABEiABcxCQ7Uo++mMnPvh9OyTYZLSKvv1ejyZoX6uMOQSkFDkSoLOUIyIWIAESIAESIIG8EdijepGe/GYNVu+L1xXc3KQiXrslFlFFQvNWIZ/yCwE6S37BzkZJgARIgATsTEBCAny78gCG/bQRCSokgGxX8tqtsbilaSU7q21b3egs2da0VIwESIAESMAfBM5eSsbz32/Az2sP6ebb1Cilht2aolIJblfiD3t4o006S96gyDpIgARIgARIQBFYte80Bn21GgdOX9RhAAZ3qoOBV9VkSACLvx10lixuQIpPAiRAAiTgfwIycXvcwh0YteDyJO6YUoXx/l3N0LxKSf8LRwnyTYDOUr4RsgISIAESIIFAJnD4zEUdEmDprsv7unWTSdzdYxEZwUncdnkv6CzZxZLUgwRIgARIoMAJzN14BM9+tw7xCcl6X7dX1Eq325tX4r5uBW4J3zZIZ8m3fFk7CZAACZCADQlcSk7Fa7M2YcrSy5G4G1WKUsNuTVGjbDEbakuV6CzxHSABEiABEiCBXBDYeuQcHvtqFbYdPa+feuDKGhii9nYLKxSci1pY1EoE6CxZyVqUlQRIgARIwG8EJHbSV//ux/CfNyIxJQ1lioVjpIrEfWWdsn6TiQ0XDAE6SwXDma2QAAmQAAlYmMA5FTvpvy6xk65SDpJsWSIOE5P9CdBZsr+NqSEJkAAJkEA+CGw8dAaPTlutN8ANCQ7CM9fXxYAONRCszpkCgwCdpcCwM7UkARIgARLIJQEZdpu6bB9e+WUTktSwW8WoCIzp1QwtqpbKZU0sbnUCdJasbkHKTwIkQAIk4HUCMuw2dOZ6/LLusK772nrl8O6dTVCyaJjX22KF5idAZ8n8NqKEJEACJEACBUhgw0EZdluFPScTUEgNtT17Qz3071CdsZMK0AZma4rOktksQnlIgARIgAT8QkCG3aaoYbdXf1bDbqlpeuNbGXbjliV+MYepGqWzZCpzUBgSIAESIAF/EDgrw27frces9ZeH3a6rf3nYrUQRDrv5wx5ma9NyEbTGjh2L6tWrIyIiAi1atMCiRYuyZLpw4ULdbRoU
FJTuuGXLlnTPfPfdd2jQoAHCw8P18fvvv093nxckQAIkQAL2JSDDbjeP+Vs7SjLs9kKX+phwXxzoKNnX5rnVzFLO0vTp0/HEE0/g+eefx+rVq9GhQwfceOON2LdvX7Z6b926FYcPH3Z+ateu7Sy/ZMkS9OzZE71798batWv1sUePHli2bJmzDE9IgARIgATsSWD68n24bdxi7FXzkyqVKIwZA9uq+Uk1OD/JnubOs1aWcpZGjhyJfv36oX///qhfvz5Gjx6NmJgYjBs3LlsA5cqVQ3R0tPMTEhLiLC91dOrUCUOHDkW9evX08dprr9V1OwvxhARIgARIwFYEZG+3p2esVZvgrtdhAWTYbfagDmhWpaSt9KQy3iFgmTlLSUlJWLlyJZ577rl0mnfu3BmLFy9Ol+d+0axZM1y6dEkPsb3wwgvo2LGjs4j0LD355JPOazm5/vrrs3WWEhMTIR8jnT17Vp8mJydDPt5KRl3G0Vv1mqkeQzfjaCbZvCGLoZdx9EadZqvD0M04mk2+/Mpj6GUc81uf2Z439DKOZpPPG/IYuhnHvacS8OhXa7FF7fEmcSWfvLYWHlCr3YJV94FRxhvtFlQdhszGsaDaLah2DL2Mozfb9bROyzhLJ06cQGpqKsqXL5+Ok1wfOXIkXZ5xUaFCBXzyySd6bpM4N5MnT4b0GslcpiuvvFIXk2dzU6c89MYbb2D48OFGM87jvHnzUKRIEee1t07mz5/vrapMW4/ddbS7fvJi2V1H6mfaPx8eCyY2XH8qCFN3BONiahCKFXLg/jppqHJhC+bMST+X1eNKTVSQ72jujZGQkODRQ5ZxlgxtZLK2a5Klnu55xv26detCPkZq27Yt9u/fj3fffdfpLMk99+ezq1PKy5Dd4MGD5VQn6VmS4UDp5YqMjDSy830Uj1defhkmDA0NzXd9ZqzA7jraXT95p+yuI/Uz41+O3MkkNpwzbz42hdTAp1v36YebVymB93s2RnRkRO4qM2FpvqN5N4oxMpRTDZZxlsqUKQOZa+Tei3Ts2LEMPUPZKd2mTRtMmTLFWUTmMuW2Tlk1Jx/3JA6NL5waX9XrLr8/r+2uo931k3fH7jpSP3/+hchf2yfOJ2LcpmBsP3vZUfpP++oYelM9hIaocTcbJb6juTemMPMkWeZNCQsL08Np7t2Mct2uXTtPdNVlZBWdDM8ZSXqb3OuU4bTc1GnUxSMJkAAJkIC5CKzYcwq3jF2qHKVgFA0LwYcqyORLNzewnaNkLur2k8YyPUuCXoa+ZIl/XFwcxMmR+UgSNmDgwIHaMjI8dvDgQUyaNElfy0q3atWqoWHDhpAJ4tKjJDGV5GOkxx9/XA/JvfXWW7jlllvw448/YsGCBfj777+NIjySAAmQAAlYjIBMp/js791489ctSElzoHxhByb2b436lUpaTBOKawYClnKWJB7SyZMn8corr+iYSbGxsZg9ezaqVq2qWUosJdeYS+IgDRkyRDtQhQsX1k7TrFmzcNNNNznZSw/S119/DVkl9+KLL6JmzZqQeE6tW7d2luEJCZAACZCAdQjIJrjPfrcOs9dfXvzTtVE0rix8ALXKFbOOEpTUVAQs5SwJuYcfflh/MqM4ceLEdNnPPPMM5JNTuuOOOyAfJhIgARIgAWsT2HHsHB6YvBK7jl9QQ20SjbsB7o6riF9/PWBtxSi9XwlYzlnyKy02TgIkQAIkYFoCczYcwVPfrMGFpFS9yu2je5qjRdWSloydZFrIASoYnaUANTzVJgESIAG7EEhVc5JGzd+GD//YoVVqXb0UxFEqUyzjqmW76Ew9CpYAnaWC5c3WSIAESIAEvEjgTEIyHp++Ggu3Hte12jUsgBeRsao8EKCzlAdofIQESIAESMD/BLYcOYsH1fwk2QQ3IjQYb97WGLc2q+R/wSiB7QjQWbKdSakQCZAACdifwM9rD+GZb9fhotoQt3LJwhjfuwUaVoyyv+LU0C8E6Cz5BTsbJQESIAESyAuB
lNQ0vD13Kz75a5d+/IpaZTDm7mYoWTQsL9XxGRLwiACdJY8wsRAJkAAJkIC/CZy6kITHvlqFf3ac1KIMvKomnr6+LkKC0+8Z6m852b79CNBZsp9NqREJkAAJ2I7AhoNn9Pykg/EXUURtW/L2HY3RtXFF2+lJhcxJgM6SOe1CqUiABEiABP5HYOaqAxg6cz0SU9JQtXQRfNI7DnWji5MPCRQYATpLBYaaDZEACZAACeSGgMxPGjF7Cz7/Z7d+rGPdshjdsxmiini2U3xu2mJZEsiOAJ2l7OjwHgmQAAmQgF8IxCck4dFpq/H3jhO6/ceuqYUnr6uDYM5P8os9Ar1ROkuB/gZQfxIgARIwGYFtR89hwKQVOn5S4dAQjOzRBDc2qmAyKSlOIBGgsxRI1qauJEACJGByAvM3HcUTX6/W+7tVKlEYE+6LQ4OKkSaXmuLZnQCdJbtbmPqRAAmQgAUIOBwOfKT2dntP7fGmTiH7u41V+7uV5v5uFrCe/UWks2R/G1NDEiABEjA1gYSkFDw9Yx1mrT+s5ezdpipeurkBQkOCTS03hQscAnSWAsfW1JQESIAETEfgwOkEPDBpJTYdPotCavL2K7fEolfrKqaTkwIFNgE6S4Ftf2pPAiRAAn4j8O/uU3hoykqcVJG5S6vtSsbd2wKt1PAbEwmYjQCdJbNZhPKQAAmQQAAQmLpsL4b9uBEpaQ40qBCJT+5roTbELRIAmlNFKxKgs2RFq1FmEiABErAogWQVaHL4zxsxZek+rUGXxhXwjtq6pEgYf44satKAEJtvZ0CYmUqSAAmQgP8JnDyfiIenrsIyNfwmSTbBffjqmggK4ka4/rcOJciOAJ2l7OjwHgmQAAmQgFcIbD1yDv2+XI4Dpy+iqNoId/RdzdCpQXmv1M1KSMDXBOgs+Zow6ycBEiCBACfw2+ajGPTV5UCTVUoVwaf3x6FOeW6EG+CvhaXUp7NkKXNRWBIgARKwDgEJNPnZ37vx+uzNOtBkmxqlMO6eFiipVr4xkYCVCNBZspK1KCsJkAAJWIRAUkoaXvhhPb5ZcUBLfHerGAzvFouwQgw0aRETUkwXAnSWXGDwlARIgARIIP8ETqm4SQNV/CSJo6TiTOL5Lg3wn/bVOJE7/2hZg58I0FnyE3g2SwIkQAJ2JLD9qEzkXoF9pxJQLLwQxtzdDB3rlbOjqtQpgAjQWQogY1NVEiABEvAlgYVbj+GxaatxLjEFMaUK47P7W3Iity+Bs+4CI0BnqcBQsyESIAESsCcBmcg9cfEevPrLJqiA3GhVrRQ+7t0CpTiR254GD0Ct6CwFoNGpMgmQAAl4i4BE5H5JbVvy1b+XI3Lf2aIyXusei/BCId5qgvWQgN8J0FnyuwkoAAmQAAlYk0B8QpLaCHcVluw6qSZvA0NvrIcBHWpwIrc1zUmpsyFAZykbOLxFAiRAAiSQOYGdx8+j38Tl2HMyQUfk/kBN5L62PiNyZ06LuVYnQGfJ6hak/CRAAiRQwAQWbT+u93g7dykFlUqoidx94lAvOrKApWBzJFBwBOgsFRxrtkQCJEAClicweelevPzTRqSqmdwtqpbEeDWRu0yxcMvrRQVIIDsCdJayo8N7JEACJEACmoA4R6/P2ozP/9mtr29rVgkjbmuEiFBO5OYrYn8CdJbsb2NqSAIkQAL5InBBxU16/OvVWLD5mK5nSOc6eKRjLU7kzhdVPmwlAnSWrGQtykoCJEACBUzgyJlLKiL3cmw8dFbv6zayRxN0bVyxgKVgcyTgXwJ0lvzLn62TAAmQgGkJbDh4RjtKR88morQKMDnh/jg0r1LStPJSMBLwFQE6S74iy3pJgARIwMIEFmw6ikFq6C0hKRW1yhXDF31aqi1MilhYI4pOAnknQGcp7+z4JAmQAAnYjoBsXfLFP3vw2qzLW5dcUasMPrqnOaIKh9pOVypEAp4SCPa0oFnKjR07FtWrV0dERARatGiBRYsWZSnazJkz0alTJ5QtWxaR
kZFo27Yt5s6dm678xIkT9STFIBV+1vVz6dKldOV4QQIkQAJ2J5Citi4ZpsICvPK/Pd7ubhWDL/q2pKNkd8NTvxwJWMpZmj59Op544gk8//zzWL16NTp06IAbb7wR+/Zd3pPIXdu//vpLO0uzZ8/GypUr0bFjR9x88836Wdey4kgdPnw43UecMSYSIAESCBQCEmCy/6QVmLRkr/qP4+WtS0Z0b4TQEEv9TASKuahnAROw1DDcyJEj0a9fP/Tv319jGj16tO4pGjduHN54440M6OS+axoxYgR+/PFH/Pzzz2jWrJnzlvQoRUdHO695QgIkQAKBROBUInD3p/9i69HzKm5SMEb3bIobYisEEgLqSgLZErCMs5SUlKR7h5577rl0CnXu3BmLFy9Ol5fVRVpaGs6dO4dSpUqlK3L+/HlUrVoVqampaNq0KV599dV0zlS6wuoiMTFRf4z8s2fP6tPk5GTIx1vJqMs4eqteM9Vj6GYczSSbN2Qx9DKO3qjTbHUYuhlHs8mXX3kMvYxjfusz2/Or957EqPUhOJt8HmWLheHje5qhceUor/4t87fOhu2Mo7/l8Xb7hl7G0dv1+7s+Qy/j6E15PK0zSE3mc3izYV/VdejQIVSqVAn//PMP2rVr52xGeou+/PJLbN261ZmX1ck777yDN998E5s3b0a5cuV0saVLl2LHjh1o1KgRxOl5//33IcN2a9euRe3atTOt6uWXX8bw4cMz3Js2bRqKFOFqkQxgmEECJGBKAutOBWHS9mAkpwWhQhEHHqiXilLcucSUtqJQviGQkJCAXr164cyZM3puc1atWKZnyVBAhsxck/h67nmu943zr776CuLkyDCc4SjJvTZt2uiPUa59+/Zo3rw5xowZgw8++MDITnccOnQoBg8e7MwTJysmJgbSyyXzn7yVxOOdP3++nncVGmrPlSh219Hu+sm7bncd7aif/N387J+9+HzpNsh/l+uXSMOXD16FksUKe+vPl6nqsaMNXQFTP1cauTs3RoZyesoyzlKZMmUQEhKCI0eOpNPp2LFjKF++fLo89wuZGC5znWbMmIHrrrvO/Xa66+DgYLRs2RLbt29Pl+96ER4eDvm4J3FofOHU+Kped/n9eW13He2un7w7dtfRLvolqxVvshHutGX79Ff+HrXirUXwbu0oiY52TnaxYVY2on5Zkck639N3PjjrKsx1JywsTIcKkJ4W1yTXrsNyrvfkXHqU+vTpAxki69Kli/vtDNfyP641a9agQgVObswAhxkkQAKWJnD2UjL+M3G5dpSkk/7Frg0wrGs9hKTvsLe0jhSeBHxBwDI9S6K8DH317t0bcXFxOmbSJ598osMGDBw4ULOR4bGDBw9i0qRJ+locpfvuu0/PQ5LhNqNXqnDhwoiKitJlZO6R3JP5SdIdJ0Nv4ix99NFH+j7/IQESIAE7EDgUfxF9vvgX29SKtyJhIfjgrma4rkF5W03ktoOdqIM5CVjKWerZsydOnjyJV155RcdEio2N1ZOxZSWbJImV5Bpzafz48UhJScEjjzyiP4YJ7r//fkgwSknx8fF44IEHtCMlDpSEFJD4TK1atdL3+Q8JkAAJWJ3AJrUJbt+J/0L2eCtXPByfq61LYitd/g+j1XWj/CRQEAQs5SwJkIcfflh/MoNjOEDGvYULFxqnWR5HjRoF+TCRAAmQgB0JLNp+HA9NWYXziSmoU17t8da3FSqVsOdEbjvajzqZg4DlnCVzYKMUJEACJGB+At+uPIDnvluHlDQH2tQohfG947h1ifnNRglNSIDOkgmNQpFIgARIID8EZKHKmN93YOT8bbqaW5pWxNt3NEZ4oZD8VMtnSSBgCdBZCljTU3ESIAE7EpDQAC/+sAFfL9+v1Xvo6pp4unNdBAdzyZsd7U2dCoYAnaWC4cxWSIAESMDnBC6oeUkPT12FP7cdh/hGw7s1RO+21XzeLhsgAbsToLNkdwtTPxIggYAgcOzcJR1DacPBs3oz3DF3
N0cnFRqAiQRIIP8E6CzlnyFrIAESIAG/Ethx7Bzu/3w5DqpYSqWLhuHT++PQrEpJv8rExknATgToLNnJmtSFBEgg4Aj8u/sUBkxagTMXk1GtdBF8+Z9WqFq6aMBxoMIk4EsCdJZ8SZd1kwAJkIAPCfyy7hAGT1+LJDWpu1mVEvj0vjiULpZx30ofisCqSSAgCNBZCggzU0kSIAE7EZDQAJ8u2o3XZ2/WanVWc5PeV9uXFFbbmDCRAAl4nwCdJe8zZY0kQAIk4DMCqSrA5Ku/bMLExXt0G33aVdMb4oYwNIDPmLNiEqCzxHeABEiABCxC4FJyKh7/ejXmbjyqJX7+pvro36E6goIYQ8kiJqSYFiVAZ8mihqPYJEACgUXg1IUk9PtyOVbvi0dYSDDe69EENzepGFgQqC0J+IkAnSU/gWezJEACJOApgb0nL6DPF8ux+8QFREYUwgQ1kbt1jdKePs5yJEAC+SRAZymfAPk4CZAACfiSwJr98eg3cTlOqp6lSiUKq9AALVGrXHFfNsm6SYAE3AjQWXIDwksSIAESMAuB+ZuO4rGvVuFSchpiK0Xi8/tbolxkhFnEoxwkEDAE6CwFjKmpKAmQgJUITF6yB8N+2gi1+A1X1SmLsfc0R9Fw/sm2kg0pq30I8JtnH1tSExIgARsQSFPe0dtzt+LjP3dqbXrGxeC17rEIVZO6mUiABPxDgM6Sf7izVRIgARLIQCAxJRVPz1iHn9Ye0vcGd6qDx66pxdAAGUgxgwQKlgCdpYLlzdZIgARIIFMCsrfbg5NXYOmuUyikAky+eXtj3NGicqZlmUkCJFCwBOgsFSxvtkYCJEACGQgcjL+IPp//i+3HzqOYmpc07t7m6FC7bIZyzCABEvAPATpL/uHOVkmABEhAE9h46Az6qhhKx84lonxkOL7o0woNKkaSDgmQgIkI0FkykTEoCgmQQGAR+GvbcTw0ZSUuJKWiTvlimNi3FSqqWEpMJEAC5iJAZ8lc9qA0JEACAUJgxor9GDpzPVLU6re2Khr3x71bIKpwaIBoTzVJwFoE6CxZy16UlgRIwOIEHA4H3v9tO0Yv2K41ubVpRbx1R2OEFwqxuGYUnwTsS4DOkn1tS81IgARMRiA5NQ3Pf78e36w4oCV7+OqaGNK5LoLV6jcmEiAB8xKgs2Re21AyEiABGxE4n5iCh6eugsxTEt/olVticW+bqjbSkKqQgH0J0Fmyr22pGQmQgEkIHDt7CX3VZrgbD51F4dAQjLm7Ga5rUN4k0lEMEiCBnAjk2lmS8fY///wTixYtwp49e5CQkICyZcuiWTP15b/uOsTExOTUJu+TAAmQQMAQ2H70HPqo0AASS6l00TB83qclmsSUCBj9qSgJ2IGAx5sNXbx4ESNGjNDO0I033ohZs2YhPj4eISEh2LFjB4YNG4bq1avjpptuwtKlS+3AhjqQAAmQQL4ILN11ErePW6wdpeplimLmw+3oKOWLKB8mAf8Q8LhnqU6dOmjdujU+/vhjXH/99QgNzbjEde/evZg2bRp69uyJF154AQMGDPCPVmyVBEiABPxMQPZ3G/LNWiSpSd3Nq5TAp/e3RCnVs8REAiRgPQIeO0u//vorYmNjs9WwatWqGDp0KJ566imI48REAiRAAoFGQKYqfPLXLrzx6xat+g0NozH6rqaIUHOVmEiABKxJwGNnKSdHyVX9sLAw1K5d2zWL5yRAAiRgewKpKsDk8J83YtKSy/9Z7Nu+Gl7o0gAhDA1ge9tTQXsT8HjOkiuGF198Eampqa5Z+vzMmTO4++67M+QzgwRIgATsTuCi2rJkoNq6xHCUXuhSH8NubkhHye6Gp34BQSBPztKkSZPQvn177Ny50wlp4cKFaNSokV4h58zkCQmQAAkEAIGT5xNx94SlmL/pKMIKBeOjXs3Rv0ONANCcKpJAYBDIk7O0bt06VKtWDU2bNsWECRPw9NNPo3PnzujTpw/+/vvvwCBHLUmABEhAEdhz4oJe
8bZmf7ze221q/9bo0rgC2ZAACdiIgMdzllx1joqKwtdff43nn38eDz74IAoVKgSZAH7ttde6FuM5CZAACdiawKp9p9H/yxU4dSEJlUsWxsS+rVCrXDFb60zlSCAQCeSpZ0lAjRkzBqNGjdJzlGrUqIFBgwZh7dq1gciQOpMACQQggXkbj6CXGnoTR6lRpSgdQ4mOUgC+CFQ5IAjkyVmSoJTDhw+HzF2aOnUqVq9ejSuvvBJt2rTB22+/HRDgqCQJkEDgEpi0ZA8eVJO5LyWnoWPdsvj6gTYoVzwicIFQcxKwOYE8OUspKSmQeUt33HGHxlO4cGGMGzcO3377re5tsjkzqkcCJBCgBNJUaIA3Zm/GSz9uhAqnhLtbxWDCfXEoGp6nGQ0BSpFqk4D1COTJWZo/fz4qVqyYQdsuXbpg/fr1GfK9mTF27Fi9rUpERARatGih96jLrn7Zx07KSXkZLpQI5O7pu+++Q4MGDRAeHq6P33//vXsRXpMACQQ4gcSUNDw+fQ3Gq4CTkoZ0roMR3RuhUEie/owGOE2qTwLWIuD1b3mZMmU0AYli6+00ffp0PPHEE3piuQz9dejQATIkuG/fvkyb2r17t96rTspJ+f/+9796bpU4R0ZasmSJ3p6ld+/ees6VHHv06IFly5YZRXgkARIIcAIJKUDfL1fiZ7WFSSEVYHJkjyZ49JraCAoKCnAyVJ8EAoOAx85S/fr19b5vSUlJ2ZLZvn07HnroIbz11lvZlsvLzZEjR6Jfv37o378/RJ7Ro0frjX1lCDCzJL1IVapU0eWkvDz3n//8B++++66zuNTRqVMnvU1LvXr19FFW9Uk+EwmQAAkcjL+I0RtCsHzPaRRTw22y4u225pUJhgRIIIAIeDzQ/tFHH+HZZ5/FI488omMqxcXF6aE4Gd46ffo0Nm3apGMsyfHRRx/Fww8/7FWM4qStXLkSzz33XLp6Jb7T4sWL0+UZF9JrJPddk2wC/NlnnyE5OVlvBixlnnzySdcieqPg7JylxMREyMdIZ8+e1adSp3y8lYy6jKO36jVTPYZuxtFMsnlDFkMv4+iNOs1Wh6GbcTSbfPmRZ+Ohs+g/eRVOXAxC+eLh+PS+5qgXXdyr3/P8yOeNZw27GUdv1Gm2OgzdjKPZ5MuvPIZexjG/9ZnteUMv4+hN+Tyt02Nn6ZprrsHy5cu1YyLDYdOmTdPRui9evAgZemvWrBnuu+8+3HvvvShRooQ3ddF1nThxQm+xUr58+XR1y/WRI0fS5RkXkp9ZeZmgLvVVqFBBP5tZmazqlLrfeOMNvRrQaMc4zps3D0WKFDEuvXaUOWJ2T3bX0e76yftpNx03xwfhi63BSEwLQoXCDgysfQG7Vi3C5RlL9vtG2s1+mVnI7jpSv8ysnn1eQkJC9gX+d9djZ8morV27dpCPv5L7HAGZG+We5yqb+z1jLpVrvuu5PJtTnUOHDsXgwYOdzUjPUkxMjO7FioyMdObn90Q8Xnn5ZZgwNDQ0v9WZ8nm762h3/eSlsqOOM1YexIRlmyAb47auVgLdy55Atxvt+T20o/3c/9jZXUfq525xz6+NkaGcnsi1s5RThb66L71XISEhGXqRjh07lqH3yJAhOjo60/IScbx06dK6WFZl3HubjDrlKKvm5OOexKHxhVPjq3rd5ffntd11tLt+8u7YQUf5j9LoBdvx/m/b9dehe7NKeK1bfSyYN8cW+mX3HbeD/bLTT+7ZXUfql9MbkPG+MPMk5dlZ+u233yAfcVbS0tLStfX555+nu/bGRVhYmA4BID0t3bt3d1Yp17fccovz2vWkbdu2+Pnnn12zIENlMt/KACRlpA7XeUtSxp+9Z+kE5gUJkECBEEhOTcN/Z67HjJUHdHuPdKypwgPUhQzbM5EACQQ2gTw5SxK9+5VXXtFOh8z7cR/G8hVSGfqSpf3i7IiT88knn+iwAQMHDtRNyvDYwYMHdWRxyZD8Dz/8UA+ZDRgwADKZWyZ3f/XVV04R
H3/8cR19XFbvidP1448/YsGCBdwQ2EmIJyRgfwLnLiXj4amrsGj7CajIAHjt1kbo1bqK/RWnhiRAAh4RyJOzJEvyJ06cqB0Xj1rxUqGePXvi5MmT2lE7fPgwYmNjMXv2bFStWlW3IHmuMZeqV6+u70uvkazmk0CaH3zwAW6//XanRNKDJJsCv/DCC3jxxRdRs2ZNyAT21q1bO8vwhARIwL4Ejp69hD5fLMfmw2dRODQEH93TDNfUS7+QxL7aUzMSIAFPCOTJWZJl/P4appKQBFmFJRAHzj1dddVVWLVqlXt2umvZtsXYuiXdDV6QAAnYmsC2o+fQ5/N/cejMJZQpFobP+7RE48olbK0zlSMBEsg9AY+DUrpWLcEdJXQAEwmQAAlYlcCSnSdx+7jF2lGqUaYoZj7Uno6SVY1JuUnAxwTy1LN06dIlPV9I5vY0btzYOVnakFUibTORAAmQgFkJ/LjmIJ6esQ5JalJ3XNWSejPckkXDzCou5SIBEvAzgTw5S+vWrUPTpk216Bs2bEinQkFN9k7XKC9IgARIwAMCEhrg4z934a05W3TpG2OjMapnU0SouUpMJEACJJAVgTw5S3/88UdW9TGfBEiABExJQAJMDvtpA6Ys3afl63dFdTx/U30Ey/I3JhIgARLIhkCenKVs6uMtEiABEjAdgYtJqXjsq9VYsPmoCnUCvNClAcRZYiIBEiABTwh47CzddtttOlyAbOch59mlmTNnZneb90iABEigwAicOJ+Ifl+uwNr98QgrFIz31bDbjY0qFFj7bIgESMD6BDx2lqKiopzBJ+WciQRIgATMTmD3iQu4X4UG2HcqASWKhOLT++IQV62U2cWmfCRAAiYj4LGz9MUXXzhFHzt2rN7ipGjRojpvz549+OGHH1C/fn1cf/31znI8IQESIAF/EVi59zT6f7kcpxOSEVOqMCb2bYWaZYv5Sxy2SwIkYGECeYqzJNuCTJ48WasdHx+PNm3a4L333sOtt96KcePGWRgHRScBErADgTkbjqDXhKXaUWpcOUrHUKKjZAfLUgcS8A+BPDlLEhG7Q4cOWuJvv/0W5cuXx969e/WebLKdCBMJkAAJ+IvAxH9246GpK5GYkqa2LSmHrx9og7LFw/0lDtslARKwAQGPh+FcdU1ISEDx4sV11rx58/SE7+DgYN3DJE4TEwmQAAkUNIE0FRrgjV83Y8Ki3bpp2Qj3lW4NUSgkT/8nLGjx2R4JkICJCeTpr0itWrX0HKX9+/dj7ty56Ny5s1bx2LFjkNVyTCRAAiRQkAQuJadi0NernY7S09fXxeu3xtJRKkgjsC0SsDGBPDlLL730EoYMGYJq1aqhdevWaNu2rUYkvUzNmjWzMS6qRgIkYDYC8QlJ6P3ZMvyy7jBCQ4JURO4meKRjLefqXbPJS3lIgASsRyBPw3B33HEHrrjiChw+fBhNmjRxan3ttdeie/fuzmuekAAJkIAvCexXIQHu/+Jf7Dp+AcXDC+Hj3i3QvlYZXzbJukmABAKQQJ6cJeEUHR2tP67MWrVq5XrJcxIgARLwGYF1B+Lxn4nLceJ8EipERejQAHWjL8+l9FmjrJgESCAgCeTZWQpIWlSaBEjAFAR+U9uWPDptNS6quUr1K0Tiiz4tEa0cJiYSIAES8AUBOku+oMo6SYAEfEZg8tK9GPbjBqjFb7iyTll81KsZikeE+qw9VkwCJEACdJb4DpAACViCgIQGeGvuFoz/c5eWt0dcZbzevZGa1J2ndSqW0JlCkgAJmIMAnSVz2IFSkAAJZEMgMSUVQ2asw89rD+lSgzvVwWPXcMVbNsh4iwRIwIsE6Cx5ESarIgES8D6BM2pvtwGTV+Df3adQKDgIb97eGHe0qOz9hlgjCZAACWRBgM5SFmCYTQIk4H8CEhqgr1rxtuPYeR0aYNy9LXBF7TL+F4wSkAAJBBQBOksBZW4qSwLWIbD+wBntKJ04n4joSBUa4D8tUS+aOwRYx4KUlATsQ4DO
kn1sSU1IwDYEft9yFI9MvRwaoJ6KnfRF35YqllJh2+hHRUiABKxFgM6StexFaUnA9gSmLduHF35Yr0MDdFBDbmPvac7QALa3OhUkAXMToLNkbvtQOhIIGAISGuDdeVsxduFOrbNM4n7jNoYGCJgXgIqSgIkJ0FkysXEoGgkECgEJDfDMt+vw45rLoQGeuK42Hr+2NjfDDZQXgHqSgMkJ0FkyuYEoHgnYnYCEBnhAhQZY9r/QANKbdGdcjN3Vpn4kQAIWIkBnyULGoqgkYDcCB04noM8Xl0MDFAsvpOcnyRYmTCRAAiRgJgJ0lsxkDcpCAgFEYMPBy6EBjp+7HBrgc7UZboOKDA0QQK8AVSUByxCgs2QZU1FQErAPgT+2HlOhAVYhISlVxU5iaAD7WJaakIA9CdBZsqddqRUJmJaAhAZ48ccNSFWr39rXKg2Jyh0ZEWpaeSkYCZAACdBZ4jtAAiRQIAQkNMBbc7dg/J+7dHu3Na+EN29rjLBCwQXSPhshARIggbwSoLOUV3J8jgRIwGMCl5JTMfibNZi9/oh+hqEBPEbHgiRAAiYgQGfJBEagCCRgZwKyt9uASSuwel88QkOC8PYdjdG9WWU7q0zdSIAEbEaAzpLNDEp1SMBMBHYcO6c3w91/6iKiCodifO8WaFOjtJlEpCwkQAIkkCMBOks5ImIBEiCBvBBYvPMEBk5eibOXUlClVBG9GW7NssXyUhWfIQESIAG/EqCz5Ff8bJwE7Eng25UHMHTmOiSnOtC8SglMuC8OpYuF21NZakUCJGB7AnSWbG9iKkgCBUfA4XBg1ILt+OC37brRLo0r4L07myAiNKTghGBLJEACJOBlAnSWvAyU1ZFAoBJITEnD09+twQ//2wz34atrYkjnuggODgpUJNSbBEjAJgQsE+Dk9OnT6N27N6KiovRHzuPj47M0Q3JyMp599lk0atQIRYsWRcWKFXHffffh0KHLu5obD1599dV6Z/OgoCDn8a677jJu80gCJOABgQvJQJ+JK7SjFKKcozfVZrjP3FCPjpIH7FiEBEjA/AQs4yz16tULa9aswZw5c/RHzsVhyiolJCRg1apVePHFF/Vx5syZ2LZtG7p165bhkQEDBuDw4cPOz/jx4zOUYQYJkEDmBPaeTMCoDSFYsTcexdVmuBP7tsRdrapkXpi5JEACJGBBApYYhtu8ebN2kJYuXYrWrVtrzBMmTEDbtm2xdetW1K1bNwN66YGaP39+uvwxY8agVatW2LdvH6pU+f8/5kWKFEF0dHS6srwgARLImcCKPad0DKXTl4JQMSpCrXhrhbpqrzcmEiABErATAUs4S0uWLNFDb4ajJAZo06aNzlu8eHGmzlJmRjpz5oweaitRokS621OnTsWUKVNQvnx53HjjjRg2bBiKF8/6D35iYiLkY6SzZ8/qUxn6k4+3klGXcfRWvWaqx9DNOJpJNm/IYuhlHL1Rp1nqmKWicT8zcwOS1FylmKIOTOnXHBVLRnj1O2AGXQ3bGUczyORNGQy9jKM36zZLXYZuxtEscnlLDkMv4+ites1Sj6GXcfSmXJ7WGaRWrzi82bAv6hoxYgQmTpyoh9Fc669Tpw769u2LoUOHumZnen7p0iVcccUVqFevnnaMjELSQ1W9enXds7RhwwZdV61atTL0Shnl5fjyyy9j+PDhrln6fNq0aZBeKiYSsDMB+Ysx/2AQZu2/vMKtUck09K6dhnAueLOz2akbCdiSgEzZkWk+0pkSGRmZpY5+7VnKyulwlXb58uX6UiZguyfx8zLLdy8nnqNM2k5LS8PYsWPT3Zb5SkaKjY1F7dq1ERcXp+c5NW/e3LiV7ijO2eDBg5150rMUExODzp07Zwvb+YCHJyK3DCV26tQJoaH23JXd7jraTb/k1DQM+3mzcpQO6re4b7uqGHxNdfz+2wLbvqd2s6H7nx+76yf62l1H6uf+Vnt+bYwM5fSEX52lRx99VDsx2QlZrVo1rFu3DkePHs1Q
7Pjx43roLMMNlwx5iXr06IHdu3fj999/z9GZEQdJHJPt27cjK2cpPDwc8nFP8pwvnBpf1esuvz+v7a6jHfQ7czEZD09djX92nIREA3i5W0Pc17aac9jNDjpm9x2gftnRscY92tAadspKSl/YT+r0JPnVWSpTpgzkk1OSidzSRfbvv//qCdpSftmyZTqvXbt2WT5uOEri+Pzxxx8oXTrnPak2btyo//hXqFAhy3p5gwQCjcA+teLtP18ux45j51EkLAQf9mqGa+qVDzQM1JcESCBACQRbQe/69evjhhtugAyZyYo4+ch5165d003ulvlI33//vVYpJSUFd9xxB1asWAGZwJ2amoojR47oT1JSki6zc+dOvPLKK7rMnj17MHv2bNx5551o1qwZ2rdvbwU0lJEEfE5guVrxduvYf7SjVD4yHN882JaOks+pswESIAEzEfBrz1JuQIjDM2jQID0vSJ6TeEkffvhhuiokjID0QEk6cOAAfvrpJ33etGlTfTT+kV4mCUYZFhaG3377De+//z7Onz+v5x116dJFr4YLCeFsVYMXj4FL4Du9x9t6JKm5So0qRek93qJViAAmEiABEggkApZxlkqVKpVuFVtmRnJd2CdznVyvMysvk7L//PPPzG4xjwQCmkBamgMj52/Dh3/s0BxuaBiNUT2borAagmMiARIggUAjYBlnKdAMQ31JwF8ELial4qkZazBbxVGSxD3e/GUJtksCJGAWAnSWzGIJykECJiBw7Owl9J+0AusOnEFoiOzx1hi3t6hsAskoAgmQAAn4jwCdJf+xZ8skYCoCGw6e0VuXHD5zCSWLhGJ87zi0ql7KVDJSGBIgARLwBwE6S/6gzjZJwGQE5m08gse/XoOLyamoWbYoPu/TElVLFzWZlBSHBEiABPxDgM6Sf7izVRIwBQFZBPHJX7vw5pwtakEE0KF2GRVDqTmiCnsWqM0USlAIEiABEvAxATpLPgbM6knArARkA9wXfliPb1Yc0CLe26YKXr65IQqFWCL8mlmxUi4SIAEbEqCzZEOjUiUSyInA6QtJGDhlJZbtPqW3LnmpawPc366aR3st5lQ375MACZCA3QjQWbKbRakPCeRAYOfx8+g3cTn2qC1MioUXwhi1dUnHuuVyeIq3SYAESCBwCdBZClzbU/MAJPDPjhN4SPUonb2UgkolCuuJ3HWjiwcgCapMAiRAAp4ToLPkOSuWJAFLE5i6bC+G/bgRKSo6d/MqJfDJfXEoUyzc0jpReBIgARIoCAJ0lgqCMtsgAT8SSFb7ur36yyZMWrJXS3FL04p46/bGiAjl1iV+NAubJgESsBABOksWMhZFJYHcEohPSMLDU1dh8c6T+tGnr6+rty8JCgrKbVUsTwIkQAIBS4DOUsCanorbncCOY+fQ78sV2KsmchdRG+COVhvhdlYb4jKRAAmQAAnkjgCdpdzxYmkSsASBP7Ycw2Nfrcb5xBRULlkYn94fh3rRkZaQnUKSAAmQgNkI0Fkym0UoDwnkg4BE5J6waBfe+PVyRG7Z223cPc1RmhO580GVj5IACQQ6ATpLgf4GUH/bELik9nX77/frMXPVQa3T3a2qYHi3hggrxIjctjEyFSEBEvALATpLfsHORknAuwSOnbuEByevxOp98QgJDoJE5L6vbVVG5PYuZtZGAiQQoAToLAWo4am2fQhsOHgGAyatwOEzl/QGuGPVsFv7WmXsoyA1IQESIAE/E6Cz5GcDsHkSyA+BX9YdwpAZa3EpOQ01yxZVE7lbonqZovmpks+SAAmQAAm4EaCz5AaElyRgBQJpKgr36AXb8MHvO7S4HeuWxft3N0NkRKgVxKeMJEACJGApAnSWLGUuCksCwAUVDuCpb9ZizsYjGscDV9bAszfU03OVyIcESIAESMD7BOgseZ8payQBnxHYc+ICHpi8AtuOnkdYSDBG3NYId7So7LP2WDEJkAAJkABAZ4lvAQlYhMDCrccwSAWaPHspBeWKh+Pj
3i3UhrglLSI9xSQBEiAB6xKgs2Rd21HyACEggSY//nMX3p57OdBk8yol8PG9LVAuMiJACFBNEiABEvAvATpL/uXP1kkgWwIJSSl4+tt1mLXusC4ngSZf7tYA4YVCsn2ON0mABEiABLxHgM6S91iyJhLwKoF9agNcmZ+05cg5hIYEKSepIe5pXdWrbbAyEiABEiCBnAnQWcqZEUuQQIET+Hv7CTz61SrEJySjjNrX7eN7myOuWqkCl4MNkgAJkAAJcII33wESMBUBmZ/06aLdaiPczVChlNAkpgTGq/lJ0VGcn2QqQ1EYEiCBgCLAnqWAMjeVNTOBi0mpeG7mOvy45pAW804VEuDVW2MREcr5SWa2G2UjARKwPwE6S/a3MTW0AIEDpxP0RrgbD51FIdkI9+YG6N2GG+FawHQUkQRIIAAI0FkKACNTRXMTWLxTzU+athqnLiShdNEwfKQ2wm1To7S5haZ0JEACJBBABOgsBZCxqaq5CMj8pM//2YMRszcjVU1Qiq0UifG941CpRGFzCUppSIAESCDACdBZCvAXgOr7h4Ds7/bsd+vwy//iJ3VvVglvqK1LOD/JP/ZgqyRAAiSQHQE6S9nR4T0S8AGBncfPY+Dkldh+7Lyen/R8l/ro064agoKCfNAaqyQBEiABEsgvATpL+SXI50kgFwTmbDiCITPW4rzqWZL93caq+UmMn5QLgCxKAiRAAn4gQGfJD9DZZOARSElNw3sLtqg93nZq5VtVL4UPezVTDhPjJwXe20CNSYAErEaAzpLVLEZ5LUfgXDLQ98uVWLr7tJa9/xXV8eyN9dQWJsGW04UCkwAJkEAgEqCzFIhWp84FRmD1/ni8sy4EZ5JOo0hYCN6+ozG6Nq5YYO2zIRIgARIggfwTsMx/bU+fPo3evXsjKipKf+Q8Pj4+WwJ9+vTRk2Zl4qzxadOmTbpnEhMT8dhjj6FMmTIoWrQounXrhgMHDqQrwwsSyC0BCQsweele3PPZcuUoBaFGmSL48ZH2dJRyC5LlSYAESMAEBCzjLPXq1Qtr1qzBnDlz9EfOxWHKKd1www04fPiw8zN79ux0jzzxxBP4/vvv8fXXX+Pvv//G+fPn0bVrV6SmpqYrxwsS8JSAbFvylJrE/eIPG5Cc6kCTUmn49sE2qF2+uKdVsBwJkAAJkICJCFhiGG7z5s3aQVq6dClat26t8U2YMAFt27bF1q1bUbdu3SyRhoeHIzo6OtP7Z86cwWeffYbJkyfjuuuu02WmTJmCmJgYLFiwANdff32mzzGTBLIisPfkBb1tyZYj56B2LcGQzrVR8cxmFI+wxFctK7WYTwIkQAIBTcASPUtLlizRQ2+GoyQWk+E0GZJbvHhxtgZcuHAhypUrhzp16mDAgAE4duyYs/zKlSuRnJyMzp07O/MqVqyI2NjYHOt1PsATEvgfgXkbj6DrmL8hjpJsWzKlf2sMUJO5GT6JrwgJkAAJWJuAJf67e+TIEe3wuKMWJ0juZZVuvPFG3HnnnahatSp2796NF198Eddccw3ESZIeJ3k2LCwMJUuWTFdF+fLls61X5jnJx0hnz57Vp+J4ycdbyajLOHqrXjPVY+hmHM0km6eyJKuwAO/O247PF+/VjzSNicIHPZugQlSE832wsn45cTB0M445lbfafUMv42g1+XOS19DLOOZU3or3Dd2MoxV1yE5mQy/jmF1ZK94z9DKO3tTB0zr96iy9/PLLGD58eLZ6L1++XN/PLLqxTKLNLN+osGfPnsap7i2Ki4vTjtOsWbNw2223Oe+5n+RU7xtvvJGp3PPmzUORIkXcq8v39fz58/Ndh9krsKqOp5XPPHFbCPacV2NuKl1dIQ03VzyJ1f/8jtUu0K2qn4sKOZ7aXUfql+MrYPoCtKHpTZStgL6wX0JCQrZtGjf96iw9+uijuOuuuwxZMj1Wq1YN69atw9GjRzPcP378OKQXyNNUoUIF7Sxt375dPyJzmZKSkiAr7Vx7l2Sorl27
dllWO3ToUAwePNh5X3qWZJ6TDOdFRkY68/N7Ih6vvBydOnVCaGhofqsz5fNW1vGv7Sfw8rfrcTohWc9JerN7Q3RukP59tLJ+nr4wdteR+nn6Jpi3HG1oXtt4Ipkv7WeMDOUkh1+dJVmuL5+ckkzklsnY//77L1q1aqWLL1u2TOdl59S413vy5Ens378f4jRJatGihXZCxCHp0aOHzpOVcxs2bMDbb7+trzP7R4bw5OOexKHxhVPjq3rd5ffntZV0lGjcoxdsx4d/7NDIYitFYmyvFqhSOuteRSvpl9f3wO46Ur+8vhnmeY42NI8t8iKJL+wndXqSgj0p5O8y9evXh4QAkAnasiJOPnIuS/xdV8LVq1dPhwEQeSUEwJAhQyCTw/fs2QOZ6H3zzTdr56x79+5aJZkg3q9fPzz11FP47bffsHr1atx7771o1KiRc3Wcv3Vn++YicOzsJdz72TKno9S7TVV8O7Bdto6SuTSgNCRAAiRAArkl4NeepdwIO3XqVAwaNMi5ck2CR3744YfpqpAwAtIDJSkkJATr16/HpEmTdPBK6U3q2LEjpk+fjuLF/z/ezahRo1CoUCHds3Tx4kVce+21mDhxon4+XeW8CHgCi3eewKCv1uDE+UQUVdG437i9Mbo1qRjwXAiABEiABOxOwDLOUqlSpSAxkLJLMjHbSIULF8bcuXONyyyPERERGDNmjP5kWYg3AppAWpoDH6kht1ELtkGdol50cXx0T3PULFssoLlQeRIgARIIFAKWcZYCxSDU01wEpBdp8Ddr8de241qwnnExeLlbQxRWPUtMJEACJEACgUGAzlJg2Jla5oHA4h0n8MT0NTh2LhERocF47dZGuKNF5TzUxEdIgARIgASsTIDOkpWtR9l9QsBY7fbRwh2Qkd3a5Yrhw17NUVcNvzGRAAmQAAkEHgE6S4Fnc2qcDYGD8Rfx+FersWLvaV3q7lYxeKkrh92yQcZbJEACJGB7AnSWbG9iKugpgTkbjuDZ79bhzEUVZDK8kFrt1ghdG3O1m6f8WI4ESIAE7EqAzpJdLUu9PCZwKTkVI2ZvxqQle/UzTWJK4MO7myGmVNZBJj2unAVJgARIgAQsT4DOkuVNSAXyQ2DHsfN4dNoqbDlyTlfz4JU18FTnuggrZIl4rflRnc+SAAmQAAl4SIDOkoegWMxeBCQm14yVBzDsx424qHqWShcNw8ieTXFVnbL2UpTakAAJkAAJ5JsAnaV8I2QFViNw7lIyXvhhA35cc0iL3r5WaYzq0RTlIiOspgrlJQESIAESKAACdJYKADKbMA+BlWqV2xPTV2P/qYsICQ7C4E518NBVNRGszplIgARIgARIIDMCdJYyo8I82xGQ2Eljft+hN8BNVXuWVCpRGB/c3RQtqpayna5UiARIgARIwLsE6Cx5lydrMyGBfScTdG/Sqn3xWrpbm1bEK7fGIjIi1ITSUiQSIAESIAGzEaCzZDaLUB6vEZBJ3DNXHcRLP27AhaRUHTvpte6xuKVpJa+1wYpIgARIgATsT4DOkv1tHJAanklIxn9/WI9Z6w5r/VtVK6VWuzVB5ZKMnRSQLwSVJgESIIF8EKCzlA94fNScBJbsPInB36zB4TOXUEhN3H5STeIeqCZxy4RuJhIgARIgARLILQE6S7klxvKmJZCUkoaR87dh/F879Qa41csUxWgVO0kicjORAAmQAAmQQF4J0FnKKzk+ZyoCW1UEbulN2njorJbrrpYxeLFrAxRVe7wxkQAJkAAJkEB+CPCXJD/0+KzfCUgYgE8X7cJ787YhSYUHKFEkFG/e1hg3xEb7XTYKQAIkQAIkYA8CdJbsYceA1EJCAjw1Yw2W7zmt9b+mXjnlKDViJO6AfBuoNAmQAAn4jgCdJd+xZc0+IiAhAab9uw+vz9qMBBUSoGhYCF66uQF6xMUgKIiTuH2EndWSAAmQQMASoLMUsKa3puJHz17CM9+uw5/bjmsFWlcvhXfvbIKYUgwJYE2L
UmoSIAESMD8BOkvmtxEl/B+Bn9YewotqA9wzF5MRVigYz1xfF/9pX537uvENIQESIAES8CkBOks+xcvKvUHg9IUkvKCicBsBJhtVisLIHk1Qu3xxb1TPOkiABEiABEggWwJ0lrLFw5v+JjBnwxG8oHqTTpxP1EElH7umFh7pWAuhIcH+Fo3tkwAJkAAJBAgBOksBYmirqXlSOUcv/bTR2ZtUq1wx3ZvUuHIJq6lCeUmABEiABCxOgM6SxQ1oN/Flpdsvaj+3YcpROqWG32SLkgevrIFB19ZGRGiI3dSlPiRAAiRAAhYgQGfJAkYKFBGPnbukJ3DP3XhUq1wvujjeuaMJGlWOChQE1JMESIAESMCEBOgsmdAogSaS6kzCD2sO4bXZW/VKN9n89lE1N+nhq2vpVW+BxoP6kgAJkAAJmIsAnSVz2SPgpDmi4iZ9siUYm5Zu0LrHVorUvUn1K0QGHAsqTAIkQAIkYE4CdJbMaRfbSyVzk75ZsR+v/rIZ5xOD1eq2IDxxXR08oOYncaWb7c1PBUmABEjAUgToLFnKXPYQdtfx8/jv9+uxdNcprVDVYg6M69sODSqVtIeC1IIESIAESMBWBOgs2cqc5lYmKSUN4//ciTF/7ICcR4QG43E1Nyn6zCbUVqEBmEiABEiABEjAjAToLJnRKjaUacWeUxg6cz22HzuvtbuyTlm8fmssoouHYvbsTTbUmCqRAAmQAAnYhQCdJbtY0qR6yD5ub8/ZgqnL9mkJSxcNw0s3N0C3JhURFBSE5ORkk0pOsUiABEiABEjgMgE6S3wTfEJAJnD/qrYqkeCSx88l6jZ6xsVg6E31UKJImE/aZKUkQAIkQAIk4AsCdJZ8QTXA6zwUfxEvqY1vF2w+pknUKFMUr3dvhLY1Swc4GapPAiRAAiRgRQJ0lqxoNZPKnJyahon/7MHoBdtwISlVhwN4SAWWfPjqmtyqxKQ2o1gkQAIkQAI5E6CzlDMjlvCAwNJdJ3Vv0raj53XpuKol8cZtjVC7fHEPnmYREiABEiABEjAvATpL5rWNJSST/dzemL0F368+qOUtpSZwP3dDPdzRojKC1bYlTCRAAiRAAiRgdQLBVlHg9OnT6N27N6KiovRHzuPj47MVX1ZbZfZ55513nM9dffXVGcrcddddzvs8yZxAihpy++Kf3bj23T+1o6RQ457WVfD7U1ehR8sYOkqZY2MuCZAACZCABQlYpmepV69eOHDgAObMmaMxP/DAA9p5+vnnn7PEfvjw4XT3fv31V/Tr1w+33357uvwBAwbglVdeceYVLlzYec6TjARW7j2FF37YiM2Hz+qbjStH4dVbYtEkpkTGwswhARIgARIgAYsTsISztHnzZu0kLV26FK1bt9bIJ0yYgLZt22Lr1q2oW7dupmaIjo5Ol//jjz+iY8eOqFGjRrr8IkWKwL1sugK80AROnE/EW79uwYyVB/R1VOFQPHNDXdzVsgpCOOTGt4QESIAESMCmBCwxDLdkyRI99GY4SmKLNm3a6LzFixd7ZJqjR49i1qxZumfJ/YGpU6eiTJkyaNiwIYYMGYJz5865Fwnoa9maZMJfu9DxnYVOR0liJsmQ2z2tq9JRCui3g8qTAAmQgP0JWKJn6ciRIyhXrlwGa0ie3PMkffnllyhevDhuu+22dMXvueceVK9eXfcsbdiwAUOHDsXatWsxf/78dOVcLxITEyEfI509e3k4SqJRezMitVGXcTTaK6ijBJZcuO0ERvy6FXtOJuhmYytG4qUu9dCsSgl9nV/ZjOeNY0HpVlDtGHoZx4JqtyDbMXQzjgXZdkG0ZehlHAuizYJsw9DLOBZk2wXVlqGbcSyodguqHUMv41hQ7RZUO4ZextGb7XpaZ5D6QXR4s+Hc1PXyyy9j+PDh2T6yfPlyzJs3D+LsyJCba6pdu7buKXruuedcszM9r1evHjp16oQxY8Zket/IXLlyJeLi4iDH5s2bG9npjlnJPW3aNMiQnh3SEeUb
/bA3GJvjg7U6xUMd6FolDa3KOsARNztYmDqQAAmQAAkkJCRA5kSfOXMGkZGRWQLxa8/So48+ipxWnlWrVg3r1q2DDKO5p+PHj6N8+fLu2RmuFy1apB2t6dOnZ7jnniEOUmhoKLZv356lsyS9T4MHD3Y+Kj1LMTEx6Ny5c7awnQ94eCIer/RwiZMnMhVEkr3cPvxjJ6as34+UNIcOLNmnbVU8dFUNFI/w/uviDx0LgqPRht31Ez3triP1M95m6x5pQ+vaTiT3pf2MkaGcCHn/1y+nFl3uyzwh+eSUZCK3eH3//vsvWrVqpYsvW7ZM57Vr1y6nx/HZZ5+hRYsWaNKkSY5lN27cqA1ToUKFLMuGh4dDPu5JHBpfODW+qtdVfgkFMH3Ffrw3bxtOXUjStzo1KI/nb6qPamq7El+ngtDR1zpkV7/d9RPd7a4j9cvuDbfGPdrQGnbKSkpf2E/q9CT51VnyREApU79+fdxwww2QJf7jx4/Xj0nogK5du6ZbCSdDbW+88Qa6d+/urFq8xhkzZuC9995z5hknO3fuhEzuvummm7TTtmnTJjz11FNo1qwZ2rdvbxSz9VFGYf/YegxvqlVuRvTt2uWK4aWbG6BD7bK21p3KkQAJkAAJkIAnBCzhLIki4tQMGjRID3XJdbdu3fDhhx/KqTPJnCbpgXJNX3/9NcQhuPvuu12z9XlYWBh+++03vP/++zh//rweSuvSpQuGDRuGkJCQDOXtlrHh4Bm8PmszlqitSiSVKBKKJ66tjXvbVEWhkMtzleymM/UhARIgARIggdwSsIyzVKpUKUyZMiVb/TKbqy49UPLJLMk8oz///DOzW7bOO3A6Ae/O3Yof1hzSeoYVCkbfdtXwcMdakNhJTCRAAiRAAiRAAv9PwDLO0v+LzLO8EpDJ22P/2IEvFu+BxE6S1L1ZJTzVuQ4ql7THKr68suFzJEACJEACJJAVATpLWZGxUX5iSiqmLN2HMb9vR3xCstasXc3S+K+avB1bKcpGmlIVEiABEiABEvA+ATpL3mdqmhplhdvM1Qfx/oLtOBh/Ucslk7fFSbq6blm9gbBphKUgJEACJEACJGBSAnSWTGqY/IiVpuIj/brhCN6bvxW7jl/QVZWPDMcT19XBnS0qc/J2fuDyWRIgARIggYAjQGfJRiaXCe4Ltx7HO2ry9qbDZ7VmJdUKt4evroXeKrBkRKj9V/jZyJxUhQRIgARIwCQE6CyZxBD5FWOZWv4vTtKKvad1VcXCC6F/h+rod0V1FXmbK9zyy5fPkwAJkAAJBC4BOksWt/3qfacxSs1J+mvbca1JuAoD0EeFARh4VU2ULBpmce0oPgmQAAmQAAn4nwCdJf/bIE8SrNx7CqOVk7Ro+wn9fCG1u+1drWLw2DW1UT4yIk918iESIAESIAESIIGMBOgsZWRi6hwZbvtAhQD4Z8flqNshykmSWEmDlJNUpTRjJZnaeBSOBEiABEjAkgToLFnAbDJxe/HOEzoEwLLdp7TE0pN0Z1xlPXk7phSdJAuYkSKSAAmQAAlYlACdJRMbTpykrfFBmPzZcjVxO15LGhoShB5xMXjo6pqMum1i21E0EiABEiAB+xCgs2RiWw6avg5zNsty/3iEqY1tZU6STNyuWKKwiaWmaCRAAiRAAiRgLwJ0lkxsz7iqJfDbpiPo1aaq6kmqjegoTtw2sbkoGgmQAAmQgE0J0FkysWF7qjlJYUc34u6b6iE0lLGSTGwqikYCJEACJGBjAsE21s3yqknE7SiGSrK8HakACZAACZCAtQnQWbK2/Sg9CZAACZAACZCAjwnQWfIxYFZPAiRAAiRAAiRgbQJ0lqxtP0pPAiRAAiRAAiTgYwJ0lnwMmNWTAAmQAAmQAAlYmwCdJWvbj9KTAAmQAAmQAAn4mACdJR8DZvUkQAIkQAIkQALWJkBnydr2o/QkQAIkQAIkQAI+JkBnyceAWT0JkAAJkAAJkIC1CdBZsrb9KD0JkAAJkAAJkICP
CdBZ8jFgVk8CJEACJEACJGBtAnSWrG0/Sk8CJEACJEACJOBjAnSWfAyY1ZMACZAACZAACVibAJ0la9uP0pMACZAACZAACfiYQCEf1x8Q1TscDq3n2bNnvapvcnIyEhISIPWGhoZ6tW6zVGZ3He2un7xHdteR+pnlr0Xe5aAN887ODE/60n7G77bxO56VvnSWsiKTi/xz587p0jExMbl4ikVJgARIgARIgATMQEB+x6OiorIUJUh5U5e7RbIswhs5EUhLS8OhQ4dQvHhxBAUF5VTc4/vi8YoDtn//fkRGRnr8nJUK2l1Hu+sn75rddaR+VvqLkrmstGHmXKyS60v7iQskjlLFihURHJz1zCT2LHnhbRHAlStX9kJNmVchjpJdnSVDY7vraHf9xI5215H6Gd9W6x5pQ+vaTiT3lf2y61EyiGXtRhkleCQBEiABEiABEiCBACZAZymAjU/VSYAESIAESIAEciZAZylnRn4rER4ejmHDhkGOdk1219Hu+sl7aXcdqZ/1//rQhta2oRnsxwne1n6HKD0JkAAJkAAJkICPCbBnyceAWT0JkAAJkAAJkIC1CdBZsrb9KD0JkAAJkAAJkICPCdBZ8jFgVk8CJEACJEACJGBtAnSWrG0/Sk8CJEACJEACJOBjAnSWfAw4p+pff/11tGvXDkWKFEGJEiVyKq7vS8TRl19+WUccLVy4MK6++mps3Lgx3bOJiYl47LHHUKZMGRQtWhTdunXDgQMH0pUpiIvTp0+jd+/eOoy8BP6S8/j4+GyblijomX3eeecd53Ois3uZu+66y3m/oE7yol+fPn0yyN6mTZt0IpvFfiJUbnWUfZyeffZZNGrUSL97Ehn3vvvu01HuXZX0lw3Hjh2L6tWrIyIiAi1atMCiRYtcxcpw/ueff+pyUr5GjRr4+OOPM5T57rvv0KBBA70yUI7ff/99hjIFmZEbHWfOnIlOnTqhbNmyOuhf27ZtMXfu3HTiTpw4McM7K9+/S5cupStXUBe50W/hwoWZyr5ly5Z04prJhrnRL7O/J2Kbhg0bOvUzk/3++usv3Hzzzfr3S+T84YcfnHJmdWKK76Bsd8LkPwIvvfSSY+TIkY7Bgwc7lDPhkSBvvvmmQ22t4lBfbsf69esdPXv2dFSoUMGhQsI7nx84cKCjUqVKjvnz5ztWrVrl6Nixo6NJkyaOlJQUZ5mCOLnhhhscsbGxjsWLF+uPnHft2jXbpg8fPuxw/Xz++ecO9aVy7Ny50/ncVVdd5RgwYEC6csoJc94vqJO86Hf//fc75DlXHU+ePJlOZLPYT4TKrY5ih+uuu84xffp0h/pBcixZssTRunVrh3JM0unoDxt+/fXXDrUptWPChAmOTZs2OR5//HGH+s+EY+/evelkMy527drlUP+R0eWkvDwnz3/77bdGEf1eh4SEOEaMGOHYvHmzPhYqVMixdOlSZ5mCPMmtjsLgrbfecvz777+Obdu2OYYOHap1lL8bRvriiy8cKnpyundW3l9/pNzq98cff8iWXo6tW7emk9/1b6H8fTKLDXOrn3zfXP+WqO2xHKVKlXKosDNO85jJfrNnz3Y8//zz+vdL7KL+Y+GUM7MTs3wHkZlwzCt4AvIye+IsqX3oHNHR0Q5xmIyk/nenn1X/49VZ8uWRP+jypTPSwYMHHWpbFsecOXOMLJ8f5cdFvgyuPxrywyl58iPqabrlllsc11xzTbri8kMrf+T9mfKqnzhLolNWySz2E/nyqqO7bvJDLHZ3dUr8YcNWrVo5xBF1TfXq1XM899xzrlnO82eeecYh913Tgw8+6FA9gc6sHj16aIfSmaFOrr/+eofq6XTNKrDz3OqYmWCqd8wxfPhw5y1P/z45H/DhSW71M5wl1UOapVRmsmFu9XNXSpwP+c/lnj17nLfMZD+nUOrEE2fJLN9BDsMpa1kp7d69G0eOHEHnzp2dYkvALvXDA/W/I523cuVKyFCIaxkZClG9Os4yzod9eKIcIz38
pnoVnK3IcJNyCj2W4+jRo5g1axb69evnrMM4mTp1qh5mlO7mIUOG6M0QjXsFccyPfjI0UK5cOdSpUweqhwzHjh1zimwW+4lA+dHRqZA6OXPmjB4KcR9qLkgbJiUlQdi6fi9ERrk2vjuuMsu56O9eXjlCWLFihf6OZVcmqzrd2/DmdV50dG9fNgaXjUVV70S6W+fPn0fVqlX1PpiqdxirV69Od78gLvKjX7NmzaB64HHttddCOVDpxM3KzgVtw/zoZyj02WefQfXsalsZeXI0g/1c5fH0PCvbFPR3kBvpemoxk5QTR0lS+fLl00kk1+p/7TpPyoSFhaFkyZIZyhjPp7vhowtpSxwC9yR5nsrx5ZdfQg054rbbbktXzT333KPnnaheNmzYsAFq6ABr166FGnZMV86XF3nV78Ybb8Sdd96p/5iJ8/viiy9C9ZzpH3JxfKVeM9hP2OVVR1fuMq9F9dygV69e6TaELmgbnjhxAqmpqZl+d7J6HyU/s++aGsKB1Cc/vlmVyapOVzbePs+Lju4yvPfee7hw4QJUb4vzlupdg8x7kXlosgP8+++/j/bt2+vvXO3atZ3lfH2SF/3ERp988omedyZzASdPnqwdJvkPy5VXXqlFNosN86KfK3M1HIdff/0V06ZNc82GWeyXTigPL7KyTUF/B+kseWiw3BSTydeqCzvbR5YvX464uLhsy2R3UybGuSbp0XTPc70v556UcX8ms2tP9ZNnM5MpN3Ko+UqQH1WZXOuapDfGSNJjJn+whaeaZ4HmzZsbt/J09LV+ao6ZUy6RXeSW/7FLD5q7U+gsqE5yw831uczOfa2j0ab0cMrEe+mtkEmrrsmXNnRtx/3c/Z3MiWtm5aVO13zXc7mXU51Sxpcpr/J89dVXkHfjxx9/TPcfHekRdl2EII6SfM/GjBmDDz74wJeqZFp3bvSrW7cu5GMkmcCu5vXg3XffdTpLci83dRp1+eqYV1nEoZXe21tvvTWdaGazXzrhPLjIjIc85prvei73vP0dpLMkVL2cHn30Uf0DkV211apVy+52lvekJ0WSeNvyPyYjyTCO8T9gKSPdubKKybV3ScrIyrv8Jk/1W7duHWQYzT0dP37cKav7PddrWaWkJmVCTRR2zc70XP5wq3la2L59e76dpYLSz1BE7CjOksguydf2kzYKQkdxlKR3QnrPfv/993S9SiKDe/KmDd3rlmtZGaom8ervjut91++Oa76ciy3ce4ikvJrAjdKlS+viWZUxvo/udfryOi86GvLI90yGu2fMmKGHcYz8zI5q/iNatmzpfGczK+OLvPzo5yqPOA9TpkxxZpnFhvnRT5wD+c+lrDiWnunskr/sl51MWd3LyjYF/h1UgJlMQMDTCXjGBG9ZvWIk1bWc6QRvWY1kpEOHDvltgveyZcsMMfRkb/Wl8GiCt0yEdl9B5azI7URWBUq9aomp2x3fXRqTn/OqnyGZ6np3qOE3hxpy1FnGBG9/20+EyauOyll3qP/dOtR8ModyLgxVsz0WhA1l8uxDDz2UTo769etnO8Fb7rsmmSCufmydWTI5WA2tOq/lRFYQ+nOCd250FHnVsI1D9d7muDJJykqSv0OqR9TRt2/fyxkF+G9ubZiZaLfffrteIWzcM5MN86qfMZFdvkc5JX/az1U2+Zud02o4meBthu+gdFUx+ZGArA5SEyX1ypNixYrpc7lWEyydUqkuZIeKheK8lpVwapK0zpMvxt13351p6IDKlSs7FixYoEMHyGoyf4UOaNy4sV4+ribqOdSchwyhA9z1E0XVhGC9ZHvcuHFOvY2THTt2aF5qKNOhei0cavhKr1hSEzj9EhohN/qJXZ966im93Fxklz9walhAh3lwD/1gBvsJc/nhz42OqkfJoeJ6OUT+NWvWpFvWLI69JH/Z0FiWrSbBakfwiSee0KEDjJVDsipO/c9cyyj/GMuWn3zySV1ennMPHfDPP//oZefyvZTQAXI0Q+gAT3UUR0nk
/eijj9LZSpx2I6mhOb2SVsJ3yN8ncZLkGdf/KBhlfX3MrQ1HjRqlf5AlLIKa36gdY/mRltArRjKTDXOrn6HDvffeq0N0GNeuRzPZT/4GyjskH7GDhM6Rc2OlrFm/g3SWXN8oP5xL74m8MO4f+RE1ktyTnicjyf8KJIaG6p7UPRJqkqKOt2Tcl+PFixcdaqhFx9tQgSu1g7Jv3z7XIgVyLvGD1JwjHRdKYkPJufsSXnf9RLDx48c7RG7XP9iGwKKH6CyxRFR3s6NmzZqOQYMGOdxjFRnlfXnMrX4JCQkOtbrKoQIA6h/dKlWqOOQdcLeNWewn7HKroziB7u+zcW281/60oTgFathTvztq6C9db6TYQkIauCY1Edghjri8a2r43JGZA6+Grhzi9IsjJaEGXH+IXesqqPPc6Cj6GvZxPQoLI4lTKe+qMJB3V95htVLMuF3gx9zoJ73w8jdCes7UtATHFVdcof+D5S60mWyYG/1ED/k7KX8v1UR2d7X0tZnsJ38DXN8z49x438z6HQwSkkpYJhIgARIgARIgARIggUwIBGeSxywSIAESIAESIAESIIH/EaCzxFeBBEiABEiABEiABLIhQGcpGzi8RQIkQAIkQAIkQAJ0lvgOkAAJkAAJkAAJkEA2BOgsZQOHt0iABEiABEiABEiAzhLfARIgARIgARIgARLIhgCdpWzg8BYJkAAJkAAJkAAJ0FniO0ACJEACJEACJEAC2RCgs5QNHN4iARIgARIgARIgATpLfAdIgARIgARIgARIIBsCdJaygcNbJEACgUng+PHjUHsvYsSIEU4AatNYqL3RMG/ePGceT0iABAKDAPeGCww7U0sSIIFcEpg9ezZuvfVWqA1joTbHhdpMF126dMHo0aNzWROLkwAJWJ0AnSWrW5DykwAJ+IzAI488ggULFqBly5ZYu3Ytli9fDrV7vc/aY8UkQALmJEBnyZx2oVQkQAImIHDx4kXExsZi//79WLFiBRo3bmwCqSgCCZBAQRPgnKWCJs72SIAELENg165dOHToENLS0rB3717LyE1BSYAEvEuAPUve5cnaSIAEbEIgKSkJrVq1QtOmTfWcpZEjR2L9+vUoX768TTSkGiRAAp4SoLPkKSmWIwESCCgCTz/9NL799ls9V6lYsWLo2LEjihcvjl9++SWgOFBZEiABgMNwfAtIgARIwI3AwoUL9aq3yZMnIzIyEsHBwZDzv//+G+PGjXMrzUsSIAG7E2DPkt0tTP1IgARIgARIgATyRYA9S/nCx4dJgARIgARIgATsToDOkt0tTP1IgARIgARIgATyRYDOUr7w8WESIAESIAESIAG7E6CzZHcLUz8SIAESIAESIIF8EaCzlC98fJgESIAESIAESMDuBOgs2d3C1I8ESIAESIAESCBfBOgs5QsfHyYBEiABEiABErA7ATpLdrcw9SMBEiABEiABEsgXATpL+cLHh0mABEiABEiABOxOgM6S3S1M/UiABEiABEiABPJFgM5SvvDxYRIgARIgARIgAbsToLNkdwtTPxIgARIgARIggXwR+D9mO+wPsVBycwAAAABJRU5ErkJggg==", "text/plain": [ "" ] diff --git a/docs/docs/integrations/tools/bash.ipynb b/docs/docs/integrations/tools/bash.ipynb index f71526f636d..b01f070926f 100644 --- a/docs/docs/integrations/tools/bash.ipynb +++ b/docs/docs/integrations/tools/bash.ipynb @@ -16,7 +16,17 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, + "id": "a83d2ea9", + "metadata": {}, + "outputs": [], + "source": 
[ + "%pip install --upgrade --quiet langchain-community" + ] + }, + { + "cell_type": "code", + "execution_count": null, "id": "f7b3767b", "metadata": { "tags": [] diff --git a/docs/docs/integrations/tools/bearly.ipynb b/docs/docs/integrations/tools/bearly.ipynb index 334c8ad4ecb..72b6b15e518 100644 --- a/docs/docs/integrations/tools/bearly.ipynb +++ b/docs/docs/integrations/tools/bearly.ipynb @@ -12,6 +12,16 @@ "Get your api key here: https://bearly.ai/dashboard/developers" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "8265cf7f", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "markdown", "id": "3f99f7c9", diff --git a/docs/docs/integrations/tools/bing_search.ipynb b/docs/docs/integrations/tools/bing_search.ipynb index b0d59ac6ccb..1a54765d51f 100644 --- a/docs/docs/integrations/tools/bing_search.ipynb +++ b/docs/docs/integrations/tools/bing_search.ipynb @@ -18,6 +18,15 @@ "Then we will need to set some environment variables." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 20, diff --git a/docs/docs/integrations/tools/brave_search.ipynb b/docs/docs/integrations/tools/brave_search.ipynb index c78380267a5..2afe244dd4f 100644 --- a/docs/docs/integrations/tools/brave_search.ipynb +++ b/docs/docs/integrations/tools/brave_search.ipynb @@ -11,6 +11,16 @@ "Go to the [Brave Website](https://brave.com/search/api/) to sign up for a free account and get an API key." 
] }, + { + "cell_type": "code", + "execution_count": null, + "id": "2d7e7b3d", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/docs/docs/integrations/tools/chatgpt_plugins.ipynb b/docs/docs/integrations/tools/chatgpt_plugins.ipynb index c69ac2664ee..9821c12c2f7 100644 --- a/docs/docs/integrations/tools/chatgpt_plugins.ipynb +++ b/docs/docs/integrations/tools/chatgpt_plugins.ipynb @@ -14,6 +14,16 @@ "Note 2: There are almost certainly other ways to do this, this is just a first pass. If you have better ideas, please open a PR!" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "70d493c8", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/docs/docs/integrations/tools/connery.ipynb b/docs/docs/integrations/tools/connery.ipynb index 43228f56f38..af46b518e87 100644 --- a/docs/docs/integrations/tools/connery.ipynb +++ b/docs/docs/integrations/tools/connery.ipynb @@ -40,6 +40,15 @@ "Here, we use the ID of the **Send email** action from the [Gmail](https://github.com/connery-io/gmail) plugin." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 11, diff --git a/docs/docs/integrations/tools/dalle_image_generator.ipynb b/docs/docs/integrations/tools/dalle_image_generator.ipynb index d6a6515110c..1386304cc64 100644 --- a/docs/docs/integrations/tools/dalle_image_generator.ipynb +++ b/docs/docs/integrations/tools/dalle_image_generator.ipynb @@ -19,7 +19,7 @@ "outputs": [], "source": [ "# Needed if you would like to display images in the notebook\n", - "%pip install --upgrade --quiet opencv-python scikit-image" + "%pip install --upgrade --quiet opencv-python scikit-image langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/dataforseo.ipynb b/docs/docs/integrations/tools/dataforseo.ipynb index ba96e920a0e..f01d3423f9f 100644 --- a/docs/docs/integrations/tools/dataforseo.ipynb +++ b/docs/docs/integrations/tools/dataforseo.ipynb @@ -13,6 +13,15 @@ "This notebook demonstrates how to use the [DataForSeo API](https://dataforseo.com/apis) to obtain search engine results. 
" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/docs/docs/integrations/tools/dataherald.ipynb b/docs/docs/integrations/tools/dataherald.ipynb index bfb9bf35b2a..ee5d121d345 100644 --- a/docs/docs/integrations/tools/dataherald.ipynb +++ b/docs/docs/integrations/tools/dataherald.ipynb @@ -31,7 +31,8 @@ }, "outputs": [], "source": [ - "pip install dataherald" + "pip install dataherald\n", + "%pip install --upgrade --quiet langchain-community" ] }, { @@ -114,4 +115,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/docs/integrations/tools/ddg.ipynb b/docs/docs/integrations/tools/ddg.ipynb index e974f548e87..641f7357919 100644 --- a/docs/docs/integrations/tools/ddg.ipynb +++ b/docs/docs/integrations/tools/ddg.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet duckduckgo-search" + "%pip install --upgrade --quiet duckduckgo-search langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/e2b_data_analysis.ipynb b/docs/docs/integrations/tools/e2b_data_analysis.ipynb index 3c61e5a0357..769e6a7fc78 100644 --- a/docs/docs/integrations/tools/e2b_data_analysis.ipynb +++ b/docs/docs/integrations/tools/e2b_data_analysis.ipynb @@ -46,7 +46,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain e2b" + "%pip install --upgrade --quiet langchain e2b langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/edenai_tools.ipynb b/docs/docs/integrations/tools/edenai_tools.ipynb index 3126a5c7f28..dc19ba7507f 100644 --- a/docs/docs/integrations/tools/edenai_tools.ipynb +++ b/docs/docs/integrations/tools/edenai_tools.ipynb @@ -42,6 +42,15 @@ "Once we have a key we'll want to set it as the environment variable ``EDENAI_API_KEY`` or you can pass the key in 
directly via the edenai_api_key named parameter when initiating the EdenAI tools, e.g. ``EdenAiTextModerationTool(edenai_api_key=\"...\")``" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 1, diff --git a/docs/docs/integrations/tools/eleven_labs_tts.ipynb b/docs/docs/integrations/tools/eleven_labs_tts.ipynb index 66a1fa1e116..ff70757d258 100644 --- a/docs/docs/integrations/tools/eleven_labs_tts.ipynb +++ b/docs/docs/integrations/tools/eleven_labs_tts.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet elevenlabs" + "%pip install --upgrade --quiet elevenlabs langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/exa_search.ipynb b/docs/docs/integrations/tools/exa_search.ipynb index db3efad940e..b1388a3f6fe 100644 --- a/docs/docs/integrations/tools/exa_search.ipynb +++ b/docs/docs/integrations/tools/exa_search.ipynb @@ -50,10 +50,10 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain-exa\n", + "%pip install --upgrade --quiet langchain-exa \n", "\n", "# and some deps for this notebook\n", - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain langchain-openai langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_cloud_texttospeech.ipynb b/docs/docs/integrations/tools/google_cloud_texttospeech.ipynb index de44c5b10aa..11f1a13ae18 100644 --- a/docs/docs/integrations/tools/google_cloud_texttospeech.ipynb +++ b/docs/docs/integrations/tools/google_cloud_texttospeech.ipynb @@ -27,7 +27,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet google-cloud-text-to-speech" + "%pip install --upgrade --quiet google-cloud-text-to-speech langchain-community" ] }, { diff --git 
a/docs/docs/integrations/tools/google_drive.ipynb b/docs/docs/integrations/tools/google_drive.ipynb index 544411db67b..d3d216a733b 100644 --- a/docs/docs/integrations/tools/google_drive.ipynb +++ b/docs/docs/integrations/tools/google_drive.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet google-api-python-client google-auth-httplib2 google-auth-oauthlib" + "%pip install --upgrade --quiet google-api-python-client google-auth-httplib2 google-auth-oauthlib langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_finance.ipynb b/docs/docs/integrations/tools/google_finance.ipynb index 9f983e8bddb..315b4dae78f 100644 --- a/docs/docs/integrations/tools/google_finance.ipynb +++ b/docs/docs/integrations/tools/google_finance.ipynb @@ -32,7 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet google-search-results" + "%pip install --upgrade --quiet google-search-results langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_jobs.ipynb b/docs/docs/integrations/tools/google_jobs.ipynb index 021159df69d..855fb4ec3f2 100644 --- a/docs/docs/integrations/tools/google_jobs.ipynb +++ b/docs/docs/integrations/tools/google_jobs.ipynb @@ -59,7 +59,7 @@ } ], "source": [ - "%pip install --upgrade --quiet google-search-results" + "%pip install --upgrade --quiet google-search-results langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_lens.ipynb b/docs/docs/integrations/tools/google_lens.ipynb index b9729c9337b..00a6c47e6c9 100644 --- a/docs/docs/integrations/tools/google_lens.ipynb +++ b/docs/docs/integrations/tools/google_lens.ipynb @@ -39,7 +39,7 @@ } ], "source": [ - "%pip install --upgrade --quiet requests" + "%pip install --upgrade --quiet requests langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_places.ipynb b/docs/docs/integrations/tools/google_places.ipynb index 6c8adbc702e..f5a0247a2f5 100644 --- 
a/docs/docs/integrations/tools/google_places.ipynb +++ b/docs/docs/integrations/tools/google_places.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet googlemaps" + "%pip install --upgrade --quiet googlemaps langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_scholar.ipynb b/docs/docs/integrations/tools/google_scholar.ipynb index a94714783be..a2fc92b4eb4 100644 --- a/docs/docs/integrations/tools/google_scholar.ipynb +++ b/docs/docs/integrations/tools/google_scholar.ipynb @@ -28,7 +28,7 @@ } ], "source": [ - "%pip install --upgrade --quiet google-search-results" + "%pip install --upgrade --quiet google-search-results langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/google_search.ipynb b/docs/docs/integrations/tools/google_search.ipynb index beac41eb59b..f916bd19fcd 100644 --- a/docs/docs/integrations/tools/google_search.ipynb +++ b/docs/docs/integrations/tools/google_search.ipynb @@ -14,6 +14,16 @@ "Then we will need to set some environment variables." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2998f9c", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 1, diff --git a/docs/docs/integrations/tools/google_serper.ipynb b/docs/docs/integrations/tools/google_serper.ipynb index fc83b6c3bfe..3ea5265635f 100644 --- a/docs/docs/integrations/tools/google_serper.ipynb +++ b/docs/docs/integrations/tools/google_serper.ipynb @@ -10,6 +10,16 @@ "This notebook goes over how to use the `Google Serper` component to search the web. First you need to sign up for a free account at [serper.dev](https://serper.dev) and get your api key." 
] }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac0b9ce6", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 11, diff --git a/docs/docs/integrations/tools/google_trends.ipynb b/docs/docs/integrations/tools/google_trends.ipynb index c5a6ff81d48..c0893a93442 100644 --- a/docs/docs/integrations/tools/google_trends.ipynb +++ b/docs/docs/integrations/tools/google_trends.ipynb @@ -40,7 +40,7 @@ } ], "source": [ - "%pip install --upgrade --quiet google-search-results" + "%pip install --upgrade --quiet google-search-results langchain_community" ] }, { diff --git a/docs/docs/integrations/tools/gradio_tools.ipynb b/docs/docs/integrations/tools/gradio_tools.ipynb index 0d5961e4d6d..8c8ad5772d7 100644 --- a/docs/docs/integrations/tools/gradio_tools.ipynb +++ b/docs/docs/integrations/tools/gradio_tools.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet gradio_tools" + "%pip install --upgrade --quiet gradio_tools langchain-community" ] }, { diff --git a/docs/docs/integrations/tools/graphql.ipynb b/docs/docs/integrations/tools/graphql.ipynb index a33ad9b232a..bc0ded37bd0 100644 --- a/docs/docs/integrations/tools/graphql.ipynb +++ b/docs/docs/integrations/tools/graphql.ipynb @@ -30,6 +30,19 @@ "pip install httpx gql > /dev/null" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "shellscript" + } + }, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/docs/docs/integrations/tools/huggingface_tools.ipynb b/docs/docs/integrations/tools/huggingface_tools.ipynb index a1ef70532fb..8408d373891 100644 --- a/docs/docs/integrations/tools/huggingface_tools.ipynb +++ b/docs/docs/integrations/tools/huggingface_tools.ipynb @@ -22,6 +22,16 @@ "%pip install --upgrade 
--quiet transformers huggingface_hub > /dev/null" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5b9279f", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 1, diff --git a/docs/docs/integrations/tools/human_tools.ipynb b/docs/docs/integrations/tools/human_tools.ipynb index 49d54bb7aa9..ab44da90f2b 100644 --- a/docs/docs/integrations/tools/human_tools.ipynb +++ b/docs/docs/integrations/tools/human_tools.ipynb @@ -10,6 +10,15 @@ "when it is confused." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 1, diff --git a/docs/docs/integrations/tools/ifttt.ipynb b/docs/docs/integrations/tools/ifttt.ipynb index 2e86a60787a..4cd3bf9f687 100644 --- a/docs/docs/integrations/tools/ifttt.ipynb +++ b/docs/docs/integrations/tools/ifttt.ipynb @@ -44,6 +44,16 @@ "https://maker.ifttt.com/use/YOUR_IFTTT_KEY. 
Grab the YOUR_IFTTT_KEY value.\n" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "d356bc92", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-community" + ] + }, { "cell_type": "code", "execution_count": 1, diff --git a/docs/docs/integrations/tools/passio_nutrition_ai.ipynb b/docs/docs/integrations/tools/passio_nutrition_ai.ipynb index 451ccc2e374..5d67b2fc5d7 100644 --- a/docs/docs/integrations/tools/passio_nutrition_ai.ipynb +++ b/docs/docs/integrations/tools/passio_nutrition_ai.ipynb @@ -5,7 +5,7 @@ "id": "f4c03f40-1328-412d-8a48-1db0cd481b77", "metadata": {}, "source": [ - "# Quickstart\n", + "# Passio NutritionAI\n", "\n", "To best understand how NutritionAI can give your agents super food-nutrition powers, let's build an agent that can find that information via Passio NutritionAI.\n", "\n", diff --git a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb index 770251137ff..805166dd23f 100644 --- a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb +++ b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken" + "%pip install --upgrade --quiet langchain-openai langchain-community 'deeplake[enterprise]' tiktoken" ] }, { diff --git a/docs/docs/integrations/vectorstores/aerospike.ipynb b/docs/docs/integrations/vectorstores/aerospike.ipynb index 9a72250db10..b2ad324b055 100644 --- a/docs/docs/integrations/vectorstores/aerospike.ipynb +++ b/docs/docs/integrations/vectorstores/aerospike.ipynb @@ -51,7 +51,7 @@ }, "outputs": [], "source": [ - "!pip install --upgrade --quiet aerospike-vector-search==0.6.1 sentence-transformers langchain" + "!pip install --upgrade --quiet aerospike-vector-search==0.6.1 langchain-community sentence-transformers langchain" ] }, { diff --git 
a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb index 6c2f03c223e..50ea0b13977 100644 --- a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb @@ -76,7 +76,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet alibabacloud_ha3engine_vector" + "%pip install --upgrade --quiet langchain-community alibabacloud_ha3engine_vector" ] }, { diff --git a/docs/docs/integrations/vectorstores/analyticdb.ipynb b/docs/docs/integrations/vectorstores/analyticdb.ipynb index dc3404651a4..594adc5560b 100644 --- a/docs/docs/integrations/vectorstores/analyticdb.ipynb +++ b/docs/docs/integrations/vectorstores/analyticdb.ipynb @@ -10,8 +10,11 @@ "\n", ">`AnalyticDB for PostgreSQL` is developed based on the open-source `Greenplum Database` project and is enhanced with in-depth extensions by `Alibaba Cloud`. AnalyticDB for PostgreSQL is compatible with the ANSI SQL 2003 syntax and the PostgreSQL and Oracle database ecosystems. AnalyticDB for PostgreSQL also supports row store and column store. AnalyticDB for PostgreSQL processes petabytes of data offline at a high performance level and supports highly concurrent online queries.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `AnalyticDB` vector database.\n", "To run, you should have an [AnalyticDB](https://www.alibabacloud.com/help/en/analyticdb-for-postgresql/latest/product-introduction-overview) instance up and running:\n", + "\n", "- Using [AnalyticDB Cloud Vector Database](https://www.alibabacloud.com/product/hybriddb-postgresql). Click here to fast deploy it." 
] }, diff --git a/docs/docs/integrations/vectorstores/annoy.ipynb b/docs/docs/integrations/vectorstores/annoy.ipynb index 72792259e6e..4e3e0486d39 100644 --- a/docs/docs/integrations/vectorstores/annoy.ipynb +++ b/docs/docs/integrations/vectorstores/annoy.ipynb @@ -9,6 +9,8 @@ "\n", "> [Annoy](https://github.com/spotify/annoy) (`Approximate Nearest Neighbors Oh Yeah`) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are mmapped into memory so that many processes may share the same data.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `Annoy` vector database." ] }, diff --git a/docs/docs/integrations/vectorstores/apache_doris.ipynb b/docs/docs/integrations/vectorstores/apache_doris.ipynb index a4970ed912f..92239a7a60b 100644 --- a/docs/docs/integrations/vectorstores/apache_doris.ipynb +++ b/docs/docs/integrations/vectorstores/apache_doris.ipynb @@ -14,6 +14,8 @@ "\n", ">Usually `Apache Doris` is categorized into OLAP, and it has showed excellent performance in [ClickBench — a Benchmark For Analytical DBMS](https://benchmark.clickhouse.com/). Since it has a super-fast vectorized execution engine, it could also be used as a fast vectordb.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "Here we'll show how to use the Apache Doris Vector Store." 
] }, diff --git a/docs/docs/integrations/vectorstores/astradb.ipynb b/docs/docs/integrations/vectorstores/astradb.ipynb index d7fa83ef08f..b185df23f1c 100644 --- a/docs/docs/integrations/vectorstores/astradb.ipynb +++ b/docs/docs/integrations/vectorstores/astradb.ipynb @@ -23,6 +23,8 @@ "id": "d2d6ca14-fb7e-4172-9aa0-a3119a064b96", "metadata": {}, "source": [ + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "_Note: in addition to access to the database, an OpenAI API Key is required to run the full example._" ] }, diff --git a/docs/docs/integrations/vectorstores/atlas.ipynb b/docs/docs/integrations/vectorstores/atlas.ipynb index 0887b4c03f5..39cc4f70202 100644 --- a/docs/docs/integrations/vectorstores/atlas.ipynb +++ b/docs/docs/integrations/vectorstores/atlas.ipynb @@ -10,6 +10,8 @@ "\n", ">[Atlas](https://docs.nomic.ai/index.html) is a platform by Nomic made for interacting with both small and internet scale unstructured datasets. It enables anyone to visualize, search, and share massive datasets in their browser.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows you how to use functionality related to the `AtlasDB` vectorstore." ] }, diff --git a/docs/docs/integrations/vectorstores/awadb.ipynb b/docs/docs/integrations/vectorstores/awadb.ipynb index a7592e03c17..96c3f55de34 100644 --- a/docs/docs/integrations/vectorstores/awadb.ipynb +++ b/docs/docs/integrations/vectorstores/awadb.ipynb @@ -8,6 +8,8 @@ "# AwaDB\n", ">[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `AwaDB`." 
] }, diff --git a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb index 2fa1ce9794c..06403c7475c 100644 --- a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb +++ b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb @@ -51,7 +51,7 @@ } ], "source": [ - "%pip install --upgrade --quiet pymongo" + "%pip install --upgrade --quiet pymongo langchain-openai langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/azuresearch.ipynb b/docs/docs/integrations/vectorstores/azuresearch.ipynb index f6345272f5e..06c8c812632 100644 --- a/docs/docs/integrations/vectorstores/azuresearch.ipynb +++ b/docs/docs/integrations/vectorstores/azuresearch.ipynb @@ -8,7 +8,9 @@ "source": [ "# Azure AI Search\n", "\n", - "[Azure AI Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) (formerly known as `Azure Search` and `Azure Cognitive Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for information retrieval of vector, keyword, and hybrid queries at scale.\n" + "[Azure AI Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) (formerly known as `Azure Search` and `Azure Cognitive Search`) is a cloud search service that gives developers infrastructure, APIs, and tools for information retrieval of vector, keyword, and hybrid queries at scale.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/bagel.ipynb b/docs/docs/integrations/vectorstores/bagel.ipynb index 29ac3cf9eec..6f8665f6c56 100644 --- a/docs/docs/integrations/vectorstores/bagel.ipynb +++ b/docs/docs/integrations/vectorstores/bagel.ipynb @@ -14,7 +14,7 @@ "### Installation and Setup\n", "\n", "```bash\n", - "pip install bagelML\n", + "pip install bagelML langchain-community\n", "```\n", "\n" ] diff --git 
a/docs/docs/integrations/vectorstores/bageldb.ipynb b/docs/docs/integrations/vectorstores/bageldb.ipynb index e3605d7ee3b..71dbcd6bf84 100644 --- a/docs/docs/integrations/vectorstores/bageldb.ipynb +++ b/docs/docs/integrations/vectorstores/bageldb.ipynb @@ -14,7 +14,7 @@ "### Installation and Setup\n", "\n", "```bash\n", - "pip install betabageldb\n", + "pip install betabageldb langchain-community\n", "```\n", "\n" ] diff --git a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb index 6ccbcbde0fa..4ddc2b34efa 100644 --- a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb @@ -39,7 +39,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet elasticsearch == 7.11.0" + "%pip install --upgrade --quiet langchain-community elasticsearch == 7.11.0" ] }, { diff --git a/docs/docs/integrations/vectorstores/baiduvectordb.ipynb b/docs/docs/integrations/vectorstores/baiduvectordb.ipynb index 89cdc1a22ae..b8515f1e6ae 100644 --- a/docs/docs/integrations/vectorstores/baiduvectordb.ipynb +++ b/docs/docs/integrations/vectorstores/baiduvectordb.ipynb @@ -15,6 +15,8 @@ "\n", ">This database service supports a diverse range of index types and similarity calculation methods, catering to various use cases. A standout feature of VectorDB is its capacity to manage an immense vector scale of up to 10 billion, while maintaining impressive query performance, supporting millions of queries per second (QPS) with millisecond-level query latency.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the Baidu VectorDB. \n", "\n", "To run, you should have a [Database instance.](https://cloud.baidu.com/doc/VDB/s/hlrsoazuf)." 
diff --git a/docs/docs/integrations/vectorstores/cassandra.ipynb b/docs/docs/integrations/vectorstores/cassandra.ipynb index ab43444f623..e7f9a19a62d 100644 --- a/docs/docs/integrations/vectorstores/cassandra.ipynb +++ b/docs/docs/integrations/vectorstores/cassandra.ipynb @@ -49,7 +49,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet \"cassio>=0.1.4\"" + "%pip install --upgrade --quiet langchain-community \"cassio>=0.1.4\"" ] }, { diff --git a/docs/docs/integrations/vectorstores/clarifai.ipynb b/docs/docs/integrations/vectorstores/clarifai.ipynb index c91ff706408..03baea6e201 100644 --- a/docs/docs/integrations/vectorstores/clarifai.ipynb +++ b/docs/docs/integrations/vectorstores/clarifai.ipynb @@ -35,7 +35,7 @@ "outputs": [], "source": [ "# Install required dependencies\n", - "%pip install --upgrade --quiet clarifai" + "%pip install --upgrade --quiet clarifai langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/clickhouse.ipynb b/docs/docs/integrations/vectorstores/clickhouse.ipynb index 90527963b97..2b0136dff6b 100644 --- a/docs/docs/integrations/vectorstores/clickhouse.ipynb +++ b/docs/docs/integrations/vectorstores/clickhouse.ipynb @@ -9,6 +9,8 @@ "\n", "> [ClickHouse](https://clickhouse.com/) is the fastest and most resource efficient open-source database for real-time apps and analytics with full SQL support and a wide range of functions to assist users in writing analytical queries. 
Lately added data structures and distance search functions (like `L2Distance`) as well as [approximate nearest neighbor search indexes](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/annindexes) enable ClickHouse to be used as a high performance and scalable vector database to store and search vectors with SQL.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `ClickHouse` vector search." ] }, diff --git a/docs/docs/integrations/vectorstores/couchbase.ipynb b/docs/docs/integrations/vectorstores/couchbase.ipynb index 9e18bf91970..8035376aebd 100644 --- a/docs/docs/integrations/vectorstores/couchbase.ipynb +++ b/docs/docs/integrations/vectorstores/couchbase.ipynb @@ -28,7 +28,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai couchbase" + "%pip install --upgrade --quiet langchain langchain-openai langchain-community couchbase" ] }, { diff --git a/docs/docs/integrations/vectorstores/dashvector.ipynb b/docs/docs/integrations/vectorstores/dashvector.ipynb index adb4c8a420c..f42b6d4f072 100644 --- a/docs/docs/integrations/vectorstores/dashvector.ipynb +++ b/docs/docs/integrations/vectorstores/dashvector.ipynb @@ -39,7 +39,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet dashvector dashscope" + "%pip install --upgrade --quiet langchain-community dashvector dashscope" ] }, { diff --git a/docs/docs/integrations/vectorstores/dingo.ipynb b/docs/docs/integrations/vectorstores/dingo.ipynb index 72f5ac3f75d..3d0934d4b07 100644 --- a/docs/docs/integrations/vectorstores/dingo.ipynb +++ b/docs/docs/integrations/vectorstores/dingo.ipynb @@ -9,6 +9,8 @@ "\n", ">[DingoDB](https://dingodb.readthedocs.io/en/latest/) is a distributed multi-mode vector database, which combines the characteristics of data lakes and vector databases, and can 
store data of any type and size (Key-Value, PDF, audio, video, etc.). It has real-time low-latency processing capabilities to achieve rapid insight and response, and can efficiently conduct instant analysis and process multi-modal data.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the DingoDB vector database.\n", "\n", "To run, you should have a [DingoDB instance up and running](https://github.com/dingodb/dingo-deploy/blob/main/README.md)." diff --git a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb index 63f8baecb52..87931f415b1 100644 --- a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb @@ -9,6 +9,8 @@ "\n", ">[DocArrayHnswSearch](https://docs.docarray.org/user_guide/storing/index_hnswlib/) is a lightweight Document Index implementation provided by [Docarray](https://github.com/docarray/docarray) that runs fully locally and is best suited for small- to medium-sized datasets. It stores vectors on disk in [hnswlib](https://github.com/nmslib/hnswlib), and stores all other data in [SQLite](https://www.sqlite.org/index.html).\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `DocArrayHnswSearch`." 
] }, diff --git a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb index 81a4f49ec91..7185bf26391 100644 --- a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb @@ -31,7 +31,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet \"docarray\"" + "%pip install --upgrade --quiet langchain-community \"docarray\"" ] }, { diff --git a/docs/docs/integrations/vectorstores/duckdb.ipynb b/docs/docs/integrations/vectorstores/duckdb.ipynb index bff41b1278e..be23a3f2b8d 100644 --- a/docs/docs/integrations/vectorstores/duckdb.ipynb +++ b/docs/docs/integrations/vectorstores/duckdb.ipynb @@ -14,7 +14,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install duckdb" + "! pip install duckdb langchain langchain-community langchain-openai" ] }, { @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import DuckDB\n", + "from langchain_community.vectorstores import DuckDB\n", "from langchain_openai import OpenAIEmbeddings" ] }, @@ -86,7 +86,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -100,9 +100,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.2" + "version": "3.9.1" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb index 0082c5c0b6e..ffe976f5b14 100644 --- a/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/ecloud_vector_search.ipynb @@ -9,6 +9,8 @@ "\n", ">[China Mobile ECloud VectorSearch](https://ecloud.10086.cn/portal/product/elasticsearch) is a fully managed, enterprise-level distributed search and 
analysis service. China Mobile ECloud VectorSearch provides low-cost, high-performance, and reliable retrieval and analysis platform level product services for structured/unstructured data. As a vector database , it supports multiple index types and similarity distance methods. \n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `ECloud ElasticSearch VectorStore`.\n", "To run, you should have an [China Mobile ECloud VectorSearch](https://ecloud.10086.cn/portal/product/elasticsearch) instance up and running:\n", "\n", @@ -66,8 +68,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import EcloudESVectorStore\n", "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import EcloudESVectorStore\n", "from langchain_openai import OpenAIEmbeddings\n", "from langchain_text_splitters import CharacterTextSplitter" ] diff --git a/docs/docs/integrations/vectorstores/epsilla.ipynb b/docs/docs/integrations/vectorstores/epsilla.ipynb index e0bfbd84c4d..5d0e01678a7 100644 --- a/docs/docs/integrations/vectorstores/epsilla.ipynb +++ b/docs/docs/integrations/vectorstores/epsilla.ipynb @@ -9,6 +9,8 @@ "\n", ">[Epsilla](https://www.epsilla.com) is an open-source vector database that leverages the advanced parallel graph traversal techniques for vector indexing. Epsilla is licensed under GPL-3.0.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use the functionalities related to the `Epsilla` vector database.\n", "\n", "As a prerequisite, you need to have a running Epsilla vector database (for example, through our docker image), and install the ``pyepsilla`` package. View full docs at [docs](https://epsilla-inc.gitbook.io/epsilladb/quick-start)." 
diff --git a/docs/docs/integrations/vectorstores/faiss.ipynb b/docs/docs/integrations/vectorstores/faiss.ipynb index 13425e7cc90..e95d5f76c4b 100644 --- a/docs/docs/integrations/vectorstores/faiss.ipynb +++ b/docs/docs/integrations/vectorstores/faiss.ipynb @@ -11,6 +11,8 @@ "\n", "[Faiss documentation](https://faiss.ai/).\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `FAISS` vector database. It will show functionality specific to this integration. After going through, it may be useful to explore [relevant use-case pages](/docs/how_to#qa-with-rag) to learn how to use this vectorstore as part of a larger chain." ] }, diff --git a/docs/docs/integrations/vectorstores/faiss_async.ipynb b/docs/docs/integrations/vectorstores/faiss_async.ipynb index 09a638b9a45..770f8b78e0f 100644 --- a/docs/docs/integrations/vectorstores/faiss_async.ipynb +++ b/docs/docs/integrations/vectorstores/faiss_async.ipynb @@ -11,6 +11,8 @@ "\n", "[Faiss documentation](https://faiss.ai/).\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `FAISS` vector database using `asyncio`.\n", "LangChain implemented the synchronous and asynchronous vector store functions.\n", "\n", diff --git a/docs/docs/integrations/vectorstores/hippo.ipynb b/docs/docs/integrations/vectorstores/hippo.ipynb index dd2013edef5..eecb5c874fc 100644 --- a/docs/docs/integrations/vectorstores/hippo.ipynb +++ b/docs/docs/integrations/vectorstores/hippo.ipynb @@ -59,7 +59,7 @@ } ], "source": [ - "%pip install --upgrade --quiet langchain tiktoken langchain-openai\n", + "%pip install --upgrade --quiet langchain langchain_community tiktoken langchain-openai\n", "%pip install --upgrade --quiet hippo-api==1.1.0.rc3" ] }, diff --git 
a/docs/docs/integrations/vectorstores/hologres.ipynb b/docs/docs/integrations/vectorstores/hologres.ipynb index 31541f85e06..5bf70d6e25e 100644 --- a/docs/docs/integrations/vectorstores/hologres.ipynb +++ b/docs/docs/integrations/vectorstores/hologres.ipynb @@ -22,7 +22,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet hologres-vector" + "%pip install --upgrade --quiet langchain_community hologres-vector" ] }, { diff --git a/docs/docs/integrations/vectorstores/jaguar.ipynb b/docs/docs/integrations/vectorstores/jaguar.ipynb index 0e520a0d299..c538a33532d 100644 --- a/docs/docs/integrations/vectorstores/jaguar.ipynb +++ b/docs/docs/integrations/vectorstores/jaguar.ipynb @@ -35,7 +35,9 @@ "2. You must install the http client package for JaguarDB:\n", " ```\n", " pip install -U jaguardb-http-client\n", - " ```\n" + " ```\n", + " \n", + "3. You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n" ] }, { diff --git a/docs/docs/integrations/vectorstores/kdbai.ipynb b/docs/docs/integrations/vectorstores/kdbai.ipynb index 302bf5cb7d8..74d177548de 100644 --- a/docs/docs/integrations/vectorstores/kdbai.ipynb +++ b/docs/docs/integrations/vectorstores/kdbai.ipynb @@ -17,6 +17,8 @@ "\n", "The following examples demonstrate some of the ways you can interact with KDB.AI through LangChain.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "## Import required packages" ] }, @@ -47,7 +49,7 @@ "metadata": {}, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ "KDB.AI endpoint: https://ui.qa.cld.kx.com/instance/pcnvlmi860\n", diff --git a/docs/docs/integrations/vectorstores/kinetica.ipynb b/docs/docs/integrations/vectorstores/kinetica.ipynb index ef29f4b97be..5df1dfd96e8 100644 --- a/docs/docs/integrations/vectorstores/kinetica.ipynb +++ 
b/docs/docs/integrations/vectorstores/kinetica.ipynb @@ -60,7 +60,7 @@ ], "source": [ "# Pip install necessary package\n", - "%pip install --upgrade --quiet langchain-openai\n", + "%pip install --upgrade --quiet langchain-openai langchain-community\n", "%pip install gpudb==7.2.0.1\n", "%pip install --upgrade --quiet tiktoken" ] diff --git a/docs/docs/integrations/vectorstores/lancedb.ipynb b/docs/docs/integrations/vectorstores/lancedb.ipynb index bcdd38756fe..fd59f044264 100644 --- a/docs/docs/integrations/vectorstores/lancedb.ipynb +++ b/docs/docs/integrations/vectorstores/lancedb.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install -U langchain-openai" + "! pip install -U langchain-openai langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/lantern.ipynb b/docs/docs/integrations/vectorstores/lantern.ipynb index 01fb3447283..bad29bea1d7 100644 --- a/docs/docs/integrations/vectorstores/lantern.ipynb +++ b/docs/docs/integrations/vectorstores/lantern.ipynb @@ -12,6 +12,8 @@ "- Exact and approximate nearest neighbor search\n", "- L2 squared distance, hamming distance, and cosine distance\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use the Postgres vector database (`Lantern`)." 
] }, @@ -50,7 +52,7 @@ }, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ "OpenAI API Key: ········\n" @@ -144,7 +146,7 @@ }, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ "DB Connection String: ········\n" diff --git a/docs/docs/integrations/vectorstores/llm_rails.ipynb b/docs/docs/integrations/vectorstores/llm_rails.ipynb index 1ac0a57b6f4..0cbb34036c3 100644 --- a/docs/docs/integrations/vectorstores/llm_rails.ipynb +++ b/docs/docs/integrations/vectorstores/llm_rails.ipynb @@ -10,6 +10,8 @@ ">[LLMRails](https://www.llmrails.com/) is a API platform for building GenAI applications. It provides an easy-to-use API for document indexing and querying that is managed by LLMRails and is optimized for performance and accuracy. \n", "See the [LLMRails API documentation ](https://docs.llmrails.com/) for more information on how to use the API.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use functionality related to the `LLMRails`'s integration with langchain.\n", "Note that unlike many other integrations in this category, LLMRails provides an end-to-end managed service for retrieval augmented generation, which includes:\n", "1. 
A way to extract text from document files and chunk them into sentences.\n", diff --git a/docs/docs/integrations/vectorstores/manticore_search.ipynb b/docs/docs/integrations/vectorstores/manticore_search.ipynb new file mode 100644 index 00000000000..f606d84eb11 --- /dev/null +++ b/docs/docs/integrations/vectorstores/manticore_search.ipynb @@ -0,0 +1,443 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "bf48a5c8c3d125e1", + "metadata": { + "collapsed": false + }, + "source": [ + "# ManticoreSearch VectorStore\n", + "\n", + "[ManticoreSearch](https://manticoresearch.com/) is an open-source search engine that offers fast, scalable, and user-friendly capabilities. Originating as a fork of [Sphinx Search](http://sphinxsearch.com/), it has evolved to incorporate modern search engine features and improvements. ManticoreSearch distinguishes itself with its robust performance and ease of integration into various applications.\n", + "\n", + "ManticoreSearch has recently introduced [vector search capabilities](https://manual.manticoresearch.com/dev/Searching/KNN), starting with search engine version 6.2 and only with [manticore-columnar-lib](https://github.com/manticoresoftware/columnar) package installed. This feature is a considerable advancement, allowing for the execution of searches based on vector similarity.\n", + "\n", + "As of now, the vector search functionality is only accessible in the developmental (dev) versions of the search engine. Consequently, it is imperative to employ a developmental [manticoresearch-dev](https://pypi.org/project/manticoresearch-dev/) Python client for utilizing this feature effectively." 
+ ] + }, + { + "cell_type": "markdown", + "id": "d5050b607ca217ad", + "metadata": { + "collapsed": false + }, + "source": [ + "## Setting up environments" + ] + }, + { + "cell_type": "markdown", + "id": "b26c5ab7f89a61fc", + "metadata": { + "collapsed": false + }, + "source": [ + "Starting Docker-container with ManticoreSearch and installing manticore-columnar-lib package (optional)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "initial_id", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-03T11:28:37.177840Z", + "start_time": "2024-03-03T11:28:26.863511Z" + }, + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Get:1 http://repo.manticoresearch.com/repository/manticoresearch_jammy_dev jammy InRelease [3525 kB]\r\n", + "Get:2 http://archive.ubuntu.com/ubuntu jammy InRelease [270 kB] \r\n", + "Get:3 http://security.ubuntu.com/ubuntu jammy-security InRelease [110 kB] \r\n", + "Get:4 http://archive.ubuntu.com/ubuntu jammy-updates InRelease [119 kB] \r\n", + "Get:5 http://security.ubuntu.com/ubuntu jammy-security/universe amd64 Packages [1074 kB]\r\n", + "Get:6 http://archive.ubuntu.com/ubuntu jammy-backports InRelease [109 kB] \r\n", + "Get:7 http://archive.ubuntu.com/ubuntu jammy/universe amd64 Packages [17.5 MB] \r\n", + "Get:8 http://security.ubuntu.com/ubuntu jammy-security/main amd64 Packages [1517 kB]\r\n", + "Get:9 http://security.ubuntu.com/ubuntu jammy-security/restricted amd64 Packages [1889 kB]\r\n", + "Get:10 http://security.ubuntu.com/ubuntu jammy-security/multiverse amd64 Packages [44.6 kB]\r\n", + "Get:11 http://archive.ubuntu.com/ubuntu jammy/restricted amd64 Packages [164 kB]\r\n", + "Get:12 http://archive.ubuntu.com/ubuntu jammy/multiverse amd64 Packages [266 kB]\r\n", + "Get:13 http://archive.ubuntu.com/ubuntu jammy/main amd64 Packages [1792 kB] \r\n", + "Get:14 http://archive.ubuntu.com/ubuntu jammy-updates/multiverse amd64 Packages [50.4 kB]\r\n", + "Get:15 
http://archive.ubuntu.com/ubuntu jammy-updates/restricted amd64 Packages [1927 kB]\r\n", + "Get:16 http://archive.ubuntu.com/ubuntu jammy-updates/universe amd64 Packages [1346 kB]\r\n", + "Get:17 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 Packages [1796 kB]\r\n", + "Get:18 http://archive.ubuntu.com/ubuntu jammy-backports/universe amd64 Packages [28.1 kB]\r\n", + "Get:19 http://archive.ubuntu.com/ubuntu jammy-backports/main amd64 Packages [50.4 kB]\r\n", + "Get:20 http://repo.manticoresearch.com/repository/manticoresearch_jammy_dev jammy/main amd64 Packages [5020 kB]\r\n", + "Fetched 38.6 MB in 7s (5847 kB/s) \r\n", + "Reading package lists... Done\r\n", + "Reading package lists... Done\r\n", + "Building dependency tree... Done\r\n", + "Reading state information... Done\r\n", + "The following NEW packages will be installed:\r\n", + " manticore-columnar-lib\r\n", + "0 upgraded, 1 newly installed, 0 to remove and 21 not upgraded.\r\n", + "Need to get 1990 kB of archives.\r\n", + "After this operation, 10.0 MB of additional disk space will be used.\r\n", + "Get:1 http://repo.manticoresearch.com/repository/manticoresearch_jammy_dev jammy/main amd64 manticore-columnar-lib amd64 2.2.5-240217-a5342a1 [1990 kB]\r\n", + "Fetched 1990 kB in 1s (1505 kB/s) \r\n", + "debconf: delaying package configuration, since apt-utils is not installed\r\n", + "Selecting previously unselected package manticore-columnar-lib.\r\n", + "(Reading database ... 
12260 files and directories currently installed.)\r\n", + "Preparing to unpack .../manticore-columnar-lib_2.2.5-240217-a5342a1_amd64.deb ...\r\n", + "Unpacking manticore-columnar-lib (2.2.5-240217-a5342a1) ...\r\n", + "Setting up manticore-columnar-lib (2.2.5-240217-a5342a1) ...\r\n", + "a546aec22291\r\n" + ] + } + ], + "source": [ + "import time\n", + "\n", + "# Start container\n", + "containers = !docker ps --filter \"name=langchain-manticoresearch-server\" -q\n", + "if len(containers) == 0:\n", + " !docker run -d -p 9308:9308 --name langchain-manticoresearch-server manticoresearch/manticore:dev\n", + " time.sleep(20) # Wait for the container to start up\n", + "\n", + "# Get ID of container\n", + "container_id = containers[0]\n", + "\n", + "# Install manticore-columnar-lib package as root user\n", + "!docker exec -it --user 0 {container_id} apt-get update\n", + "!docker exec -it --user 0 {container_id} apt-get install -y manticore-columnar-lib\n", + "\n", + "# Restart container\n", + "!docker restart {container_id}" + ] + }, + { + "cell_type": "markdown", + "id": "42284e4c8fd0aeb4", + "metadata": { + "collapsed": false + }, + "source": [ + "Installing ManticoreSearch python client" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "bc7bd70a63cc8d90", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-03T11:28:38.544198Z", + "start_time": "2024-03-03T11:28:37.178755Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\r\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\r\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\r\n", + "Note: you may need to restart the kernel to use updated packages.\n" + 
] + } + ], + "source": [ + "%pip install --upgrade --quiet manticoresearch-dev" + ] + }, + { + "cell_type": "markdown", + "id": "f90b4793255edcb1", + "metadata": { + "collapsed": false + }, + "source": [ + "We use GPT4AllEmbeddings, which runs locally, so no API key is needed." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "a303c63186fd8abd", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-03T11:28:38.546877Z", + "start_time": "2024-03-03T11:28:38.544907Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.embeddings import GPT4AllEmbeddings\n", + "from langchain_community.vectorstores import ManticoreSearch, ManticoreSearchSettings" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "46ad30f36815ed15", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-03T11:28:38.991083Z", + "start_time": "2024-03-03T11:28:38.547705Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Created a chunk of size 338, which is longer than the specified 100\n", + "Created a chunk of size 508, which is longer than the specified 100\n", + "Created a chunk of size 277, which is longer than the specified 100\n", + "Created a chunk of size 777, which is longer than the specified 100\n", + "Created a chunk of size 247, which is longer than the specified 100\n", + "Created a chunk of size 228, which is longer than the specified 100\n", + "Created a chunk of size 557, which is longer than the specified 100\n", + "Created a chunk of size 587, which is longer than the specified 100\n", + "Created a chunk of size 173, which is longer than the specified 100\n", + "Created a chunk of size 622, which is longer than the specified 100\n", + "Created a chunk of size 775, which is longer than the specified 100\n", + "Created a chunk of size 292, which is longer than the specified 
100\n", + "Created a chunk of size 456, which is longer than the specified 100\n", + "Created a chunk of size 291, which is longer than the specified 100\n", + "Created a chunk of size 367, which is longer than the specified 100\n", + "Created a chunk of size 604, which is longer than the specified 100\n", + "Created a chunk of size 618, which is longer than the specified 100\n", + "Created a chunk of size 340, which is longer than the specified 100\n", + "Created a chunk of size 395, which is longer than the specified 100\n", + "Created a chunk of size 321, which is longer than the specified 100\n", + "Created a chunk of size 453, which is longer than the specified 100\n", + "Created a chunk of size 354, which is longer than the specified 100\n", + "Created a chunk of size 481, which is longer than the specified 100\n", + "Created a chunk of size 233, which is longer than the specified 100\n", + "Created a chunk of size 270, which is longer than the specified 100\n", + "Created a chunk of size 305, which is longer than the specified 100\n", + "Created a chunk of size 520, which is longer than the specified 100\n", + "Created a chunk of size 289, which is longer than the specified 100\n", + "Created a chunk of size 280, which is longer than the specified 100\n", + "Created a chunk of size 417, which is longer than the specified 100\n", + "Created a chunk of size 495, which is longer than the specified 100\n", + "Created a chunk of size 602, which is longer than the specified 100\n", + "Created a chunk of size 1004, which is longer than the specified 100\n", + "Created a chunk of size 272, which is longer than the specified 100\n", + "Created a chunk of size 1203, which is longer than the specified 100\n", + "Created a chunk of size 844, which is longer than the specified 100\n", + "Created a chunk of size 135, which is longer than the specified 100\n", + "Created a chunk of size 306, which is longer than the specified 100\n", + "Created a chunk of size 407, which 
is longer than the specified 100\n", + "Created a chunk of size 910, which is longer than the specified 100\n", + "Created a chunk of size 398, which is longer than the specified 100\n", + "Created a chunk of size 674, which is longer than the specified 100\n", + "Created a chunk of size 356, which is longer than the specified 100\n", + "Created a chunk of size 474, which is longer than the specified 100\n", + "Created a chunk of size 814, which is longer than the specified 100\n", + "Created a chunk of size 530, which is longer than the specified 100\n", + "Created a chunk of size 469, which is longer than the specified 100\n", + "Created a chunk of size 489, which is longer than the specified 100\n", + "Created a chunk of size 433, which is longer than the specified 100\n", + "Created a chunk of size 603, which is longer than the specified 100\n", + "Created a chunk of size 380, which is longer than the specified 100\n", + "Created a chunk of size 354, which is longer than the specified 100\n", + "Created a chunk of size 391, which is longer than the specified 100\n", + "Created a chunk of size 772, which is longer than the specified 100\n", + "Created a chunk of size 267, which is longer than the specified 100\n", + "Created a chunk of size 571, which is longer than the specified 100\n", + "Created a chunk of size 594, which is longer than the specified 100\n", + "Created a chunk of size 458, which is longer than the specified 100\n", + "Created a chunk of size 386, which is longer than the specified 100\n", + "Created a chunk of size 417, which is longer than the specified 100\n", + "Created a chunk of size 370, which is longer than the specified 100\n", + "Created a chunk of size 402, which is longer than the specified 100\n", + "Created a chunk of size 306, which is longer than the specified 100\n", + "Created a chunk of size 173, which is longer than the specified 100\n", + "Created a chunk of size 628, which is longer than the specified 100\n", + "Created a 
chunk of size 321, which is longer than the specified 100\n", + "Created a chunk of size 294, which is longer than the specified 100\n", + "Created a chunk of size 689, which is longer than the specified 100\n", + "Created a chunk of size 641, which is longer than the specified 100\n", + "Created a chunk of size 473, which is longer than the specified 100\n", + "Created a chunk of size 414, which is longer than the specified 100\n", + "Created a chunk of size 585, which is longer than the specified 100\n", + "Created a chunk of size 764, which is longer than the specified 100\n", + "Created a chunk of size 502, which is longer than the specified 100\n", + "Created a chunk of size 640, which is longer than the specified 100\n", + "Created a chunk of size 507, which is longer than the specified 100\n", + "Created a chunk of size 564, which is longer than the specified 100\n", + "Created a chunk of size 707, which is longer than the specified 100\n", + "Created a chunk of size 380, which is longer than the specified 100\n", + "Created a chunk of size 615, which is longer than the specified 100\n", + "Created a chunk of size 733, which is longer than the specified 100\n", + "Created a chunk of size 277, which is longer than the specified 100\n", + "Created a chunk of size 497, which is longer than the specified 100\n", + "Created a chunk of size 625, which is longer than the specified 100\n", + "Created a chunk of size 468, which is longer than the specified 100\n", + "Created a chunk of size 289, which is longer than the specified 100\n", + "Created a chunk of size 576, which is longer than the specified 100\n", + "Created a chunk of size 297, which is longer than the specified 100\n", + "Created a chunk of size 534, which is longer than the specified 100\n", + "Created a chunk of size 427, which is longer than the specified 100\n", + "Created a chunk of size 412, which is longer than the specified 100\n", + "Created a chunk of size 381, which is longer than the 
specified 100\n", + "Created a chunk of size 417, which is longer than the specified 100\n", + "Created a chunk of size 244, which is longer than the specified 100\n", + "Created a chunk of size 307, which is longer than the specified 100\n", + "Created a chunk of size 528, which is longer than the specified 100\n", + "Created a chunk of size 565, which is longer than the specified 100\n", + "Created a chunk of size 487, which is longer than the specified 100\n", + "Created a chunk of size 470, which is longer than the specified 100\n", + "Created a chunk of size 332, which is longer than the specified 100\n", + "Created a chunk of size 552, which is longer than the specified 100\n", + "Created a chunk of size 427, which is longer than the specified 100\n", + "Created a chunk of size 596, which is longer than the specified 100\n", + "Created a chunk of size 192, which is longer than the specified 100\n", + "Created a chunk of size 403, which is longer than the specified 100\n", + "Created a chunk of size 255, which is longer than the specified 100\n", + "Created a chunk of size 1025, which is longer than the specified 100\n", + "Created a chunk of size 438, which is longer than the specified 100\n", + "Created a chunk of size 900, which is longer than the specified 100\n", + "Created a chunk of size 250, which is longer than the specified 100\n", + "Created a chunk of size 614, which is longer than the specified 100\n", + "Created a chunk of size 635, which is longer than the specified 100\n", + "Created a chunk of size 443, which is longer than the specified 100\n", + "Created a chunk of size 478, which is longer than the specified 100\n", + "Created a chunk of size 473, which is longer than the specified 100\n", + "Created a chunk of size 302, which is longer than the specified 100\n", + "Created a chunk of size 549, which is longer than the specified 100\n", + "Created a chunk of size 644, which is longer than the specified 100\n", + "Created a chunk of size 
402, which is longer than the specified 100\n", + "Created a chunk of size 489, which is longer than the specified 100\n", + "Created a chunk of size 551, which is longer than the specified 100\n", + "Created a chunk of size 527, which is longer than the specified 100\n", + "Created a chunk of size 563, which is longer than the specified 100\n", + "Created a chunk of size 472, which is longer than the specified 100\n", + "Created a chunk of size 511, which is longer than the specified 100\n", + "Created a chunk of size 419, which is longer than the specified 100\n", + "Created a chunk of size 245, which is longer than the specified 100\n", + "Created a chunk of size 371, which is longer than the specified 100\n", + "Created a chunk of size 484, which is longer than the specified 100\n", + "Created a chunk of size 306, which is longer than the specified 100\n", + "Created a chunk of size 190, which is longer than the specified 100\n", + "Created a chunk of size 499, which is longer than the specified 100\n", + "Created a chunk of size 480, which is longer than the specified 100\n", + "Created a chunk of size 634, which is longer than the specified 100\n", + "Created a chunk of size 611, which is longer than the specified 100\n", + "Created a chunk of size 356, which is longer than the specified 100\n", + "Created a chunk of size 478, which is longer than the specified 100\n", + "Created a chunk of size 369, which is longer than the specified 100\n", + "Created a chunk of size 526, which is longer than the specified 100\n", + "Created a chunk of size 311, which is longer than the specified 100\n", + "Created a chunk of size 181, which is longer than the specified 100\n", + "Created a chunk of size 637, which is longer than the specified 100\n", + "Created a chunk of size 219, which is longer than the specified 100\n", + "Created a chunk of size 305, which is longer than the specified 100\n", + "Created a chunk of size 409, which is longer than the specified 100\n", + 
"Created a chunk of size 235, which is longer than the specified 100\n", + "Created a chunk of size 302, which is longer than the specified 100\n", + "Created a chunk of size 236, which is longer than the specified 100\n", + "Created a chunk of size 209, which is longer than the specified 100\n", + "Created a chunk of size 366, which is longer than the specified 100\n", + "Created a chunk of size 277, which is longer than the specified 100\n", + "Created a chunk of size 591, which is longer than the specified 100\n", + "Created a chunk of size 232, which is longer than the specified 100\n", + "Created a chunk of size 543, which is longer than the specified 100\n", + "Created a chunk of size 199, which is longer than the specified 100\n", + "Created a chunk of size 214, which is longer than the specified 100\n", + "Created a chunk of size 263, which is longer than the specified 100\n", + "Created a chunk of size 375, which is longer than the specified 100\n", + "Created a chunk of size 221, which is longer than the specified 100\n", + "Created a chunk of size 261, which is longer than the specified 100\n", + "Created a chunk of size 203, which is longer than the specified 100\n", + "Created a chunk of size 758, which is longer than the specified 100\n", + "Created a chunk of size 271, which is longer than the specified 100\n", + "Created a chunk of size 323, which is longer than the specified 100\n", + "Created a chunk of size 275, which is longer than the specified 100\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "bert_load_from_file: gguf version = 2\n", + "bert_load_from_file: gguf alignment = 32\n", + "bert_load_from_file: gguf data offset = 695552\n", + "bert_load_from_file: model name = BERT\n", + "bert_load_from_file: model architecture = bert\n", + "bert_load_from_file: model file type = 1\n", + "bert_load_from_file: bert tokenizer vocab = 30522\n" + ] + } + ], + "source": [ + "from langchain_community.document_loaders import 
TextLoader\n", + "\n", + "loader = TextLoader(\"../../modules/paul_graham_essay.txt\")\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = GPT4AllEmbeddings()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "a06370cae96cbaef", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-03T11:28:42.366398Z", + "start_time": "2024-03-03T11:28:38.991827Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[Document(page_content='Computer Science is an uneasy alliance between two halves, theory and systems. The theory people prove things, and the systems people build things. I wanted to build things. I had plenty of respect for theory — indeed, a sneaking suspicion that it was the more admirable of the two halves — but building things seemed so much more exciting.', metadata={'some': 'metadata'}), Document(page_content=\"I applied to 3 grad schools: MIT and Yale, which were renowned for AI at the time, and Harvard, which I'd visited because Rich Draves went there, and was also home to Bill Woods, who'd invented the type of parser I used in my SHRDLU clone. Only Harvard accepted me, so that was where I went.\", metadata={'some': 'metadata'}), Document(page_content='For my undergraduate thesis, I reverse-engineered SHRDLU. My God did I love working on that program. It was a pleasing bit of code, but what made it even more exciting was my belief — hard to imagine now, but not unique in 1985 — that it was already climbing the lower slopes of intelligence.', metadata={'some': 'metadata'}), Document(page_content=\"The problem with systems work, though, was that it didn't last. Any program you wrote today, no matter how good, would be obsolete in a couple decades at best. People might mention your software in footnotes, but no one would actually use it. 
And indeed, it would seem very feeble work. Only people with a sense of the history of the field would even realize that, in its time, it had been good.\", metadata={'some': 'metadata'})]\n" + ] + } + ], + "source": [ + "for d in docs:\n", + " d.metadata = {\"some\": \"metadata\"}\n", + "settings = ManticoreSearchSettings(table=\"manticoresearch_vector_search_example\")\n", + "docsearch = ManticoreSearch.from_documents(docs, embeddings, config=settings)\n", + "\n", + "query = \"Robert Morris is\"\n", + "docs = docsearch.similarity_search(query)\n", + "print(docs)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/integrations/vectorstores/marqo.ipynb b/docs/docs/integrations/vectorstores/marqo.ipynb index 879661fc697..6583563203f 100644 --- a/docs/docs/integrations/vectorstores/marqo.ipynb +++ b/docs/docs/integrations/vectorstores/marqo.ipynb @@ -12,6 +12,8 @@ "\n", ">[Marqo](https://www.marqo.ai/) is an open-source vector search engine. Marqo allows you to store and query multi-modal data such as text and images. 
Marqo creates the vectors for you using a huge selection of open-source models, you can also provide your own fine-tuned models and Marqo will handle the loading and inference for you.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "To run this notebook with our docker image please run the following commands first to get Marqo:\n", "\n", "```\n", diff --git a/docs/docs/integrations/vectorstores/meilisearch.ipynb b/docs/docs/integrations/vectorstores/meilisearch.ipynb index 97c8d5cade4..176671bfa6b 100644 --- a/docs/docs/integrations/vectorstores/meilisearch.ipynb +++ b/docs/docs/integrations/vectorstores/meilisearch.ipynb @@ -10,7 +10,9 @@ ">\n", "> You can [self-host Meilisearch](https://www.meilisearch.com/docs/learn/getting_started/installation#local-installation) or run on [Meilisearch Cloud](https://www.meilisearch.com/pricing).\n", "\n", - "Meilisearch v1.3 supports vector search. This page guides you through integrating Meilisearch as a vector store and using it to perform vector search." + "Meilisearch v1.3 supports vector search. This page guides you through integrating Meilisearch as a vector store and using it to perform vector search.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/milvus.ipynb b/docs/docs/integrations/vectorstores/milvus.ipynb index 6eaabfef79e..7fb751453c5 100644 --- a/docs/docs/integrations/vectorstores/milvus.ipynb +++ b/docs/docs/integrations/vectorstores/milvus.ipynb @@ -11,7 +11,7 @@ "\n", "This notebook shows how to use functionality related to the Milvus vector database.\n", "\n", - "To run, you should have a [Milvus instance up and running](https://milvus.io/docs/install_standalone-docker.md)." 
+ "You'll need to install `langchain-milvus` with `pip install -qU langchain-milvus` to use this integration\n" ] }, { @@ -26,6 +26,14 @@ "%pip install --upgrade --quiet pymilvus" ] }, + { + "cell_type": "markdown", + "id": "633addc3", + "metadata": {}, + "source": [ + "The latest version of pymilvus comes with a local vector database Milvus Lite, good for prototyping. If you have large scale of data such as more than a million docs, we recommend setting up a more performant Milvus server on [docker or kubernetes](https://milvus.io/docs/install_standalone-docker.md#Start-Milvus)." + ] + }, { "cell_type": "markdown", "id": "7a0f9e02-8eb0-4aef-b11f-8861360472ee", @@ -41,15 +49,7 @@ "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key:········\n" - ] - } - ], + "outputs": [], "source": [ "import getpass\n", "import os\n", @@ -81,8 +81,6 @@ }, "outputs": [], "source": [ - "from langchain_community.document_loaders import TextLoader\n", - "\n", "loader = TextLoader(\"../../how_to/state_of_the_union.txt\")\n", "documents = loader.load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -100,10 +98,14 @@ }, "outputs": [], "source": [ + "# The easiest way is to use Milvus Lite where everything is stored in a local file.\n", + "# If you have a Milvus server you can use the server URI such as \"http://localhost:19530\".\n", + "URI = \"./milvus_demo.db\"\n", + "\n", "vector_db = Milvus.from_documents(\n", " docs,\n", " embeddings,\n", - " connection_args={\"host\": \"127.0.0.1\", \"port\": \"19530\"},\n", + " connection_args={\"uri\": URI},\n", ")" ] }, @@ -168,7 +170,7 @@ " docs,\n", " embeddings,\n", " collection_name=\"collection_1\",\n", - " connection_args={\"host\": \"127.0.0.1\", \"port\": \"19530\"},\n", + " connection_args={\"uri\": URI},\n", ")" ] }, @@ -189,7 +191,7 @@ "source": [ "vector_db = Milvus(\n", " embeddings,\n", - " connection_args={\"host\": 
\"127.0.0.1\", \"port\": \"19530\"},\n", + " connection_args={\"uri\": URI},\n", " collection_name=\"collection_1\",\n", ")" ] @@ -206,7 +208,6 @@ "cell_type": "markdown", "id": "7fb27b941602401d91542211134fc71a", "metadata": { - "collapsed": false, "pycharm": { "name": "#%% md\n" } @@ -216,7 +217,8 @@ "\n", "When building a retrieval app, you often have to build it with multiple users in mind. This means that you may be storing data not just for one user, but for many different users, and they should not be able to see eachother’s data.\n", "\n", - "Milvus recommends using [partition_key](https://milvus.io/docs/multi_tenancy.md#Partition-key-based-multi-tenancy) to implement multi-tenancy, here is an example." + "Milvus recommends using [partition_key](https://milvus.io/docs/multi_tenancy.md#Partition-key-based-multi-tenancy) to implement multi-tenancy, here is an example.\n", + "> The feature of Partition key is now not available in Milvus Lite, if you want to use it, you need to start Milvus server from [docker or kubernetes](https://milvus.io/docs/install_standalone-docker.md#Start-Milvus)." 
] }, { @@ -224,7 +226,6 @@ "execution_count": 2, "id": "acae54e37e7d407bbb7b55eff062a284", "metadata": { - "collapsed": false, "pycharm": { "name": "#%%\n" } @@ -240,7 +241,7 @@ "vectorstore = Milvus.from_documents(\n", " docs,\n", " embeddings,\n", - " connection_args={\"host\": \"127.0.0.1\", \"port\": \"19530\"},\n", + " connection_args={\"uri\": URI},\n", " drop_old=True,\n", " partition_key_field=\"namespace\", # Use the \"namespace\" field as the partition key\n", ")" @@ -250,7 +251,6 @@ "cell_type": "markdown", "id": "9a63283cbaf04dbcab1f6479b197f3a8", "metadata": { - "collapsed": false, "pycharm": { "name": "#%% md\n" } @@ -272,7 +272,6 @@ "execution_count": 3, "id": "8dd0d8092fe74a7c96281538738b07e2", "metadata": { - "collapsed": false, "pycharm": { "name": "#%%\n" } @@ -301,7 +300,6 @@ "execution_count": 4, "id": "72eea5119410473aa328ad9291626812", "metadata": { - "collapsed": false, "pycharm": { "name": "#%%\n" } @@ -330,7 +328,7 @@ "id": "89756e9e", "metadata": {}, "source": [ - "**To delete or upsert (update/insert) one or more entities:**" + "### To delete or upsert (update/insert) one or more entities" ] }, { @@ -351,7 +349,7 @@ "vector_db = Milvus.from_documents(\n", " docs,\n", " embeddings,\n", - " connection_args={\"host\": \"127.0.0.1\", \"port\": \"19530\"},\n", + " connection_args={\"uri\": URI},\n", ")\n", "\n", "# Search pks (primary keys) using expression\n", @@ -387,9 +385,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.12" + "version": "3.9.18" } }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb index 50990c5f400..7df6c4fddf5 100644 --- a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb +++ b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb @@ -48,7 +48,7 @@ }, "outputs": [], "source": [ - "%pip 
install --upgrade --quiet momento langchain-openai tiktoken" + "%pip install --upgrade --quiet momento langchain-openai langchain-community tiktoken" ] }, { diff --git a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb index 24081245dbf..9ceceee0fd1 100644 --- a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb +++ b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb @@ -9,6 +9,8 @@ "\n", ">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS, Azure, and GCP. It now has support for native Vector Search on your MongoDB document data.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use [MongoDB Atlas Vector Search](https://www.mongodb.com/products/platform/atlas-vector-search) to store your embeddings in MongoDB documents, create a vector search index, and perform KNN search with an approximate nearest neighbor algorithm (`Hierarchical Navigable Small Worlds`). It uses the [$vectorSearch MQL Stage](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/). 
\n", "\n", "\n", diff --git a/docs/docs/integrations/vectorstores/myscale.ipynb b/docs/docs/integrations/vectorstores/myscale.ipynb index 760a55b4620..c75ee44ac79 100644 --- a/docs/docs/integrations/vectorstores/myscale.ipynb +++ b/docs/docs/integrations/vectorstores/myscale.ipynb @@ -31,7 +31,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet clickhouse-connect" + "%pip install --upgrade --quiet clickhouse-connect langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/neo4jvector.ipynb b/docs/docs/integrations/vectorstores/neo4jvector.ipynb index 20788d0d28a..72ca016a351 100644 --- a/docs/docs/integrations/vectorstores/neo4jvector.ipynb +++ b/docs/docs/integrations/vectorstores/neo4jvector.ipynb @@ -34,7 +34,7 @@ "source": [ "# Pip install necessary package\n", "%pip install --upgrade --quiet neo4j\n", - "%pip install --upgrade --quiet langchain-openai\n", + "%pip install --upgrade --quiet langchain-openai langchain-community\n", "%pip install --upgrade --quiet tiktoken" ] }, diff --git a/docs/docs/integrations/vectorstores/nucliadb.ipynb b/docs/docs/integrations/vectorstores/nucliadb.ipynb index 8112a25c3c2..74bc916d3ef 100644 --- a/docs/docs/integrations/vectorstores/nucliadb.ipynb +++ b/docs/docs/integrations/vectorstores/nucliadb.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain nuclia" + "%pip install --upgrade --quiet langchain langchain-community nuclia" ] }, { diff --git a/docs/docs/integrations/vectorstores/opensearch.ipynb b/docs/docs/integrations/vectorstores/opensearch.ipynb index e377273ebe8..febe4b117d6 100644 --- a/docs/docs/integrations/vectorstores/opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/opensearch.ipynb @@ -37,7 +37,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet opensearch-py" + "%pip install --upgrade --quiet opensearch-py langchain-community" ] }, { diff --git 
a/docs/docs/integrations/vectorstores/oracle.ipynb b/docs/docs/integrations/vectorstores/oracle.ipynb index 862db4673cb..fbdf8085b7e 100644 --- a/docs/docs/integrations/vectorstores/oracle.ipynb +++ b/docs/docs/integrations/vectorstores/oracle.ipynb @@ -43,6 +43,8 @@ "source": [ "### Prerequisites for using Langchain with Oracle AI Vector Search\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "Please install Oracle Python Client driver to use Langchain with Oracle AI Vector Search. " ] }, diff --git a/docs/docs/integrations/vectorstores/pathway.ipynb b/docs/docs/integrations/vectorstores/pathway.ipynb index 9664f0386d2..985344d3420 100644 --- a/docs/docs/integrations/vectorstores/pathway.ipynb +++ b/docs/docs/integrations/vectorstores/pathway.ipynb @@ -18,7 +18,9 @@ "\n", "We will connect to the index using a `VectorStore` client, which implements the `similarity_search` function to retrieve matching documents.\n", "\n", - "The basic pipeline used in this document allows to effortlessly build a simple vector index of files stored in a cloud location. However, Pathway provides everything needed to build realtime data pipelines and apps, including SQL-like able operations such as groupby-reductions and joins between disparate data sources, time-based grouping and windowing of data, and a wide array of connectors.\n" + "The basic pipeline used in this document allows to effortlessly build a simple vector index of files stored in a cloud location. 
However, Pathway provides everything needed to build realtime data pipelines and apps, including SQL-like able operations such as groupby-reductions and joins between disparate data sources, time-based grouping and windowing of data, and a wide array of connectors.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgembedding.ipynb b/docs/docs/integrations/vectorstores/pgembedding.ipynb index 838a1be2d8b..e1a75c5545e 100644 --- a/docs/docs/integrations/vectorstores/pgembedding.ipynb +++ b/docs/docs/integrations/vectorstores/pgembedding.ipynb @@ -29,7 +29,7 @@ "outputs": [], "source": [ "# Pip install necessary package\n", - "%pip install --upgrade --quiet langchain-openai\n", + "%pip install --upgrade --quiet langchain-openai langchain-community\n", "%pip install --upgrade --quiet psycopg2-binary\n", "%pip install --upgrade --quiet tiktoken" ] diff --git a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb index 2eb6aa9f755..e366ee478a8 100644 --- a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb +++ b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb @@ -15,7 +15,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install \"pgvecto_rs[sdk]\"" + "%pip install \"pgvecto_rs[sdk]\" langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/docs/integrations/vectorstores/pinecone.ipynb index c305b5053df..8efd1a4d0d5 100644 --- a/docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/docs/integrations/vectorstores/pinecone.ipynb @@ -29,6 +29,7 @@ " langchain-pinecone \\\n", " langchain-openai \\\n", " langchain \\\n", + " langchain-community \\\n", " pinecone-notebooks" ] }, diff --git a/docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/docs/integrations/vectorstores/qdrant.ipynb index ed825beb002..038ca0d6b66 100644 
--- a/docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/docs/integrations/vectorstores/qdrant.ipynb @@ -30,7 +30,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain-qdrant langchain-openai langchain" + "%pip install --upgrade --quiet langchain-qdrant langchain-openai langchain langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/relyt.ipynb b/docs/docs/integrations/vectorstores/relyt.ipynb index 4692e06b027..6fd9aed64be 100644 --- a/docs/docs/integrations/vectorstores/relyt.ipynb +++ b/docs/docs/integrations/vectorstores/relyt.ipynb @@ -22,7 +22,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install \"pgvecto_rs[sdk]\"" + "%pip install \"pgvecto_rs[sdk]\" langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/rockset.ipynb b/docs/docs/integrations/vectorstores/rockset.ipynb index 8d1f5bf147b..3c664daa5d6 100644 --- a/docs/docs/integrations/vectorstores/rockset.ipynb +++ b/docs/docs/integrations/vectorstores/rockset.ipynb @@ -9,7 +9,9 @@ "\n", ">[Rockset](https://rockset.com/) is a real-time search and analytics database built for the cloud. Rockset uses a [Converged Index™](https://rockset.com/blog/converged-indexing-the-secret-sauce-behind-rocksets-fast-queries/) with an efficient store for vector embeddings to serve low latency, high concurrency search queries at scale. Rockset has full support for metadata filtering and handles real-time ingestion for constantly updating, streaming data.\n", "\n", - "This notebook demonstrates how to use `Rockset` as a vector store in LangChain. Before getting started, make sure you have access to a `Rockset` account and an API key available. [Start your free trial today.](https://rockset.com/create/)\n" + "This notebook demonstrates how to use `Rockset` as a vector store in LangChain. Before getting started, make sure you have access to a `Rockset` account and an API key available. 
[Start your free trial today.](https://rockset.com/create/)\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/sap_hanavector.ipynb b/docs/docs/integrations/vectorstores/sap_hanavector.ipynb index ff84478a188..e6f7c0da455 100644 --- a/docs/docs/integrations/vectorstores/sap_hanavector.ipynb +++ b/docs/docs/integrations/vectorstores/sap_hanavector.ipynb @@ -6,7 +6,9 @@ "source": [ "# SAP HANA Cloud Vector Engine\n", "\n", - ">[SAP HANA Cloud Vector Engine](https://www.sap.com/events/teched/news-guide/ai.html#article8) is a vector store fully integrated into the `SAP HANA Cloud` database." + ">[SAP HANA Cloud Vector Engine](https://www.sap.com/events/teched/news-guide/ai.html#article8) is a vector store fully integrated into the `SAP HANA Cloud` database.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { @@ -39,7 +41,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 11, "metadata": { "ExecuteTime": { "end_time": "2023-09-09T08:02:16.802456Z", @@ -62,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 8, "metadata": { "ExecuteTime": { "end_time": "2023-09-09T08:02:28.174088Z", @@ -100,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": { "ExecuteTime": { "end_time": "2023-09-09T08:02:25.452472Z", @@ -132,7 +134,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "metadata": { "ExecuteTime": { "end_time": "2023-09-09T08:04:16.696625Z", @@ -539,7 +541,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -572,7 +574,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ 
-604,7 +606,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -867,6 +869,113 @@ " print(\"-\" * 80)\n", " print(doc.page_content)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Filter Performance Optimization with Custom Columns" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To allow flexible metadata values, all metadata is stored as JSON in the metadata column by default. If some of the used metadata keys and value types are known, they can be stored in additional columns instead by creating the target table with the key names as column names and passing them to the HanaDB constructor via the specific_metadata_columns list. Metadata keys that match those values are copied into the special column during insert. Filters use the special columns instead of the metadata JSON column for keys in the specific_metadata_columns list." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a new table \"PERFORMANT_CUSTOMTEXT_FILTER\" with three \"standard\" columns and one additional column\n", + "my_own_table_name = \"PERFORMANT_CUSTOMTEXT_FILTER\"\n", + "cur = connection.cursor()\n", + "cur.execute(\n", + " (\n", + " f\"CREATE TABLE {my_own_table_name} (\"\n", + " \"CUSTOMTEXT NVARCHAR(500), \"\n", + " \"MY_TEXT NVARCHAR(2048), \"\n", + " \"MY_METADATA NVARCHAR(1024), \"\n", + " \"MY_VECTOR REAL_VECTOR )\"\n", + " )\n", + ")\n", + "\n", + "# Create a HanaDB instance with the own table\n", + "db = HanaDB(\n", + " connection=connection,\n", + " embedding=embeddings,\n", + " table_name=my_own_table_name,\n", + " content_column=\"MY_TEXT\",\n", + " metadata_column=\"MY_METADATA\",\n", + " vector_column=\"MY_VECTOR\",\n", + " specific_metadata_columns=[\"CUSTOMTEXT\"],\n", + ")\n", + "\n", + "# Add a simple document with some metadata\n", + "docs = [\n", + " Document(\n", 
+ " page_content=\"Some other text\",\n", + " metadata={\n", + " \"start\": 400,\n", + " \"end\": 450,\n", + " \"doc_name\": \"other.txt\",\n", + " \"CUSTOMTEXT\": \"Filters on this value are very performant\",\n", + " },\n", + " )\n", + "]\n", + "db.add_documents(docs)\n", + "\n", + "# Check if data has been inserted into our own table\n", + "cur.execute(f\"SELECT * FROM {my_own_table_name} LIMIT 1\")\n", + "rows = cur.fetchall()\n", + "print(\n", + " rows[0][0]\n", + ") # Value of column \"CUSTOMTEXT\". Should be \"Filters on this value are very performant\"\n", + "print(rows[0][1]) # The text\n", + "print(\n", + " rows[0][2]\n", + ") # The metadata without the \"CUSTOMTEXT\" data, as this is extracted into a sperate column\n", + "print(rows[0][3]) # The vector\n", + "\n", + "cur.close()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The special columns are completely transparent to the rest of the langchain interface. Everything works as it did before, just more performant." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "docs = [\n", + " Document(\n", + " page_content=\"Some more text\",\n", + " metadata={\n", + " \"start\": 800,\n", + " \"end\": 950,\n", + " \"doc_name\": \"more.txt\",\n", + " \"CUSTOMTEXT\": \"Another customtext value\",\n", + " },\n", + " )\n", + "]\n", + "db.add_documents(docs)\n", + "\n", + "advanced_filter = {\"CUSTOMTEXT\": {\"$like\": \"%value%\"}}\n", + "query = \"What's up?\"\n", + "docs = db.similarity_search(query, k=2, filter=advanced_filter)\n", + "for doc in docs:\n", + " print(\"-\" * 80)\n", + " print(doc.page_content)" + ] } ], "metadata": { @@ -885,7 +994,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.9" } }, "nbformat": 4, diff --git a/docs/docs/integrations/vectorstores/scann.ipynb b/docs/docs/integrations/vectorstores/scann.ipynb index 9f38d86253f..9ff42e8b2cf 100644 --- a/docs/docs/integrations/vectorstores/scann.ipynb +++ b/docs/docs/integrations/vectorstores/scann.ipynb @@ -9,7 +9,9 @@ "\n", "ScaNN (Scalable Nearest Neighbors) is a method for efficient vector similarity search at scale.\n", "\n", - "ScaNN includes search space pruning and quantization for Maximum Inner Product Search and also supports other distance functions such as Euclidean distance. The implementation is optimized for x86 processors with AVX2 support. See its [Google Research github](https://github.com/google-research/google-research/tree/master/scann) for more details." + "ScaNN includes search space pruning and quantization for Maximum Inner Product Search and also supports other distance functions such as Euclidean distance. The implementation is optimized for x86 processors with AVX2 support. 
See its [Google Research github](https://github.com/google-research/google-research/tree/master/scann) for more details.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/semadb.ipynb b/docs/docs/integrations/vectorstores/semadb.ipynb index b9f94c97b1b..65eca6cebae 100644 --- a/docs/docs/integrations/vectorstores/semadb.ipynb +++ b/docs/docs/integrations/vectorstores/semadb.ipynb @@ -11,7 +11,9 @@ "\n", "The full documentation of the API along with examples and an interactive playground is available on [RapidAPI](https://rapidapi.com/semafind-semadb/api/semadb).\n", "\n", - "This notebook demonstrates usage of the `SemaDB Cloud` vector store." + "This notebook demonstrates usage of the `SemaDB Cloud` vector store.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { @@ -88,7 +90,7 @@ "metadata": {}, "outputs": [ { - "name": "stdin", + "name": "stdout", "output_type": "stream", "text": [ "SemaDB API Key: ········\n" diff --git a/docs/docs/integrations/vectorstores/singlestoredb.ipynb b/docs/docs/integrations/vectorstores/singlestoredb.ipynb index 47cf6cb3075..ba5d9b11746 100644 --- a/docs/docs/integrations/vectorstores/singlestoredb.ipynb +++ b/docs/docs/integrations/vectorstores/singlestoredb.ipynb @@ -16,7 +16,9 @@ "\n", "What sets SingleStoreDB apart is its ability to combine vector and full-text searches in various ways, offering flexibility and versatility. 
Whether prefiltering by text or vector similarity and selecting the most relevant data, or employing a weighted sum approach to compute a final similarity score, developers have multiple options at their disposal.\n", "\n", - "In essence, SingleStoreDB provides a comprehensive solution for managing and querying vector data, offering unparalleled performance and flexibility for AI-driven applications." + "In essence, SingleStoreDB provides a comprehensive solution for managing and querying vector data, offering unparalleled performance and flexibility for AI-driven applications.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/sklearn.ipynb b/docs/docs/integrations/vectorstores/sklearn.ipynb index 66a16d31d60..af1c93c02d1 100644 --- a/docs/docs/integrations/vectorstores/sklearn.ipynb +++ b/docs/docs/integrations/vectorstores/sklearn.ipynb @@ -8,7 +8,9 @@ "\n", ">[scikit-learn](https://scikit-learn.org/stable/) is an open-source collection of machine learning algorithms, including some implementations of the [k nearest neighbors](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). `SKLearnVectorStore` wraps this implementation and adds the possibility to persist the vector store in json, bson (binary json) or Apache Parquet format.\n", "\n", - "This notebook shows how to use the `SKLearnVectorStore` vector database." 
+ "This notebook shows how to use the `SKLearnVectorStore` vector database.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/sqlitevss.ipynb b/docs/docs/integrations/vectorstores/sqlitevss.ipynb index 03b30e347fd..1d036ca1947 100644 --- a/docs/docs/integrations/vectorstores/sqlitevss.ipynb +++ b/docs/docs/integrations/vectorstores/sqlitevss.ipynb @@ -13,6 +13,8 @@ "\n", ">[SQLite-VSS](https://alexgarcia.xyz/sqlite-vss/) is an `SQLite` extension designed for vector search, emphasizing local-first operations and easy integration into applications without external servers. Leveraging the `Faiss` library, it offers efficient similarity search and clustering capabilities.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "This notebook shows how to use the `SQLiteVSS` vector database." 
] }, diff --git a/docs/docs/integrations/vectorstores/starrocks.ipynb b/docs/docs/integrations/vectorstores/starrocks.ipynb index f00cec80492..e6b55b600eb 100644 --- a/docs/docs/integrations/vectorstores/starrocks.ipynb +++ b/docs/docs/integrations/vectorstores/starrocks.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet pymysql" + "%pip install --upgrade --quiet pymysql langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/supabase.ipynb b/docs/docs/integrations/vectorstores/supabase.ipynb index e13d11e5ab6..ca9b2968862 100644 --- a/docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/docs/integrations/vectorstores/supabase.ipynb @@ -19,6 +19,8 @@ "\n", "This notebook shows how to use `Supabase` and `pgvector` as your VectorStore.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "To run this notebook, please ensure:\n", "- the `pgvector` extension is enabled\n", "- you have installed the `supabase-py` package\n", diff --git a/docs/docs/integrations/vectorstores/tair.ipynb b/docs/docs/integrations/vectorstores/tair.ipynb index ad0f066256d..81aa08652c0 100644 --- a/docs/docs/integrations/vectorstores/tair.ipynb +++ b/docs/docs/integrations/vectorstores/tair.ipynb @@ -11,6 +11,8 @@ "\n", "This notebook shows how to use functionality related to the `Tair` vector database.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "To run, you should have a `Tair` instance up and running." 
] }, diff --git a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb index f07d46449ed..532b22a2f01 100644 --- a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb +++ b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip3 install tcvectordb" + "!pip3 install tcvectordb langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/thirdai_neuraldb.ipynb b/docs/docs/integrations/vectorstores/thirdai_neuraldb.ipynb index 4eb85227605..eb830485a97 100644 --- a/docs/docs/integrations/vectorstores/thirdai_neuraldb.ipynb +++ b/docs/docs/integrations/vectorstores/thirdai_neuraldb.ipynb @@ -16,7 +16,9 @@ "\n", "For all of the following initialization methods, the `thirdai_key` parameter can be omitted if the `THIRDAI_KEY` environment variable is set.\n", "\n", - "ThirdAI API keys can be obtained at https://www.thirdai.com/try-bolt/" + "ThirdAI API keys can be obtained at https://www.thirdai.com/try-bolt/\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { @@ -25,7 +27,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import NeuralDBVectorStore\n", + "from langchain_community.vectorstores import NeuralDBVectorStore\n", "\n", "# From scratch\n", "vectorstore = NeuralDBVectorStore.from_scratch(thirdai_key=\"your-thirdai-key\")\n", diff --git a/docs/docs/integrations/vectorstores/tidb_vector.ipynb b/docs/docs/integrations/vectorstores/tidb_vector.ipynb index 0f453cb9c9a..ee40990ac59 100644 --- a/docs/docs/integrations/vectorstores/tidb_vector.ipynb +++ b/docs/docs/integrations/vectorstores/tidb_vector.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install langchain\n", + "%pip install langchain langchain-community\n", "%pip install langchain-openai\n", "%pip install pymysql\n", 
"%pip install tidb-vector" diff --git a/docs/docs/integrations/vectorstores/tigris.ipynb b/docs/docs/integrations/vectorstores/tigris.ipynb index 3d5ea550f25..5ca1e57935a 100644 --- a/docs/docs/integrations/vectorstores/tigris.ipynb +++ b/docs/docs/integrations/vectorstores/tigris.ipynb @@ -44,7 +44,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet tigrisdb openapi-schema-pydantic langchain-openai tiktoken" + "%pip install --upgrade --quiet tigrisdb openapi-schema-pydantic langchain-openai langchain-community tiktoken" ] }, { diff --git a/docs/docs/integrations/vectorstores/tiledb.ipynb b/docs/docs/integrations/vectorstores/tiledb.ipynb index c5fc0aec26a..4da8ebd17a1 100644 --- a/docs/docs/integrations/vectorstores/tiledb.ipynb +++ b/docs/docs/integrations/vectorstores/tiledb.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet tiledb-vector-search" + "%pip install --upgrade --quiet tiledb-vector-search langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/timescalevector.ipynb b/docs/docs/integrations/vectorstores/timescalevector.ipynb index 8bd067979f7..8e11ff203cf 100644 --- a/docs/docs/integrations/vectorstores/timescalevector.ipynb +++ b/docs/docs/integrations/vectorstores/timescalevector.ipynb @@ -53,7 +53,7 @@ "source": [ "# Pip install necessary packages\n", "%pip install --upgrade --quiet timescale-vector\n", - "%pip install --upgrade --quiet langchain-openai\n", + "%pip install --upgrade --quiet langchain-openai langchain-community\n", "%pip install --upgrade --quiet tiktoken" ] }, diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb index 77726f1cbb9..8b3c11b7e29 100644 --- a/docs/docs/integrations/vectorstores/typesense.ipynb +++ b/docs/docs/integrations/vectorstores/typesense.ipynb @@ -38,7 +38,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet typesense openapi-schema-pydantic 
langchain-openai tiktoken" + "%pip install --upgrade --quiet typesense openapi-schema-pydantic langchain-openai langchain-community tiktoken" ] }, { diff --git a/docs/docs/integrations/vectorstores/upstash.ipynb b/docs/docs/integrations/vectorstores/upstash.ipynb index fe49cd59666..03b1909e74f 100644 --- a/docs/docs/integrations/vectorstores/upstash.ipynb +++ b/docs/docs/integrations/vectorstores/upstash.ipynb @@ -38,7 +38,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install langchain-openai langchain upstash-vector" + "%pip install langchain-openai langchain langchain-community upstash-vector" ] }, { diff --git a/docs/docs/integrations/vectorstores/usearch.ipynb b/docs/docs/integrations/vectorstores/usearch.ipynb index c691799ce8d..f944c3360da 100644 --- a/docs/docs/integrations/vectorstores/usearch.ipynb +++ b/docs/docs/integrations/vectorstores/usearch.ipynb @@ -20,7 +20,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet usearch" + "%pip install --upgrade --quiet usearch langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/vald.ipynb b/docs/docs/integrations/vectorstores/vald.ipynb index 96dc0df063d..4a994886327 100644 --- a/docs/docs/integrations/vectorstores/vald.ipynb +++ b/docs/docs/integrations/vectorstores/vald.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet vald-client-python" + "%pip install --upgrade --quiet vald-client-python langchain-community" ] }, { diff --git a/docs/docs/integrations/vectorstores/vdms.ipynb b/docs/docs/integrations/vectorstores/vdms.ipynb index 752c8876232..6a4a76bcfc7 100644 --- a/docs/docs/integrations/vectorstores/vdms.ipynb +++ b/docs/docs/integrations/vectorstores/vdms.ipynb @@ -19,6 +19,8 @@ "\n", "This notebook shows how to use VDMS as a vector store using the docker image.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "To begin, 
install the Python packages for the VDMS client and Sentence Transformers:" ] }, @@ -847,7 +849,7 @@ "print_response([response[0][\"FindDescriptor\"][\"entities\"][0]])\n", "\n", "# Delete id=2\n", - "db.delete(collection_name=collection_name, ids=[\"2\"]);" + "db.delete(collection_name=collection_name, ids=[\"2\"])" ] }, { diff --git a/docs/docs/integrations/vectorstores/vearch.ipynb b/docs/docs/integrations/vectorstores/vearch.ipynb index 657ae0f7012..da2efb893a7 100644 --- a/docs/docs/integrations/vectorstores/vearch.ipynb +++ b/docs/docs/integrations/vectorstores/vearch.ipynb @@ -15,7 +15,9 @@ "source": [ "## Setting up\n", "\n", - "Follow [instructions](https://vearch.readthedocs.io/en/latest/quick-start-guide.html#)." + "Follow [instructions](https://vearch.readthedocs.io/en/latest/quick-start-guide.html#).\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/vectara.ipynb b/docs/docs/integrations/vectorstores/vectara.ipynb index 70756b58d91..496ba565659 100644 --- a/docs/docs/integrations/vectorstores/vectara.ipynb +++ b/docs/docs/integrations/vectorstores/vectara.ipynb @@ -21,7 +21,9 @@ "\n", "See the [Vectara API documentation](https://docs.vectara.com/docs/) for more information on how to use the API.\n", "\n", - "This notebook shows how to use the basic retrieval functionality, when utilizing Vectara just as a Vector Store (without summarization), incuding: `similarity_search` and `similarity_search_with_score` as well as using the LangChain `as_retriever` functionality." 
+ "This notebook shows how to use the basic retrieval functionality, when utilizing Vectara just as a Vector Store (without summarization), including: `similarity_search` and `similarity_search_with_score` as well as using the LangChain `as_retriever` functionality.\n", + "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration" ] }, { diff --git a/docs/docs/integrations/vectorstores/vespa.ipynb b/docs/docs/integrations/vectorstores/vespa.ipynb index abacdc296ca..a6991f7f965 100644 --- a/docs/docs/integrations/vectorstores/vespa.ipynb +++ b/docs/docs/integrations/vectorstores/vespa.ipynb @@ -11,6 +11,8 @@ "\n", "This notebook shows how to use `Vespa.ai` as a LangChain vector store.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "In order to create the vector store, we use\n", "[pyvespa](https://pyvespa.readthedocs.io/en/latest/index.html) to create a\n", "connection a `Vespa` service."
diff --git a/docs/docs/integrations/vectorstores/vikingdb.ipynb b/docs/docs/integrations/vectorstores/vikingdb.ipynb index 3e8d32f4ae2..14cd3573185 100644 --- a/docs/docs/integrations/vectorstores/vikingdb.ipynb +++ b/docs/docs/integrations/vectorstores/vikingdb.ipynb @@ -13,6 +13,8 @@ "\n", "This notebook shows how to use functionality related to the VikingDB vector database.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "To run, you should have a [viking DB instance up and running](https://www.volcengine.com/docs/6459/1165058).\n", "\n", "\n" diff --git a/docs/docs/integrations/vectorstores/vlite.ipynb b/docs/docs/integrations/vectorstores/vlite.ipynb index 46a2f46a447..8a5dd0d2a72 100644 --- a/docs/docs/integrations/vectorstores/vlite.ipynb +++ b/docs/docs/integrations/vectorstores/vlite.ipynb @@ -9,6 +9,8 @@ "\n", "VLite is a simple and blazing fast vector database that allows you to store and retrieve data semantically using embeddings. 
Made with numpy, vlite is a lightweight batteries-included database to implement RAG, similarity search, and embeddings into your projects.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "## Installation\n", "\n", "To use the VLite in LangChain, you need to install the `vlite` package:\n", @@ -20,7 +22,7 @@ "## Importing VLite\n", "\n", "```python\n", - "from langchain.vectorstores import VLite\n", + "from langchain_community.vectorstores import VLite\n", "```\n", "\n", "## Basic Example\n", diff --git a/docs/docs/integrations/vectorstores/xata.ipynb b/docs/docs/integrations/vectorstores/xata.ipynb index 10074c047a4..83afa4d89eb 100644 --- a/docs/docs/integrations/vectorstores/xata.ipynb +++ b/docs/docs/integrations/vectorstores/xata.ipynb @@ -52,7 +52,7 @@ }, "outputs": [], "source": [ - "%pip install --upgrade --quiet xata langchain-openai tiktoken langchain" + "%pip install --upgrade --quiet xata langchain-openai langchain-community tiktoken langchain" ] }, { diff --git a/docs/docs/integrations/vectorstores/yellowbrick.ipynb b/docs/docs/integrations/vectorstores/yellowbrick.ipynb index a8ccb56eb7d..8c155175f43 100644 --- a/docs/docs/integrations/vectorstores/yellowbrick.ipynb +++ b/docs/docs/integrations/vectorstores/yellowbrick.ipynb @@ -34,7 +34,7 @@ "source": [ "# Install all needed libraries\n", "%pip install --upgrade --quiet langchain\n", - "%pip install --upgrade --quiet langchain-openai\n", + "%pip install --upgrade --quiet langchain-openai langchain-community\n", "%pip install --upgrade --quiet psycopg2-binary\n", "%pip install --upgrade --quiet tiktoken" ] @@ -324,7 +324,7 @@ "vector_store = Yellowbrick.from_documents(\n", " documents=split_docs,\n", " embedding=embeddings,\n", - " connection_info=yellowbrick_connection_string,\n", + " connection_string=yellowbrick_connection_string,\n", " table=embedding_table,\n", ")\n", "\n", diff --git 
a/docs/docs/integrations/vectorstores/zep.ipynb b/docs/docs/integrations/vectorstores/zep.ipynb index 5e5bd852483..afff8415f8e 100644 --- a/docs/docs/integrations/vectorstores/zep.ipynb +++ b/docs/docs/integrations/vectorstores/zep.ipynb @@ -22,6 +22,8 @@ ">\n", "> Zep Open Source Docs: [https://docs.getzep.com/](https://docs.getzep.com/)\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "## Usage\n", "\n", "In the examples below, we're using Zep's auto-embedding feature which automatically embeds documents on the Zep server \n", diff --git a/docs/docs/integrations/vectorstores/zep_cloud.ipynb b/docs/docs/integrations/vectorstores/zep_cloud.ipynb new file mode 100644 index 00000000000..5d130f68ce8 --- /dev/null +++ b/docs/docs/integrations/vectorstores/zep_cloud.ipynb @@ -0,0 +1,484 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "550edc01b00149cd", + "metadata": { + "collapsed": false + }, + "source": [ + "# Zep Cloud\n", + "> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n", + "\n", + "> [Zep](https://www.getzep.com) is a long-term memory service for AI Assistant apps.\n", + "> With Zep, you can provide AI assistants with the ability to recall past conversations, no matter how distant,\n", + "> while also reducing hallucinations, latency, and cost.\n", + "\n", + "> See [Zep Cloud Installation Guide](https://help.getzep.com/sdks)\n", + "\n", + "## Usage\n", + "\n", + "In the examples below, we're using Zep's auto-embedding feature which automatically embeds documents on the Zep server \n", + "using low-latency embedding models.\n", + "\n", + "## Note\n", + "- These examples use Zep's async interfaces. Call sync interfaces by removing the `a` prefix from the method names." 
+ ] + }, + { + "cell_type": "markdown", + "id": "9a3a11aab1412d98", + "metadata": { + "collapsed": false + }, + "source": [ + "## Load or create a Collection from documents" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "519418421a32e4d", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T03:10:19.459062Z", + "start_time": "2024-05-10T03:10:18.090479Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "from uuid import uuid4\n", + "\n", + "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_community.vectorstores import ZepCloudVectorStore\n", + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "\n", + "ZEP_API_KEY = \"\" # You can generate your zep project key from the Zep dashboard\n", + "collection_name = f\"babbage{uuid4().hex}\" # a unique collection name. alphanum only\n", + "\n", + "# load the document\n", + "article_url = \"https://www.gutenberg.org/cache/epub/71292/pg71292.txt\"\n", + "loader = WebBaseLoader(article_url)\n", + "documents = loader.load()\n", + "\n", + "# split it into chunks\n", + "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "# Instantiate the VectorStore. 
Since the collection does not already exist in Zep,\n", + "# it will be created and populated with the documents we pass in.\n", + "vs = ZepCloudVectorStore.from_documents(\n", + " docs,\n", + " embedding=None,\n", + " collection_name=collection_name,\n", + " api_key=ZEP_API_KEY,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "201dc57b124cb6d7", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T03:10:24.393735Z", + "start_time": "2024-05-10T03:10:23.131246Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Embedding status: 401/401 documents embedded\n" + ] + } + ], + "source": [ + "# wait for the collection embedding to complete\n", + "\n", + "\n", + "async def wait_for_ready(collection_name: str) -> None:\n", + " import time\n", + "\n", + " from zep_cloud.client import AsyncZep\n", + "\n", + " client = AsyncZep(api_key=ZEP_API_KEY)\n", + "\n", + " while True:\n", + " c = await client.document.get_collection(collection_name)\n", + " print(\n", + " \"Embedding status: \"\n", + " f\"{c.document_embedded_count}/{c.document_count} documents embedded\"\n", + " )\n", + " time.sleep(1)\n", + " if c.document_embedded_count == c.document_count:\n", + " break\n", + "\n", + "\n", + "await wait_for_ready(collection_name)" + ] + }, + { + "cell_type": "markdown", + "id": "94ca9dfa7d0ecaa5", + "metadata": { + "collapsed": false + }, + "source": [ + "## Similarity Search Query over the Collection" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1998de0a96fe89c3", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T02:56:13.039583Z", + "start_time": "2024-05-10T02:56:12.825349Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "the positions of the two principal planets, (and these the most\r\n", + "necessary for the navigator,) Jupiter and Saturn, require each not less\r\n", + "than one
hundred and sixteen tables. Yet it is not only necessary to\r\n", + "predict the position of these bodies, but it is likewise expedient to\r\n", + "tabulate the motions of the four satellites of Jupiter, to predict the\r\n", + "exact times at which they enter his shadow, and at which their shadows\r\n", + "cross his disc, as well as the times at which they are interposed -> 0.78691166639328 \n", + "====\n", + "\n", + "are reduced to a system of wheel-work. We are, nevertheless, not without\r\n", + "hopes of conveying, even to readers unskilled in mathematics, some\r\n", + "satisfactory notions of a general nature on this subject.\r\n", + "\r\n", + "_Thirdly_, To explain the actual state of the machinery at the present\r\n", + "time; what progress has been made towards its completion; and what are\r\n", + "the probable causes of those delays in its progress, which must be a\r\n", + "subject of regret to all friends of science. We shall indicate what -> 0.7853284478187561 \n", + "====\n", + "\n", + "from the improved state of astronomy, he found it necessary to recompute\r\n", + "these tables in 1821.\r\n", + "\r\n", + "Although it is now about thirty years since the discovery of the four\r\n", + "new planets, Ceres, Pallas, Juno, and Vesta, it was not till recently\r\n", + "that tables of their motions were published. 
They have lately appeared\r\n", + "in Encke's Ephemeris.\r\n", + "\r\n", + "We have thus attempted to convey some notion (though necessarily a very\r\n", + "inadequate one) of the immense extent of numerical tables which it has -> 0.7840130925178528 \n", + "====\n", + "\n" + ] + } + ], + "source": [ + "# query it\n", + "query = \"what is the structure of our solar system?\"\n", + "docs_scores = await vs.asimilarity_search_with_relevance_scores(query, k=3)\n", + "\n", + "# print results\n", + "for d, s in docs_scores:\n", + " print(d.page_content, \" -> \", s, \"\\n====\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "e02b61a9af0b2c80", + "metadata": { + "collapsed": false + }, + "source": [ + "## Search over Collection Re-ranked by MMR\n", + "\n", + "Zep offers native, hardware-accelerated MMR re-ranking of search results." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "488112da752b1d58", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T02:56:16.596274Z", + "start_time": "2024-05-10T02:56:16.284597Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "the positions of the two principal planets, (and these the most\r\n", + "necessary for the navigator,) Jupiter and Saturn, require each not less\r\n", + "than one hundred and sixteen tables. Yet it is not only necessary to\r\n", + "predict the position of these bodies, but it is likewise expedient to\r\n", + "tabulate the motions of the four satellites of Jupiter, to predict the\r\n", + "exact times at which they enter his shadow, and at which their shadows\r\n", + "cross his disc, as well as the times at which they are interposed \n", + "====\n", + "\n", + "are reduced to a system of wheel-work. 
We are, nevertheless, not without\r\n", + "hopes of conveying, even to readers unskilled in mathematics, some\r\n", + "satisfactory notions of a general nature on this subject.\r\n", + "\r\n", + "_Thirdly_, To explain the actual state of the machinery at the present\r\n", + "time; what progress has been made towards its completion; and what are\r\n", + "the probable causes of those delays in its progress, which must be a\r\n", + "subject of regret to all friends of science. We shall indicate what \n", + "====\n", + "\n", + "general commerce. But the science in which, above all others, the most\r\n", + "extensive and accurate tables are indispensable, is Astronomy; with the\r\n", + "improvement and perfection of which is inseparably connected that of the\r\n", + "kindred art of Navigation. We scarcely dare hope to convey to the\r\n", + "general reader any thing approaching to an adequate notion of the\r\n", + "multiplicity and complexity of the tables necessary for the purposes of\r\n", + "the astronomer and navigator. We feel, nevertheless, that the truly \n", + "====\n", + "\n" + ] + } + ], + "source": [ + "query = \"what is the structure of our solar system?\"\n", + "docs = await vs.asearch(query, search_type=\"mmr\", k=3)\n", + "\n", + "for d in docs:\n", + " print(d.page_content, \"\\n====\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "42455e31d4ab0d68", + "metadata": { + "collapsed": false + }, + "source": [ + "# Filter by Metadata\n", + "\n", + "Use a metadata filter to narrow down results. 
First, load another book: \"Adventures of Sherlock Holmes\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "146c8a96201c0ab9", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T03:10:38.028478Z", + "start_time": "2024-05-10T03:10:28.620287Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Adding documents\n", + "generated documents 1290\n", + "added documents ['ddec2883-5776-47be-a7bc-23e851e969a9', '19d2a5ce-0a36-4d32-b522-2a54a69970e8', '5e6d8ed2-4527-4515-9557-f178b2682df8', 'ad2b04d9-e1f1-4d90-9920-ab6d95c63dd7', '19871eb4-b624-49d1-a160-a24487298ac6', '19e9e04b-6b3f-4a91-9717-1565a5065642', '0e9a6411-a6a6-4453-a655-2a251519a1fa', '51de5926-dbb1-4d58-b6d4-47c8e5988042', '244dc860-2bf0-41a1-aa3c-e170c0209c5a', '175e8811-70cd-499c-a35f-2502749bff2e', '5298951a-1087-45f4-afff-3466032301fc', 'd87f4495-95c9-4152-bf1b-90f73652e639', '638f6b85-1e05-462f-8c75-4c642a309d88', '92f7f430-9ffc-4ca5-ac92-953793a4a7b0', '5ce532f2-86b0-4953-8815-96b7fb0da046', '8187857a-02b6-4faa-80b3-ca6d64b0ae59', '599d479c-7c2e-4ad9-b5e4-651fced1f38c', 'e0869e51-92a5-4c87-ba35-a977c3fe43d2', 'bed376d9-3785-4fa3-a986-4d069e817a0e', '31e8e7b1-164b-4872-8a21-0a417d9c2793', '87f96ecb-c78a-4aaf-a976-557a76adecf1', '8f0e7eb2-fb67-4010-bec4-70f1ea319d22', '5d280424-00cc-475c-8334-3ac1dbda84c9', 'a27876c9-68c2-460c-bce1-43cbc734885a', '492fe1fc-0dc8-45ee-a343-3fc514014298', '964a4887-0a21-442b-8677-9568c8ea5b4a', '87b82a96-c981-454e-87f6-171cbaa90e20', '14014728-1617-4ff5-8f56-b2cbcdf08bcf', 'af95852d-e7ce-4db7-9b47-3cfbffcfd81b', '1f6526bf-f5a9-4a1a-9a9f-42e6d175d9d5', '0a214058-d318-4f75-a413-140a4bdbc55f', '0eee06d6-d7e0-4097-8231-af823635f786', '7e74bdc5-f42f-474e-8c3d-0a346227f1e6', 'c8959f06-5282-47e8-92fe-16a90bb20132', 'd895bdcb-b7d1-40fe-a9a8-92647630688a', 'ebb30286-4eba-4733-94c7-f2e92cfcd0a3', '02c87eb9-031d-44bc-ad9a-c3b07b6d879c', '5338e6e3-ebfd-42b9-ba71-4eb728a9aba0', 
'b372dba5-f1e7-4cff-9ba2-17132b51423d', 'bad22837-aba8-4431-a742-e1642abdc1d3', 'e73f3146-9ac1-4517-8905-45ba31de1551', '6903a3cb-acb1-42cd-9653-b208487368e9', '92600304-7eb9-4a6f-9a27-ba14a28fc636', '44e720af-efcf-477a-a655-08967ff3159a', '61565e97-7aa5-4c8c-a330-86ba265824d6', 'de10ad67-1992-4c85-8112-c1a2cd93edc0', '103d4a40-789a-4797-ad86-e50b2efe93e8', 'a5940a28-a9db-435c-b3c2-3485fafba55a', '637475cc-9a54-4ced-ab03-4a5dddb1e152', 'b209df0d-6513-4a74-b0ea-7bb2b2e01c4b', 'e6ee7511-eec8-4b20-94b1-e338093c9760', 'ef4ceb0d-a990-4cc2-91ed-320e1552c98d', '8058b68a-7ce5-4fb9-9f89-ed1c9e04b60e', 'ddebdfe0-87c1-4c4f-9d1f-fdc32b6bd5e2', '5211afd6-3076-40ec-b964-8c4a165b6e3d', 'ac5e9a17-ff57-4885-b3b7-4229d09a2077', 'fcea3094-7d86-4c05-9780-5c894197b65a', '81b0bdd9-5be6-4b95-89db-fe737446512a', '68f42964-0c21-4409-8356-067bbc5eddb0', '28b3dbc4-3b5f-4a7e-9906-b34a30832384', '83a15ca3-b54b-449a-813b-76db3e1f11ea', '1fd0d950-e1e3-4c5a-b62c-487ed11e06ac', '70253ef9-e4c3-4f9e-aa51-18b2b067d2d5', 'f5538e25-1e4e-438f-b1ef-ebc359085df8', '643e6f1e-b5dd-4630-9e30-f323abff5261', 'ef370f62-6a99-4116-9843-ad3f36ca0779', '915d4b2b-5467-4398-ab65-369250ca1245', '830c873a-eb80-40f9-9f6b-92f3b7be4f19', 'a965a589-8717-43d0-9419-7e8fefda070d', 'd686b00b-f91c-4d80-ae4d-5e2b6c227c86', '264951a6-5788-4a22-b925-758bcf0c499e', '32227ea1-1aa6-470c-9148-911c3fdda203', '97474d19-ef5d-4f14-8ffd-09d68bf5960a', '71fc221f-0048-4fbd-a68e-7c40885e091c', 'ef7a0a8c-16fd-4a2a-8d70-0fbf086b312e', 'fae9bea0-9815-4495-87a6-72700c728e71', '3cafc749-c648-477f-9b3d-33eacddc42aa', '31756e9d-993d-4653-b521-ef5cd653ab3a', '6c9b28ad-adde-4055-87f4-e34c387ab607', 'dfddcef4-9229-4c2c-9deb-6533f6c87bfe', '0511f770-041c-405d-a9f0-5e9a2c62563e', 'e9e8f9b2-1879-4528-bc72-1318bd026f53', 'dbcc7682-cf0e-49db-998c-d581df5cc0b9', '95c3a979-92fd-4268-9b1f-36f526fb1115', 'fb602a32-87c2-4fa1-9e15-8d69fd780baf', '163489b5-20a9-4eba-a9cc-660c7d5c0d21', 'beef4aaa-7eed-4dec-a953-de40a20efc9e', 'c61782a0-184b-43c7-a232-772087bdafa1', 
'acf045d9-c43d-4e16-87d1-93b142bd3c3a', 'bb014519-b473-4329-8045-7dbe33200c34', 'ecb12661-86b1-47f7-a4e8-6854beedbe46', 'c2450f1b-593f-4b8f-ba80-ec56bcf802ae', 'bc47a6b4-8a4b-4e0a-9fd2-a9999d0c6c74', '3f2581c2-1c91-4e57-830b-a08cd7b09f9f', '4441431d-d3be-49f5-bf07-d415baced24d', 'bcd90088-0313-4647-a83d-673e470565a0', '3949bacf-8bb5-451d-81f4-f902f2fe8aca', '7925ac14-5ce2-4717-afcb-86da4ec6f933', 'ddbb242d-c914-4608-97c0-d3c5ae817a15', '7c351dfc-8f19-4a49-9a65-b79b5ed8aa07', '65db2c1b-36e1-4b38-8d28-554c37f1c897', '5674788e-a430-4706-9801-cae72ce45763', 'd59612f3-2a65-489a-87fd-27123d8258c0', 'd62368e3-7e43-454d-b5fb-b4e2f37badab', '55a71614-29b6-44ba-ba64-b6444bbb432a', 'f03f605b-3770-48d8-8e96-6b3fb7915ea5', 'a51f8dd4-883a-4bbd-92b2-34581756f341', '6993ea75-5ebe-4aff-be14-3efb9107f616', '99501367-dc36-4589-97a0-0cc9e066c042', '7a5e56c8-48d6-42ee-8a8e-431b31669d98', 'b5394c35-0112-4fc5-9336-3c614d2326d4', '8be29e49-a2c3-4a0a-bc3e-99b74000f834', 'b714564c-8f18-4d0d-b530-cdc593dc4914', '0c93f74d-b20e-41b6-a9dc-709717bfe168', '51ce05f9-40c4-4d12-96ac-9245402a49c8', 'c18e4c15-edbe-4a78-98d2-2979d69b1ebf', 'c3dc5265-f1c2-4057-8408-a3a2ee71f77f', '9dc4d84e-6f47-4516-8173-3dded3aad48a', '36b19b4c-c846-456b-b7ce-76aee21c3783', '74a69f64-0b90-439a-82e8-245063711c13', '11ef7bc0-ac49-4368-ba83-6126d3eae999', '60afad3c-4c1f-4968-800d-6493338be615', '3d44d417-d82c-44e3-aae7-431f920974d9', '2448df31-e100-494f-abea-5815adf0fa5a', 'f654ee44-45b2-48c3-b1d7-5b80420d680f', '454bd226-0d6b-40a2-b0a0-a2de2a6aa8d4', 'd728840f-8f90-4c83-9329-268921bc92b4', '404234bb-5592-425e-b3c5-d391bcdc5f15', '9cab7401-f7b6-4379-81d0-100aa1cd4ab2', 'adde751f-d319-4534-9100-c8a1c4f04729', '54b173cb-80f0-46c8-bdcc-4d56fd38a6fe', 'bc3555da-1e7b-4248-bf4d-2229520b75f5', 'cf8fa6c5-63cf-43e2-a577-c6eb083c1f81', 'bf1f2c37-9c67-4478-83be-952611c14771', 'ea2d5b80-7dfd-4655-b1a8-0f6f235ac818', '6837c567-092c-4568-bb33-6f8b0a1acfd0', 'cb834e5e-222b-45ed-b2e7-339803e8825c', '37e9723d-263f-47d7-8ce3-27fabba84c7e', 
'b63e0f9d-c463-4d03-84e4-66abf9e2f2ed', '2b57cad2-795d-47c6-be27-9566fa63794d', '81807f59-2354-48f8-b78c-296b20cf5586', '15459589-637e-44a5-b2a4-cbb94f2b4176', '29dd752c-2802-4806-8f97-2518fae5bcc5', '424c81bc-f21a-4646-a3fa-12f5694d828e', '398253e5-c4b3-4784-a60a-010e8b289d59', 'e43dd183-64b5-4821-b0f0-e5f759d3e974', '09bdc512-544b-42ef-80a9-2122d217bf8e', '27117536-c539-4eb5-8c67-f090a230f0c3', 'd68e3696-4a01-43ea-b763-d592eed48152', '8c771c9d-6596-479d-9572-0bf5141496f1', 'df39063a-62ad-4331-96fc-c1f064042bf8', 'a4357878-5a22-4b48-bbf0-ba9b19d6003e', '054a4f9c-6237-4624-ba55-a6d4dde70f33', '5ffdfff1-8379-4187-9ab3-8e33367855c2', '57f51416-cec0-437c-8e75-8c53b16a1e1d', 'ad030ad5-0b65-4160-8f7f-4d4aa04df8bb', '2350031c-86db-4926-baef-4337fdf0f501', '194a3bdd-5f37-4056-877f-e3a9f14e717b', 'c5ce3142-2dbc-4284-9a1b-6a82f17496b0', 'e86823b9-bdb3-4d1a-b929-8ec5ea69fb3a', '5c2913ed-c4e2-4fe0-81ff-406bb9b452ad', 'b2c7b0b9-5288-4000-89d7-43c1c21179bf', 'bd1e3aac-bf70-45c0-a519-91556bdeb3ec', '3b63324c-96ed-4d6d-bac7-a611fdbe4eef', '670050eb-6956-47a2-8fc0-a297c1cc5cb0', '53874ec0-1513-4603-a805-23433947b10a', 'd31c0999-46ea-4d4f-a60b-b3290931efe6', '39d99205-5de0-4234-b846-9758e67eb02b', 'aae705a4-9398-4068-90f5-59ae5569894f', '2d4e003e-ab3d-4759-b5bc-681d5b082f92', '67cd305c-7f2d-4847-8e0f-5cfb6708db8b', 'b5314a0a-57ea-4e7a-a47f-eccf626c2370', 'a5e7a9a7-824b-4622-8b55-8cbdfd5a0d00', '818cd68e-6f21-431c-9bb6-c7e03aa07327', '3bd22a1c-7a2f-4496-9b10-9853a277f9da', '50a8184f-f19f-404e-9877-ea1077230e4e', '019bcc50-6123-4eeb-8546-5250538e61e2', '4fba3e0d-6bbc-451a-a178-3bb18ee078fb', '091602d9-05e9-4901-962c-3270bb9c312e', '3b22a002-3b7c-4fa4-8ee5-cc2c3e8d4d56', 'cc8008e9-d40d-47da-b54b-b3f424b24505', '2f947fc7-5c73-4851-a963-21f509a4896c', 'f75586b2-2ca2-43e1-bb6e-f822d27be24a', 'd490b5f6-a992-48e0-aabe-ce0bce34d670', 'df79b0d2-97c5-4f27-b666-2adb5cd0f48a', '2bd54ccf-c293-4314-8c1e-a303644d6bb9', 'cfa5613c-6655-4a6d-8ab3-e07d7b7728bd', '14f6cf28-64b4-40e2-9268-1f089ea5da27', 
'bd1ee1d9-f4dd-43eb-94bf-9239efd44bf3', '059cf712-6e18-4627-abe5-a1b8ff9cbdc1', 'baef22b3-4ad8-4abb-af43-450662a7776e', '39822d59-2f73-41b6-87cc-f3a2259daa47', 'd09ae6e8-2b33-42bc-9e1a-ce80a752610c', '7eb1f69d-4d32-408e-9bff-8b3db237c0eb', 'aa18b2d8-2a91-4a64-bfda-9c1c0fa57929', '89791f07-8c12-4a5f-ac63-df79bfb1a085', 'e650f78d-c168-4978-bdb0-b49d0dba2d13', '88bf06a6-1851-4f16-a12a-34491bcc3b83', '8bddc597-340e-4e1d-a478-d4b975bf7277', '68925f41-e1ce-4bfd-bd81-2f0f60efc800', 'e813e65c-d92b-4868-bb4c-5ba9db17d3a6', '99ae9b19-35ad-4eeb-8a1f-13e9a87ac29b', 'fd0aba93-6b07-4572-8780-c2d14c517140', '75b7fbec-537c-4673-aa46-31ada7d96748', '971c7918-518e-4706-9f4a-6aefe3397816', '2d465d63-b8d8-40c4-b2c3-2cb890faa342', '8ef2e4e8-2cc1-46be-b16f-d85545b72416', '43a37136-8118-4c80-ab60-8bc01a693454', '2d1c3f07-ae38-48cd-b1a0-c6aef9c93333', 'f80389fb-0d10-42d5-9304-59525bf496d3', '3921b742-4382-4245-ab08-3198477c568e', 'ae45bce1-c90d-4586-b538-9e669b3e8061', '15f18a09-e29c-4918-a2dc-339848b41f97', '2aec25df-faeb-4440-8933-7102573cced0', '49865b06-63c1-4402-87de-28536f7c1ee0', '53746900-96e8-4ece-b9e8-97a05362ed07', '761605bc-6868-4385-aa27-e8c70b35a6ba', 'fd7f52b3-0c01-4560-becc-1b15b2764a38', 'bf573243-b01d-489a-93d7-84965001ab46', 'ddb07668-0925-4a47-8d06-b8e728b22e2d', 'a0237897-65f3-4dec-ac16-f9d99575977a', '7354e80b-1dba-4618-aaa8-d20fc62f0d6d', '0e3abbf0-dc37-4070-8e88-f010c29a3883', '2ef6a642-43c6-4ed4-83b1-c3031a1a856e', 'f72bccde-0463-4b64-ae32-c2df5d3720d0', '4bdce24b-38c6-4f41-b989-c05b88c81e76', '073c3bfc-88b2-4d0f-9933-666975be19ff', 'bc580063-da93-4bff-9598-aea9b70b7469', '926fbe70-940c-403f-baff-aa6f99290a1e', '2186ca44-c989-4732-a6e1-b82a5b1f3422', '893bcc70-e07f-4b43-9b9f-31da760ecf03', '99e2ade8-0903-4faa-b6b7-c484e038dd93', 'b24eb30c-ecd9-4d3f-9480-33aed6622c4f', 'dd34383d-bb21-4d12-894d-8e5c1fc4673d', 'ffbbb894-c909-4ae1-8c4a-3df4ee8e7471', '6eb91e2f-8f73-4a22-99f3-fd7110a46ee0', 'ab957290-f34a-46c7-961a-c08ebcdcfb17', 'dd2b4477-6c53-42f0-afd9-a905b86ab0c6', 
'3b5bb293-b75e-4179-ad61-295ecb983919', '33af1259-1441-4b43-8bd0-4a120b7ea110', 'be7af3f4-bc14-4d34-8f4f-c29f25d7c464', 'b5319be8-f252-4f4f-98c0-8270fffb8d46', '3cd3e990-fd39-4300-ad8b-eadebfa6f51a', 'd82e9df0-0f27-4a86-8e15-508ba6e47291', 'a57c895c-b447-4429-b90c-c4c1c1555fab', 'e09b53ec-97fe-4269-bbf5-096b59c7b4ae', '95155dec-e2a7-4c18-a1c1-eb006cb05840', 'b972bb9f-82f7-48e6-a744-f3b9d00cd97b', 'fcbea328-096f-474f-867e-7d499ba4fade', '0f49f69d-deb8-4715-81e4-ab5a34136a12', 'e9eaeb38-86ca-43f0-ab62-089e86221b1f', 'def3d924-a214-41be-ad00-9b06c9f5e515', 'a72efe0d-7c19-420e-b2a2-f3800fa2e432', 'caa0e90a-53b9-4ae6-a022-0f48a273432a', '8727b6b4-37df-4fa8-a32a-0f4e0ab3e9a4', '9221c119-52ad-47da-905d-6cddc1efd9b9', 'c91d2e2d-174a-43f5-bd93-ca35788e8eee', '67081e29-4449-4be6-872c-5a76e2297336', 'eeae5f04-399d-4133-9e7a-d0d387c58d48', '1a36d310-4dbe-468e-b521-31fc65f1e8d4', 'fac8326f-cec3-46c3-b1c6-8d17146955a5', '08db75f5-658c-431f-ac6d-6a26e22c7d19', '9e5075dc-7714-451a-8a98-7093f734f2cb', '61d01b83-bc4d-4549-bece-00fd04057274', '92ba0c63-0575-4b5d-8e71-a21cbbc662b4', 'e1249e60-cc0d-4232-8c45-6cfe99f30ca8', '94448197-bd9c-42ca-8bc9-e385ec3520be', 'f65ef8cf-009a-46f0-9ef8-ebe99759ca95', 'ad39d970-8c4e-44ec-94c5-402b54564d5a', 'ec9578a2-b5c5-459c-9f5d-641e15485a0b', 'd6f4fc84-df30-4b23-bab5-a771027581cb', '8266b310-fae9-4718-b16b-2268a11a66ef', '0aa70d1d-866d-40ba-9904-37fe9ecac652', '7f161631-4a9d-442d-9674-fe94c7d4a8b0', 'b42ec05b-caee-47ee-ba28-bd57213d77c4', '647310af-5612-4f54-9000-c5cac99bc46d', '3540b56d-1478-4885-aa5f-cc487d2413e2', '6a592d02-1181-4277-852d-248e0ea7203e', '37ea11f2-3330-4dfb-8e39-14035b83136f', 'c4a8ecd2-6c60-48d7-a13f-8b3510b1a364', 'd67455a0-d1be-4ba8-b343-fddc705f6057', 'd1043eab-a90e-44c3-a256-34621e532f16', 'e2301dc5-421d-4c61-9ebf-025dfeb53327', 'f213efd1-6eeb-4265-8e9c-efacd175760c', '7f50a247-6f64-4890-ae09-417cb69132e2', 'fbad127d-491e-44cd-8b38-2ea083ef37a6', 'c49cc88c-b9df-4109-aad6-1e61455dc8ac', 'd142b43a-f677-4c9a-8ba4-03bbcb73bca4', 
'3b29ccee-9dc7-4dd9-b9f9-6c4e25dd68b8', '3bf63cee-b380-4abe-b2a5-602bb66b7d93', 'cc361fa6-74b2-47c5-ad23-191e170252ea', 'd9b28007-ccdd-4ea3-9e85-b8e800448890', '8d841fd1-23c0-493f-82c7-e86cec7e70a7', 'ef0c7fbd-536a-4e3c-92fc-4583b9cabacc', '4f4e9fe9-b60e-4e90-adba-cf8b8be7768b', '2acbdaab-7a3f-4312-95bd-cc4897e99a90', 'c4a355cb-2666-4ce6-bcac-3741b6745dee', '2eb77ed8-f881-4074-aa7a-b570a250ebb8', '14f2fd84-30d4-4100-b743-f3fa3de63d78', 'afaff09f-5c1a-4be1-ad7b-b08bfa7a6e30', '4efa44c8-0a36-4654-a0f1-fb23e724f1d6', 'f4b2c79e-e306-4895-9903-068dd1b928e2', 'b446c4c6-22b2-4d9b-a23e-e7340a1d6933', 'ccecf5b3-ec7b-4763-b1da-bd985edec793', '47459705-9628-449c-9041-ba003f817cd6', '53226652-144a-4145-a53e-e6bbdf5afd3d', 'c463f1ad-add4-46d0-a53a-2233cb16d4fb', 'c7a911aa-8120-4a7b-943d-061f40e67326', '5cc67de4-da04-427d-9bcc-f0b051550f43', '62f608a3-df66-48a4-b85d-7d2f6f382e4d', '6da7c7f8-1c14-4cb9-9350-d8218a4f3711', '77f2419a-fdcc-45ac-8843-ce6d41ea3dda', 'ce1e8075-ca25-4230-845c-a7cca62e47b9', '7ef78f30-e2fc-463b-8a95-474673fbdc32', '7aacf89c-8920-4e76-b274-e265f91b57bf', '363d8d40-9338-4c9e-846f-657d85e7e8fe', 'dd1bc021-ca81-47df-a456-51c925973528', '01c8055c-4568-40b3-811c-649b04d78030', '96936be1-ecbc-4134-a939-27938d22672a', '12c11d30-5826-4638-b503-0b4b7bd585bc', '47a6295d-edf6-4ca6-9a20-c21982312270', '18641e16-0a25-4cf4-ba6d-75fb6e98d003', '92429c5a-0d8c-4b06-aff6-8c13e22251f4', '0f33f6e5-61d1-48bf-9b96-1ae549d582e5', '5b5ab3aa-f370-44b5-a0b9-b36d71654300', 'bd8cd320-5c34-4e4a-b08b-a6fd8b4ff097', '7d91c76a-dfad-4aed-9162-66619b0e72d7', '349c7c9d-a778-499b-b4eb-2ba5b492ced5', '27ea878f-59c4-4661-bd84-e157e621a665', 'cb7e03f6-e6dd-42ff-babb-faced49bf441', 'd335601e-ab6d-4be1-9f91-b964e697585e', 'c9ab79dc-0649-4215-aadd-4ef2aea89b9a', 'adc314c0-5900-44fc-ac48-097c362dc71e', 'a26fbb87-bb80-47e2-9e0a-1cca43fda8ce', '20cf1b00-b850-4d8c-9364-27bf01a253fd', '3a7c9db5-938b-4f31-94e9-8ba416010954', '67fec110-95cc-4e94-b7cb-64673b9b066d', 'b431beb8-a095-466b-ab6b-4f27bc752d1a', 
'5898540f-b159-4a08-ab97-00b0c0ed8c2f', 'b915e56b-0f0b-4319-b819-144491be542d', '04016064-57c7-4c67-b4aa-6153bba8d63f', 'a0c41cc1-12e0-4450-b6e1-c54d0447d8b5', '9d082457-50d6-42d9-b1ab-a1997ca424a8', '90b077f3-7ece-4ab4-8d0f-2bcf591e2cc9', '0848b04e-0e86-4a33-8f6c-cbe009edc470', '58bef473-ea0e-4de1-b208-070f7b099361', '85828bb4-2356-4fbd-bc9b-6967c76173d5', '4eeebbfb-268d-4779-9b90-a83286226fb9', 'ffcbe280-14d2-425b-8830-121c2e1b9201', '24498133-6c5f-4980-9aa8-fd28cc5f80af', '21c37b1d-f752-49c7-8487-25e98549e8aa', '572cf6c2-3fbc-4ecf-92e3-6dcfd546d9a8', 'efe16640-eb29-4a06-834e-f8d207a484b3', 'd970fb5e-caf9-4372-b419-ce538e17253e', 'bb01de2d-348a-4e91-82ff-d62f2afceeea', 'b5f05ba5-d28f-4519-824d-19e9d7cca705', 'd0524158-01b9-47c3-a25f-cc782d4eb09e', '318b4246-f068-41f1-8220-85a8757bd6b4', '268394c7-9c8f-4f8f-a3a4-4114333cb500', '415c74c8-1066-4d1b-9147-31c8be04ae56', 'ac5ee5da-a735-4810-ae04-d7fc01841a63', '9d93f5a9-2ff7-4e6a-baa3-f4f0c702572c', '5048513a-5320-4398-9a4a-e89b6bae78a2', '253ea35b-28be-4f48-b1c7-4971a451ea5c', '5c42ef7c-274e-4125-bd18-b65ee6ed2d64', '8640a2da-86e2-4579-a788-b950b3f7426f', '5bb5bb32-dad0-49e4-9f6a-df7d4d67da97', 'e01555ee-d264-4a9e-8601-acffa3e65586', 'dbbca09c-d5ee-419e-b23d-ef79d477ea31', '485b1023-8c14-415d-bee0-7a894dd60f46', 'bec47bb7-0e46-4c5c-ae6c-c8255df69e1f', '9b78df40-14b3-4497-8690-2b6ce51c2980', '45ab50c9-ca64-4740-a7e8-16b7e0fa2bb1', 'd7834f1c-49b6-44df-8e46-7c230e0c5a8f', '7b0854fe-7d21-4ffb-833c-2730187e0e43', '9810d7f2-4c79-410e-a1f1-69b9cde4b470', 'be180eb6-a4a8-47da-83f8-fc8c680cd9e7', '3ab9ba59-209b-411f-a4d1-313a71c0e886', '2079c3f9-ca9b-4adc-9215-2e6e3ff75da2', '9cf9403b-521e-4646-a2a1-13e4d660d33a', '80a8d908-0bc0-47e8-bdb6-5b2f779c61ef', 'bd843e56-aeeb-4081-8706-98be171e2622', '6c81a630-ab27-482d-af1c-310db129984b', 'fc08f758-f4b5-45dc-9c5b-5a42f9e78c4c', 'e1085d3c-b660-4786-8662-1e2e3708625c', '87332a73-60be-4804-8533-d1e5b4840820', 'ad10f490-ccc8-4823-87e2-24ebf7b55ce2', '7124797d-bf3a-4a52-bb93-212a506bdae1', 
'2b5c7a1a-c9f4-4e45-be6b-3ada7037fd02', '94cc09fc-b071-4429-a1c9-b8ec0d7f008d', 'a05a5774-59e3-42f5-9a05-368c83db0e31', '08a452c7-3dbf-452c-b265-1277709b4e33', '2771b08d-3194-462e-a0db-9f18fa969cd8', '9f84b580-bca0-4957-9712-4a70b471e2ee', 'e26bbe5c-c4fc-4caa-bc0b-e8568712103a', 'f1e1e1b9-4e35-4eb1-8576-b97afd95dcbc', '38b8aae1-fbac-4dc2-9255-986c41ea9c49', 'ab5751db-7ff7-4a26-87b2-c2b1a23663ce', '3524ce81-7cd1-4763-8d1a-3eb716ec1aa2', '00d1e3dc-f1fc-4bb5-b3ee-427663891fe4', '62873e58-7ef2-4bd4-9dd1-5f419cb01522', '3a53f743-c00c-45de-a643-66378d821827', 'e3f49130-ddd1-4f08-979f-d63f01787fb8', '4e740bcc-c3a7-4274-885c-30debb96a392', '4a52fe1c-215d-44f6-8b00-41a6d1244e9f', '8953d92d-7655-4b74-96f3-e3a6fa90f74e', 'f9069a5c-a1f1-4355-a85c-40d8e2a3771a', '07ca0ac7-2fe8-4a57-a688-f83976072a09', 'a52491cf-671b-4a97-9c53-4ba0a021e569', '0c5688b8-44a6-4fe6-9ffa-28625ab7347b', 'da92ee1b-c8ed-4562-b8dd-62b86c4653dd', 'f049a99d-81ff-48c3-96a7-e2f6332fcd58', '16aab773-f2f3-483d-9ab0-6980224bb0bf', '8124628f-0e2b-45a8-9c18-b07636801ca7', 'b28bd0bf-d311-40fc-8f07-3d1624db154c', '2f5cbdbe-c6fb-43c8-abed-4edabd62c2d8', '4d5c65b2-befb-4ed9-854f-d7d629881648', '5c2e714b-d1a3-44a5-b7a2-05fa9b49e608', '5b098866-68bf-4159-88f3-746a11887159', 'd02b5096-18a3-4792-a21f-3ea66f16ff77', 'a94a2e79-21b1-485d-98fa-1eb1281bc1d5', '2de74cb6-d9f1-4f1d-ad60-736c37d2716d', '2ffa7795-566b-42ed-a03f-1c13bea4c3f3', '27680ad2-c00f-47b0-8748-f84619de1a59', 'e068c825-0ae2-4c98-9dbe-d3fc6384df70', '164d0a73-6957-4f23-9fae-3508975e7c2d', '0d26baba-e14a-4ea1-b45d-d966bbad9eef', '84886fdb-5170-4768-9808-5d1e8d32b2f6', '8629bc90-e1fb-4ddc-921e-d5b343324165', '7a9306d9-d33d-474b-87ca-d2275fdf2e76', 'e0502cfc-70ab-4ba7-8818-401488018d68', 'be47232e-69db-4d3c-9519-bbbf9e0fb083', 'a876e120-f044-49d3-9187-23b58925933b', '74796cf3-f636-4ca9-b923-e7c8b67f5115', '8ece30a9-3005-4824-8754-43eb2160b112', 'ed9147cd-ccd6-4ab6-9079-1579f03e91cd', '177e9252-d8d3-40bc-a8d6-8c403dcb525b', '06bf7457-03b3-4015-819f-ce574483496e', 
'008aa376-bce2-4ddc-9497-ef26d663adaf', '3b72bcbd-9fec-4bc0-bd3a-56b2bbb6b669', 'e3a90c5b-e6a9-47ea-be9e-608b9001e38e', '737b4160-d29b-4432-bf75-95a3cba43713', '854333ec-536e-4293-b286-4b3525f8a205', '57d7682b-210e-498a-afa1-cf69ac899c42', 'c866a97f-37ee-47b4-b501-1a3ee2f2d4fb', 'ac1e1c4c-30ef-4ce8-be7d-60947f8000e8', '6977a99f-7e60-4216-b164-18b235eb3630', '69a4a8eb-4b20-4ecb-9833-c829c165c74e', '7491e390-8eec-4d3d-81fd-b75fac819f9f', '9c8c1c39-38b1-47d3-957c-e0d97a9d0709', 'b29c8ddc-9503-42c5-bb04-952ef3be5b5f', 'eb431236-1f0c-4a65-b531-4da09d2cb100', '207f8bf3-872e-4e4a-90c3-8a9280ed3dc2', 'a4db6de7-d246-4209-a525-e49707e1f126', 'fe58879b-6425-49ca-af1b-31de68cb5db4', '0d31807c-c40d-4aee-8a67-72a6a3accf82', '87e1a8fd-2ea9-4e89-9b9d-b8dee07c0a25', '5c249008-ee64-4078-a1ec-92be4a8c73ef', '89c7140f-4534-4dbd-9d0f-e7e2db4695bc', 'c708ce97-36d4-4a6d-95a6-7172e30d61ae', '1bed46e7-4188-40c6-bab1-ad9d0afa9f12', '6cb5ff9a-10e2-4d1f-9450-070ba81e081c', '50a74e1d-9ed9-4cb4-83d2-4954e168f15d', '20eff6d1-ded6-4149-b91f-ac9956393eff', '23469f20-d953-4736-9cfd-81dde80a0f18', 'e1a794f6-6559-4b53-be98-6c637f9feccc', 'd7934eb4-b28a-4f73-b770-c0f885382610', '47ece14b-9b0c-4f81-8448-ccb5c1d5b8d7', '0496526e-6901-42de-8084-da3661fe38e3', '893dca1d-2539-4dc7-bade-c4ad4099f6e1', '85253820-3f5f-4bea-ba48-3eab8447565a', '21483981-f7d6-44c4-9640-025219f6a80e', '233ed799-8e76-46fc-9b45-1849ef6fd341', 'fd84ec83-126e-410c-a994-7eb240aafecd', '4b632633-6641-457f-80e9-4c78b28d58ba', '62946c03-ba80-402e-967b-6fcca9647b0c', '4aec542a-e1a5-4942-8235-2a116c06e9cc', '155b59fa-5d83-422b-83b5-d04e0223fabc', 'da0a190d-e0c2-4972-b152-0398e85ec8c8', 'ac57831d-a2c3-4bef-aab5-7f0253453877', 'f6f23d32-1d81-457c-b2d0-90f930a29f1f', '0b827adb-7b5a-4009-bf65-1158b31f6fdd', 'c8b1a1a6-e897-4ce3-b10b-c6f6380270c9', '4b2ce6da-71d4-432a-b3b5-c78808a5eca8', '45a19173-ce1d-490e-828c-92dbf18ff9c8', '77e13001-3d6c-4c0d-9005-c16f2217aec4', 'eb0d7792-8b3b-4e85-8f54-c620a3031129', '6a38c546-fcf6-4ffe-a20f-d91887c091b2', 
'976e396b-39aa-4b02-98b7-88361db9711c', '7497a745-5779-4041-acf1-9595ad72b4a5', '7f97e948-00a7-43f9-b7b5-276eacf66de7', '5d6a20b7-37ab-43bf-b345-d23a0595aa3e', '0d6f105a-ac5c-48c7-91f4-1953a0cb8078', '3a93fbfc-6b6d-453d-ae85-1620fbc612e7', 'e70eb078-a63c-4105-998f-24ad0b41fb30', 'f9bb5af7-1547-4edf-8dfb-d05390d67c3e', 'efe67e50-e915-4449-9c53-281d579273af', 'a43a029a-e205-4386-9ecc-476f76201329', 'f0d2b90c-410b-47af-b927-bbfb5eaf920b', 'e7f9ba04-d412-4fc3-b238-658d661f84bd', '5b814018-2152-402a-9502-63251b2edae9', '7bf87422-f89e-4bf7-9589-6688e0ff047d', 'fcdafcf6-a465-44a2-a52b-0650ba0cc5e7', '3d14a2d6-9f21-40dd-ae51-c12ee240d8db', 'ba58cb8e-ceca-428f-86ed-7b92fc69fc2e', 'b337913f-8451-47a3-ad28-0bf87e0643d2', '17944dc9-da80-41da-8efd-d5b65b450a27', 'cf90c388-13cb-49a3-9762-7ef6c6d499c6', 'c7f75f93-4e5c-488a-b3a4-5f111dfa33fd', 'b1291f75-29af-4b0b-860a-e77814d48c0a', 'ac57a236-33bc-495f-9123-1ea8fab126d9', '9c025d9b-94c0-4fe6-b76e-7c1ec212350b', '75928670-a256-42ed-b51e-9543b8ca1635', 'cf864623-74f5-4569-957d-bff279028373', '69efb8c0-9e46-45e8-a283-95e207ab2b59', 'fb21b068-3c9d-4e70-abbc-47810a638dea', '70a11a66-213d-4e5c-b20e-eb2257fefb85', '9a12459f-c255-45ae-84b1-6cfd9738ab2b', 'e21c0822-6e8b-40c3-ab0f-507a05cfe391', '5e7dd42b-1e3c-4ded-920a-138249a90e09', '018424bc-2b43-4895-bf27-25524f6f8380', '94fdec88-9cc7-48f4-b041-5fe7a0e43bfe', '63e5b952-9638-4eb0-a147-6621d743f64a', 'cb06d83c-b5e1-4eac-ab40-c47df0c2bf4b', '09050891-3b7b-435a-8a3f-d7c78a7ea59a', '742c43a2-d8c9-4384-ae47-74355916bde1', 'b22fd18c-13e8-492f-9206-f270e1c0063b', 'aad7bb7c-276a-47cc-bcfe-9a9928a7dc6a', '3016123a-a327-43c5-a94b-fabc3088031b', '0c335735-ee19-40a2-812f-f5a6836bba19', '6c662047-b05b-4584-ab28-41ec154e8bc0', '5c4f8bff-0af6-432e-8849-97cdaf4cf22d', '21984a20-8f34-4c0c-8ce3-aa501087ba00', '880a3c33-50ec-4de5-b6b2-df0f1dacd2a4', '3bd98e39-32bc-4132-ad22-6a4b72ec0a91', 'e5d40dcd-44ae-43a9-8308-849fe531e379', 'eecb4031-c493-4c33-9c13-6be3a0f7073e', '66346330-20cb-467b-8951-1041897cf4ca', 
'40eceb17-1c9f-45c9-8348-ff758fa9143d', '8122f3c5-49b0-4ee1-ad1b-17ab77090e42', '0015ee0e-5a8e-4fa9-8865-44ffb03f8aef', '3cd77bc1-e029-405b-afa8-17636d05fc54', 'dc315018-cd05-4201-be20-97475462e360', '8bf39c3c-11eb-45cf-8184-d4f479730c00', 'e1077c72-742d-4b92-9af9-809300848b25', '44cf6c9a-bc3e-4250-ba32-65e1a9ee8dba', 'ba54e5f8-d38c-486c-98a0-6805143916ca', 'edb6948a-3200-4bcf-9f21-b8e4cf9855cc', '0c331edb-93bc-4c05-ba8c-df5f9b8b5e3b', '5f534334-cad2-4e1c-8496-26e7c2851c56', '96921e5f-4581-41ac-ba19-481ecdc109f6', 'ed198579-9a17-48f9-becb-964d949610d3', '120b410c-dad9-411b-9ffd-54c21f9d9fe4', '03163388-ddee-41fc-9fc2-5a58d9cac277', 'fca0b907-93b7-43bd-988c-89361da1bc03', '343aebff-eb62-454f-b7ec-757094f90a37', 'a7fd44cb-d463-4629-8177-21a665450898', 'c1763fbb-2ef1-4bab-a748-ec22cffc2f45', '4f61e368-61a1-492c-a48e-94dee66ad83f', 'abe369c0-598c-4b89-9bde-d386bd7ac9bd', '10e77974-2e6e-4613-81e7-7666a782558a', 'f48eb466-2d0d-43c9-b207-f3b651288ab3', '381a22ab-a5c8-408e-9f8d-cdf1a6feddc0', '45432e3a-56ff-4001-8ea6-02cf98a53d9b', '1422db7e-2293-4dfc-947e-42c49dc5759e', '354445f2-d83c-4989-b22a-89e30ccfd5e7', '180a43ff-45c0-428e-9335-fd8ef5735acd', '14be4de9-e856-4ffa-9609-d692b832c32d', 'b00469f9-32b8-475e-ba32-7ef554e7f7eb', '72ee7187-3ee0-40ff-806f-8800c05ecd4f', 'b0b0313b-608d-4cc3-91c5-f3a210d3165f', 'd60da061-2da4-4bc3-9463-35c90e1437ae', '2113c51c-00e2-4f91-b7be-77179d6dbc04', 'aa221d26-5f13-40e2-8235-b769e1db1428', 'e9c1409c-b027-4812-8c0e-b46c10fb0b0d', '53d33101-763c-4d08-9d26-eb2df89a0aa0', '4fc3f2b5-4257-4896-a601-3866686574e2', 'be8ec969-4ec2-4cfe-982f-644d8c35c7ca', '5022fde2-bfb7-49a5-91e0-fd9817a2141c', 'd28ae317-ef56-445f-8faf-0d72bc83d624', '993d9ad0-37f6-4f35-9df3-94ca56959f7a', '6a5357ed-8024-4c8c-8cdc-9d6d4f84b398', 'd27fac31-8b44-48df-b96b-25b0302cf855', '8ca616fa-01d7-475a-bdc3-e64bd544401d', '99bf2bc2-d4d0-45ca-a315-05b1ebbbbf7c', 'c06eeaf9-5803-4445-8391-f81b1662e4d1', '3456c0cd-0926-49df-8df7-4b9903fdec1f', '33038505-ee5c-44a3-b822-61964af6e5b7', 
'98344f14-7384-4f4d-8d64-6f1c34b87715', 'dded1525-4c57-4874-bd54-788afea27583', '6636a5a6-f8ed-4891-ac7d-f1d2cea00a31', '83ccdbca-227d-4123-976d-1f36fd71f113', '7049651a-5ae6-49a0-a5f4-eab91585bb54', '06deee1e-3704-42e3-86b6-f9add0efc342', '5620d72d-5aaa-4ea8-baff-13172b1422c5', '818723fa-30ca-4a87-b360-ba2c7a0a99a7', '7a959cf4-1c86-4a6f-b0d4-80ef6457feff', '4081652f-3de8-4be5-a05c-4e005c40877c', 'ab181698-1c8f-4915-828f-11265d7cc12c', 'fd09a8c3-1d98-4023-8727-f4d01264eed4', '20395e65-de1c-4dfd-950f-90ad3fbdf116', '0a48e83a-d248-4da6-a37f-f0b18bf6d463', '352ecc06-ae1a-4ccb-b51e-69978be04ee8', '0e3074de-6497-492b-956f-2b0260d8b594', 'ccf599bd-a5d9-4468-9ff2-f160361c3930', '84a8b297-f730-481b-89df-509caaa7c71e', '3f83901f-8b7f-4d47-a63c-e7b469525be3', 'dc4cd5c4-f74f-49e8-bc4d-79c6fbeb6a53', 'defbea4c-0a86-4e51-a4be-4904e7241473', '95c0f6dc-9e6c-4eca-adfc-cfbf60a36766', 'c6b88c3c-14e7-41ec-bec8-52db96600e45', '57151ee1-c317-4b92-9063-9026fcd2d714', 'ac1a48f3-c04d-4581-972d-fcd8ac0475fc', 'dad57afa-3641-45b2-b72f-526fb7d3bcc1', 'cf34815b-73e8-4b54-8679-2feaa8d13de3', 'ec1e87af-dfee-4647-b8c0-fb20289fcee2', '8706fb54-7fd1-4aa3-b722-7d8fac4f827b', '0bfcd3c5-a38d-4404-bc29-ad8d3a9c4c24', '43efecde-9069-49a8-90bc-f8feb40d12b9', '2b5bf77b-82db-4503-97c8-9b0788ee8b22', '9f09ad54-c487-401e-8cd8-ea3a2280543b', '729e6258-bc72-4570-bcb8-d843c3267915', 'e7a1ed48-2e5f-444e-8d9d-c093b16ebe44', 'ea3157d6-fe7b-4e54-b5df-5379624cc497', 'cfa9d41d-3a02-44ea-91f8-953eb3182764', '59920d50-9d77-4fea-8d55-4a945acb63c8', '501eaea8-1f32-4229-80d1-2c1b5fae67af', 'e1e45db4-3118-4547-bed9-4c716a11159d', '5e1a930c-801f-47c6-9cd7-7b59afb19e25', 'b093c56e-4796-4d49-bd47-ff25c77cbfc0', 'd83bdf9a-1d8d-44f0-ae07-4c85fc160883', 'b5306c1c-5720-4e74-8144-fdbafa5d7985', '874d33e0-910c-491c-a02f-912260fa2a51', '8e49476e-b869-474d-a4d9-f14a70cef522', '179210b1-13e6-405d-8be5-d0ae62dfa88a', 'ede69a6e-c0ef-4473-b9a7-f1b7bacea0b4', 'b57049b0-7f33-452a-896d-b3f5b85758ca', '73b5bbed-bd34-4007-8fb7-fbe28dc000d1', 
'51179581-9016-40e3-95e9-e9066a8612f8', '0f51ca0e-7ca2-4f9a-96c3-099be2513ec7', 'd38538a1-2ae7-4110-8e59-049026736ba5', '90f9ebbf-7a12-434f-bc14-bac0d48f1ddf', '7f6b00e2-7322-491e-94dc-1162cffa67a0', '2a65e367-0ce0-44c9-9929-d90c1fc0fe37', 'eb989f2d-e476-4fb5-8b49-ee0594a6be1b', '142b5756-806f-4a84-aefb-47080fcfdd60', '7692fd44-1d22-40ea-8a70-8b185df98ae5', 'cbce5267-8389-4b70-bf07-522d4f39717a', 'c95e0733-b655-4cc0-9126-67f25ddc61bf', '3e485f6e-4dee-4693-a96b-506341ad0448', 'e99f020b-0897-4917-893e-8c546902525b', '4dbe96f9-6dbf-459c-9500-01d3264a16e7', '02cebec4-398d-45d6-9f73-ee8b3d008a3a', '1773424a-c26b-444f-ac50-ae6ab9476007', '39fe13bf-0417-4b5e-841b-76473ee321ec', '4e070586-f079-4ca6-b572-c2706141bdc7', '0e16fd32-50c1-4174-b140-7bafc9024e16', '56023f57-a23f-4dc5-a39d-50b26e76660c', '208ce00d-5063-469b-b608-52b44e58fefd', 'c61279b4-9a7d-4a0a-af05-7c8dc38c5efe', '02e5db5e-5dc6-439e-8ebb-aa962405d8f7', '7df20670-4aaf-48c2-8218-a04a3f3c5ea4', '966522e6-3e16-470a-9564-d4e7dca08d89', 'bb0fbc9b-68a7-45ad-860b-70fa65ddfad8', '4d9f77c5-d29c-47ae-b81e-2a9e7e07127d', 'f86486dc-3bd6-48d8-911d-2810f332e608', '3eab49c2-abc5-473d-b20e-2bbf28947943', 'c8a8283d-5abc-43d2-b663-050d0c03fe39', 'bb0dab1d-3c88-4827-b47e-3832fc3a5972', 'e14077fb-ba50-4235-b366-fd10724fe8a6', 'b14432bf-f0a3-4c69-a2c1-fcc60e48c254', 'c4fcb912-3b7a-484c-8829-8cfc799829e9', 'd88819a0-237b-44e3-80f7-9175c7bef484', 'a4b32777-37e2-400a-bc8e-89de8b3c34b7', 'fe989eaa-e5bc-41da-8393-ee95163fb109', '6e2ea477-9d45-468b-a188-4e91190292d2', 'ca01230f-fa59-4cb5-835e-011bbc75e25a', '1ab716a0-bd75-42e1-a200-0bf7ff94c351', '32b05b54-16b3-4a3a-982d-ed86e1711b56', '0c70d942-64d2-41ef-91d9-d1482b42a64b', '27a768f6-bc85-4cf6-bdae-c291de154696', '438b9df1-1e93-43ec-85c0-d95945edd11f', 'd96e5d7e-0b6c-457a-8aba-e3bd4bf22188', 'd1650e02-5777-4bb9-8ac0-1758cb3d48df', 'ee77275d-9118-40e6-be7d-6d4def126c28', '1809f320-111f-407c-be6e-967821501cc3', '0dd0c204-eb8e-43e0-8642-499bfe9864af', 'e8579d44-51fe-4f29-a6ee-cd01a6fe2730', 
'6056b55c-b4ce-436e-a906-d7c8af889a75', 'f9f00968-73d6-4816-bf04-1a994f0d8047', '8e9f5ed7-ea4a-46dd-a9d6-c80f17684bab', 'c4b18d3e-957e-4f24-b917-8a1740fedd9c', '6a2a5685-b8d7-4a56-80d3-bb1e5dfcfe43', '0b77a9e8-08d9-4f09-992f-c07a706cd476', '9f33555f-7429-4e6e-8021-ac93456b5b6a', '324f5e62-cca2-4824-88aa-d0d9c80e8ff5', 'f9de9b6e-7ed7-4488-aaee-4f53e387c121', '8808cf29-5a3f-4e63-ba82-8ab2a112899f', '3873a03c-c090-40d9-b74d-a372329b71e9', 'aba96395-220f-4223-8c33-ceaf8d40fe53', 'de689115-6d31-414d-a738-45c2d5a89196', 'af5a0677-8d11-4291-9ccb-47f2f32ee939', 'ad83fda2-6800-481c-848c-dee120da5b1d', '2bf05c38-ced0-4e8f-8261-f5da6ff2f9f0', 'ea3e198a-3128-4655-a085-b9f952572d78', '4c0631d6-6482-4465-bb7e-7c673ff99c12', '19540e55-5b04-4135-8297-d97d8583244c', '03a679e3-4fd4-4d5b-a076-b6792045fcc9', '5554db4d-2497-4788-a7a0-ae6300870da8', '0c8f41d3-6f83-4ab8-8620-8f023e53449a', '2652cf84-803a-45c3-8913-90452c43ac6c', '04e844fe-6920-44e6-bb06-46025ff83b3d', 'c13c4478-6c1d-4a22-b559-874ae9e9e347', '3d5f1acd-2933-4e11-995a-eb9388e8277a', '0362da56-5875-423e-85a1-1bc5c1c059e9', '1b349918-9828-4fed-ae81-24fc8e6a9463', '5b280eab-3331-46e9-931b-a56101a53c7c', '1665f5e7-3bc0-4369-9eb7-0d75fd062d4b', '0b156382-40cd-41db-9846-93a7de33cfbd', 'e8f9e94a-0aa9-4433-961f-a8f375ca8738', '111a58c5-babf-4807-b9d5-7f518007e24d', 'c1af6bd3-ca6f-49d1-b2dc-76657a2b2e18', '52881ebc-291e-4798-9324-3228cbcbe2a6', '1468e609-4a58-4594-b76d-616634f06f83', '87e456cf-7094-4e67-9551-c992f417437d', '5e2458c3-5c7f-41ca-bcdc-ed6599191fec', '9cbd340a-7dc1-4829-95fb-6cdd23bd86d9', '252c98ba-b3cc-40b8-8ae6-e5dc0946df55', '96bdfd44-2c03-471d-aecb-438e36d9c5e7', '1d1b90f8-f6ad-4628-b1b7-1da215ea88e4', '46d5c86b-fc1f-4f48-aff6-1c3d9385a888', 'fe4b6cd4-dce6-4006-ad73-c39a2f693f77', 'b952571c-27cc-4084-ba0b-ddb176b7fba4', '8c22f5a1-7639-41d6-8aee-e937e4a7b167', 'ccb754fb-3317-46c1-9dd1-3eaad9ebcff0', 'd42c6eb8-c397-489f-ab67-6a8ab3b3206e', 'db54bb81-1060-4791-87d9-586485252d9b', '2dfefa48-9d49-4a1c-964b-90dadda8a40b', 
'21d2e5cf-75b6-4e78-8706-b0b2125cd53f', 'dabbfe07-4de6-4959-8b1d-418132a1795b', '20b1a09b-2378-4ab1-9312-709ec3d845e6', '4da76e6f-d4d1-4468-a48d-2040f9727a0d', 'b9076a8a-39fa-4ff3-ba38-6b8d21b57b18', '43a9823a-525a-4731-bbfb-6f791718d55d', 'b2841ede-db4e-4743-8df9-9e9d9ed892f8', '4c27fb9a-66a2-48dc-a4d4-896ec87ffe1a', '21ef7798-c937-45eb-8428-b59a80c7290a', '110bfa8e-bb32-41d1-adf9-964c10a9ee76', '5cf10519-b699-4c1b-8f22-dfe9c7771161', '9da6918f-c2f2-4fdb-bd80-194220fcf989', '41cfb084-b1c7-4033-89c9-d67531384ead', 'd83e0652-95b3-4cd0-b17b-6d24cacf45dc', '58670ee5-4e97-4b59-9fa7-f969130df90b', '4d69eadc-23e2-4e98-af8d-6653f15563d3', '23b27688-6b1e-4431-8d64-81057262e7cd', '0a2d8bb5-16a4-4d53-85c3-1a134fc1014c', 'afcb3e74-3071-474b-8edf-8ffe4d1893b5', '90230c1a-41b3-4c31-89b0-04f5795d4fdd', '4e340de2-8ecc-43af-902a-78eb6baf587e', '188dab93-af56-4389-8879-e103a2cb2728', 'f23bf534-8bbc-4ae7-9eca-a9b4d54a96b8', '91093f44-eb86-46de-8f1f-62cbf71d60e3', 'eda66d17-ecd3-4029-b5d5-70e89510f9f4', '07809a86-b71c-4462-a42d-63690f16f623', '1ea65d5f-f256-435b-8288-6626c539f54e', 'd9321a2c-2965-4fb4-ae06-9ab99e829f95', 'd82dea44-9fa6-443c-985e-425e7782a8ea', '74bf3805-fa7d-4953-8d83-a1943b4d0810', '01365b0f-4886-4e0a-b9ee-82894430bca6', '931c5610-7fc8-498e-ae3e-d51fbb5067bf', 'b20d3cec-4c8e-41a9-ab63-9cf2b4bd0347', '3f9915c5-4f01-4bff-9592-c3aaa52b68de', '0d991e97-9bc7-4bcd-aa98-f0c2cf04f37a', '552eb22d-64fc-4530-956b-f4ba81e6ad0c', '5822a97d-b39b-4a8e-aaf6-66c0f55442e8', '34d72a45-7010-4d21-a036-4a2438622e84', '93052f03-d3f6-4e99-b33b-fbf01d9c4015', '03f5dfe8-049d-461b-85c3-6fc69fc8c283', 'cf03deec-23d3-4660-8c2e-b8bdefeb9b0a', '5fef6365-8c1e-4ed4-8fa1-57358c7b9fe4', '7b2ce29d-8516-4c4c-ac78-9ae1dc3691ce', '4104e677-2ae1-43a7-81f5-9cfc1b727c56', '8b577111-1c19-4dd4-9454-b667542fc14c', '747abfd9-f93b-4a35-a33f-2439d429e01c', '467b6110-a23a-413b-adaa-144687c8b582', '998dd0ad-3f08-4d96-9a8e-e9f183ca2f99', '7e96da24-14b2-433b-88f6-6996fa4b908c', '966380fc-3da0-4d88-8cae-78447e5d7da7', 
'6c507807-fc6a-462a-a752-6047b0c4aa80', '1eaaa15d-ff01-4799-806a-f445bb595cc8', 'f3e352ae-6087-423d-ad5a-8b74f74005a5', 'dec59be4-f19a-457e-8e5e-e8f83adc14c3', 'd82ec74f-6934-437d-8c2a-453571ae6c22', '951b0827-49b0-427d-9221-81eb2468bac9', '9577494a-352f-4b77-b8f3-0b636a94e776', '6387a9db-9a58-4c0c-8b55-672fb90a3b06', '0e89eaf6-9e6f-4182-b757-2b9d34b82e01', '0be9332f-4228-4dfa-aa4d-06df280f11e4', '3835d593-3474-4ac5-977a-7f3aa3233c47', '3131077e-71ff-46ea-bc8a-0e1b739cf1d7', '6b6b9fa7-a4b6-4972-afb4-8bd080746a3a', '5d657cf5-5def-44cf-9c73-cd5b8e4b4da5', 'fe0b73ca-891f-46ed-bcea-14cdf1d6081d', '725c1567-7319-4680-baa0-ce8b36462aac', 'c00aebb1-09e8-4033-828e-1e814976e43e', '935aa743-5e45-4ec7-9a13-3c451f051c57', 'd9ef103a-8dd5-4980-9500-80ea5d7f6f89', 'bffb275c-f674-43f8-b6f4-79a47f4e86ea', '8532452e-3204-46f4-8a71-5eba74002460', '0f893cfc-f4af-4c47-a870-e34b69ce837c', '3c1dd867-820e-44e7-99a1-e012d254371a', '6b42ab6c-0be1-4230-8564-4f3a0e8aad13', 'c959bbd4-979d-47c9-a901-dc89dafd4d32', '1f646ccf-5d76-4de6-b8a2-94b6d0c607e6', '845a5953-31b9-49fa-a131-96022bd91fa7', '2bb3aa71-526d-4b6f-a946-dee0f6e14337', '3b660af2-7cda-4118-842f-b7425cce0a68', 'e9dc4ab7-a302-4228-81a1-a43bfaec1164', 'e614955d-7776-4273-a8b8-d34f6f79ab8a', 'b0c04914-85fb-4225-810e-8a51136853ac', 'f1ced455-9899-4c94-bcc9-dd5580ee0e1b', 'ed57a0cf-58df-4b0c-9210-b618274047fe', '9d891ff6-69b0-41cc-8405-bdfca1093e9f', '438a4b88-6dda-4269-af1f-1b6c4b033e93', '7e0187fe-cff8-400a-8b41-7380564ecbc0', 'fc2a4192-9c3a-4691-a394-680e120ff67d', '8170a346-4781-4913-bf87-adddfb94fb3f', '8aad025a-2761-4cc0-a02b-d89232d2bf1f', 'c10b68ab-7a7a-4e1c-9d9b-09ffc0ea3b03', '3840edfe-a987-4127-bf96-cc48530700cd', '67b4e872-4238-4ed7-b87d-cf82945b5623', '173d0d33-b241-44d4-8ae9-f6e805482dac', '399686aa-be33-487c-b7ba-b282d8350b7b', '5746c9d6-3910-4e15-825d-2dcb61ea83fe', '99a4ff73-fcd8-4b7a-9299-8af82881f974', '72a01bc6-12d5-422b-9609-84e75db9a884', '96b058e7-3175-4d82-aea7-f799a0b0c99f', 'be7cb997-b4fb-4ef8-a5d6-7c64ad1291f4', 
'421aaffb-c82e-40dc-8d38-9898d99cbc26', '3dc31f9e-9bd0-4161-ad17-d959c813fd11', '53e5e0fc-8a69-4cfa-aec6-b0ce3b9edf94', '8b726202-b708-4739-a163-254b6fb39313', '8f962a0f-1ccb-461d-93bf-5e81fccf824d', '94d83e6d-7bcc-4fdc-93ed-6ba68298148c', '796b0c45-9307-4939-a30e-a5f27e5b2684', '0eee3b1f-00ac-4e2b-a98b-49cf63946c2f', '311fb727-5e71-4e40-a012-555f9a446d05', '8cc7d87a-78fd-4d45-bc1e-553f3a3a27e8', '2ca6e65e-5871-42ee-90a1-734a0136144a', '522959a7-fb80-4765-a7d0-2d522dc28faf', '51de8d48-3261-4288-a3f5-380223cdcc58', '4413e19f-3837-484a-91e1-56b6099fc231', 'd4e0fcb8-5772-4bea-a791-5b37fea8015e', '4d267849-be47-46d4-9c69-937bd09af531', '6874cb22-bd36-4d4a-9b90-b0f3996c4e99', '1916fcc1-b219-4215-b9e8-81adee18369b', 'e44fce12-c169-4132-bb7e-0dcdcd159631', '0cf9cc8d-faa8-4c1d-b3e0-7d2521184620', 'eff59481-d6dd-4705-8ee3-2659ace39f4e', 'f0f5d3ac-a5dd-4606-ba00-f9f2f822257a', 'eb59557b-b43b-4090-863c-a213b86c1253', 'aa89ca11-798c-48c2-947f-c6338fe1f5ad', 'cb672671-6930-4df1-ab16-3b93cc1fb317', 'e000050a-f23d-4a9c-b7f2-7857422518ad', 'babf30ba-c930-417a-8276-a44be7de21f2', '9bec8178-d59c-4ddb-a098-fc7114384adc', 'd5fcf26d-c357-41ca-bf2e-83a9c934922e', '8903ce30-dd24-4855-b231-04ca7078df2d', '02d922f8-31b6-4e0c-89a5-4e0e04ccb799', 'e6ee949f-dadf-4368-a548-87e7b11536f6', 'e2493301-56ac-4688-9810-3cbf11fc2c08', '43991c07-3760-45e9-96b1-94bfcf18e295', '3fb19119-dede-4957-88d5-0d1d7511c599', 'fefa4b6b-fad6-43fe-acc5-b09a58b51743', 'feb4d014-91b8-4cf4-b063-3db5fa1c5ad9', '210f2266-261e-4c97-86b0-c28ed536644d', 'bdeb5530-7f62-4a41-b7b3-15488cf0b46c', 'f09020cd-9642-4ce1-97ff-7fca54906d2c', '2900465f-4868-483d-a1ae-428094f0f476', '7084ac95-5d06-4794-8df1-1df11413b78d', 'ff3c5d6f-f241-4157-90a7-67ac26f8f006', '4d2cec7e-d5f1-4281-86ae-658a8fa41fe6', '1fca1b8d-e135-4bae-a10a-385e383d9653', '2c8b3a6a-9da3-46e8-8a32-49cf12335cde', 'ae5ae087-cb5c-4120-9632-4d3a0b911514', '197e9820-7d3f-4da7-a8a0-fa2fa80f42a8', '7f557950-8930-44cb-ba42-8e825ed735c0', '46eacd24-af3b-4e66-87cb-e643bee894fd', 
'169e2182-995e-479a-aca3-85268160cc53', 'c2bf5a54-dbd2-44b3-bfc8-2dce20557952', 'b759cf1a-7b77-415a-bb27-8ccffd28d7db', 'a39d982f-e7c8-4351-a90d-3fcf4a988c19', '46f78e1c-4123-41b1-a188-667850b231e4', '95e34313-c0f2-49dc-a0fd-fe2fdeac2a6b', 'b91e28e0-c437-4aaa-8408-e8a02d4b506d', '5e6395d7-06ec-423d-b50a-2ac4f1437fd8', '7903d3c5-95f0-4eec-b649-4d1c80d792e2', '6f5d09ba-947f-4f3b-bdb0-d18c02f1afcb', '22329393-c166-40c7-8d3e-6816bba6e29c', 'e9aa3380-b6f3-479d-bae2-376aa5c8dfb7', '3f7d36a6-e87c-4dc9-99a8-365badff3e05', '5678f58d-46ca-4b83-a49b-4fcddc6fdc0d', 'b274549e-86cb-48f0-9558-2184cdcff436', '89971c5e-5588-4492-9b31-06f6208e7000', '5fba0e8f-e69e-47b0-a343-76cedfdae173', 'b4c68b68-50e8-4113-8957-ec1679cc8dfa', '378261e5-81b1-4c04-9bc1-7c7ae8e7dd3d', 'c10a1b6a-aff2-4960-8c91-329876776d69', '7bc562a3-bcbc-4a52-9861-952c38277fa9', 'af80cb3e-2dee-4fa9-8956-9597e1688ddc', 'e8eaf64a-2297-44f9-90fc-cdc9bca1baf7', '5e8df0be-24ac-4a94-bee0-584ee04a07b5', '064723d6-00e5-4674-a242-464a93ea1d28', 'a323a4db-f6b1-4e74-9798-19daa2fbba68', '29cf590e-00e1-4742-9133-e82a679242f3', '860b7eee-16e5-4f29-aba8-181826c9f72e', '385649eb-50a1-4c1a-9a89-f33886c478a2', '858ddf6e-1849-4aaa-916c-817d51358073', '2ba0a6dd-2694-4477-bc0d-8208353a46c7', '040e4431-86c9-46e8-aaaa-4eb33042f9f4', '6c9971cf-8294-4c26-a621-ba608f2e2210', '9769cee8-4227-40a7-baa0-5979a6781d9b', '3cdaf913-86e5-4c56-8aaa-0e84256a6eba', 'cdb7771c-7b75-4cdf-be63-f5fd83f960a2', 'ba5c502a-b8f9-4ab1-8ef6-f69a33850530', '488c602f-9606-4652-9f48-92368287c697', 'bf8b3a4d-431d-4bd2-9fa2-4744fef02e22', '8448b340-6747-400f-9b41-32da8e44d7ab', '1d1d2cfa-a75c-472a-ab77-20eb6d3c4633', 'c904dae3-f53d-4d7a-85d1-4c8a2dc2339f', '47b7a0e4-0891-41eb-bde4-2544d86127a3', 'b35e857b-8a56-484c-b789-851322371a6d', 'f374f5a2-2929-4541-95b1-0a31790de38c', 'ddc26d09-ccb7-49a3-9268-d5a658fb64a4', '28687620-f1c4-4a63-8d61-39378a106936', '19f09579-6c09-4218-bfac-987279dc4d4a', 'd1f0e4ea-fcf5-484b-bb3f-47d5a45c0bf7', '1c1804f2-7225-4aba-b442-2a10f37fc1c0', 
'a85dff36-4c1c-4720-aedd-2dae93df43ad', 'daacadca-84e4-47b8-a21f-2b50f00098a8', 'aa423fec-29d4-44ba-a751-d519c6c6f195', '4c9f597e-3920-48ea-9891-4911e3655c45', 'ade3111b-5d39-4ec0-b156-8288d09231c8', 'fb99de55-c54c-4141-b09c-717eca8f7d0f', '3f298b84-d402-48ea-bdf8-1ece07f281c3', '9c50f1d8-2a93-47b1-a017-1ffc6daa7b4d', 'c08f65fd-1910-4485-b722-a53187e09429', '7b7bea4d-98f4-4992-8b48-c18223bbbad7', 'cf51c84f-ce2c-4b61-8dd1-546d231409b8', '0551505c-d424-47e0-9d56-78d0a51f8dc3', 'b6801e5c-2242-4cf8-9604-78109cd11e21', '9f882e90-e94a-4585-874b-7ec8d8744e64', 'ea63003d-77da-40ab-a201-b99e4c9a1d0c', '11a6b488-ba25-440e-ac49-9808e106fb5f', 'ad7a086b-10de-4ac3-8ffb-251730a044bc', 'b92777e7-13ae-477b-84e1-a8555d9507d2', 'a9149c15-aa45-4752-a3e8-d032e9f39f8f', 'c6508b93-09da-4085-9516-fca9ece1e576', 'b56e2469-8cb8-47ac-b81f-9406ff4c8fdb', '373055b9-80f9-4909-a8af-5d75ffe56cc9', 'd2dd5c42-65c0-4269-849c-ab323ff18287', '7b148d1d-7614-4c0f-9e5d-a2a1551d069b', '399d6809-9c5b-4deb-81d3-04febd01e004', '55d0e130-d99e-4bc7-9eb7-c82d43b8ca90', '318d3ab5-c08a-4a28-a639-d975903cfe4e', '5e8de72a-e8e6-4231-b369-a7224eab9c03', '0fb3202f-1083-4f0a-89b4-c6bc08d579eb', '33accbac-6ea2-4d9c-bd65-17aecc2d1788', '3569f895-c7d8-4b32-85a1-5aa4390589f0', '9d1d4098-e4c7-46b7-8bdf-cf309f8d73db', '0603eb0f-f10a-40c2-ac2c-c4bd46e26b6e', 'bb0eb5d2-e021-42d4-8e73-fb39903f0f78', '44954583-fd9f-42b5-9d07-69dde1f0ec07', '258c6347-b992-4b76-ad70-be74790730f1', '81056a62-676b-4617-8de8-d9649a13f48f', '003c8f23-db0e-48a1-a064-95b5e3563b21', '02f3da3c-def3-42df-aa07-ef380c40dd50', 'd02096d2-ea9d-4cfa-b6c4-f53497e08ac3', '9b9c22ab-29fd-4b5d-8682-36e0ce7b7f0b', '598ab5a2-ef47-49b5-8519-f3300ceb3d4e', '84c26bb3-042a-4fa8-8b31-c1b2b82167f9', 'faf13981-3b3f-409c-ad0f-51573019e6ab', '22537d25-2ba7-49b2-842c-ce26778bfae2', 'd0e95c2c-c657-4aa2-b384-a4d50f60be69', 'e1613049-4b33-4fa2-a78e-70c4aaaf9270', 'c2028001-fc3c-4e8b-923e-55b904d40229', '4eb26e33-e232-45bf-a404-282144abe044', 'ef79ab95-b4f9-499e-8ee4-81eb5ed67251', 
'49ab448a-566a-4f73-ac9b-69101d468221', '8af208aa-b377-4c33-8fc8-6090b77a7afa', '1dae6998-86f1-4362-a152-4554f89f0fb3', '4b8b9049-809b-4648-ac76-8fca482e7e86', '62814558-2fa4-4e3a-a9e2-cabb62a59e7a', '8cda2b59-82cc-4bb3-a3cf-f12cf5fccadc', 'ec4eb38c-d38e-4302-afd9-524b0ad73cb4', '8a4e1146-c83d-4922-a321-4aec8785aad4', '1b4a6022-38d6-4b62-89d5-f6eb68c60ee8', '84bc9749-fa80-4f05-9b35-5c2ff3045419', 'b11a6f74-2ba4-44a7-80b0-869cabc2f63d', '1196e8b4-564f-4999-87e2-ae9ac6fad282', '6e34e507-3af5-4ac7-bfb5-51d78d7b0928', '6b91f418-6741-4070-ad58-ec73886e303f', '18983c58-6531-4faa-9727-ea6c13176ee3', '755d1d58-68ca-4dde-adf0-851c2b476d3d', 'c1d6a069-305d-4689-a558-17669cf479e4', '5602e6bb-f08b-487a-b3d9-7630dbcdcdc2', '813715af-5676-4b8f-9fd1-a2663f35ef7d', '26924049-c3ea-4759-a2f1-0fdebd1119b4', '9b6aae76-6230-4c92-9b4c-9304e022e548', 'fdc3ec36-ea11-4266-8662-de8352cc52c7', '97e3e269-a605-453b-9dda-c19fea3fca48', '53ae2d8d-4a16-42d9-bdb4-1bc1023d664d', '3211127a-4ec9-4ebd-8907-dffbbc58ec55', '1fc04565-7418-4c10-9ac6-50647eedde5b', '306678d4-3658-453f-a8a3-7456018d6309', 'faa249ba-de7e-4d76-9474-8926644a2a30', '5c7593cf-4a7d-4cd4-bd29-cb734a9634ed', '4023419f-9887-4839-a1c1-b21a49331495', '11f0be9d-9f94-402f-bdea-87147473f981', 'd4b7e836-c7a8-4c98-874c-132496ed2971', 'eca5a70e-7107-445e-a627-e66a2fb6905b', 'd345402b-0b15-49cc-b579-a2c8f8f52a54', 'e37b0b51-218f-4ba8-b863-42444de3ba28', '8a397265-6fa1-44f8-acab-f87a52690dfc', 'bab522f7-2829-4f92-bc7f-e37a3b80ecd5', '459832e0-6ac8-4983-bafa-a5c51c478b1b', 'a4b3d48f-9a05-4008-ab74-becb3639b0e9', '4833ae87-3c70-4121-821f-d1cece4b69e8', '51d7afd2-fa78-4efc-88bb-d134300e00ad', 'eb1cc4a8-d4ca-4da8-81ea-2fe995597d12', 'e26e859f-a6fd-4f0a-8cb2-bf789f2ff3ee', 'b5541836-7150-45e8-9ed9-c30fb8bc5848', '6f83e38a-93b6-46ce-8326-b57bb2b00da1', 'f1ccd644-8e75-4cd2-a80c-f970b516e199', '2ee73550-24bd-4892-b976-527b6f6a5726', '0aa57ceb-5914-457e-a15c-dbcf345866cb', '7f5631b7-39a2-4406-8a5e-3118999bac67', '47896e44-9374-4b91-a256-cf7153bd15fe', 
'd76ea38f-63f8-4557-8297-9e89d8386e2a', 'f92523f0-582c-477d-a495-a4f470e344fd', '2a7f199f-02bc-4f62-883f-9ca54aede574', '324107ce-d849-43e3-8952-4b676f556e90', '3691c5ad-7deb-459f-9a0d-9c9f858ac41f', 'e7f317ff-1164-47b1-a771-9839b24eddfa', 'a3c75ca5-32f9-4e19-b098-d4316e81cfb2', '187395f9-c570-4e47-afe9-06347a813112', '569fcdc1-8b6a-4712-a952-9a24aae208f4', 'b5b715fd-ae86-43da-a2e5-e6be17e808ec', 'd11b411a-1916-47a9-837b-c0abe563da0f', '08c81688-0120-40c0-859d-0fd37adbe64d', '55cff872-df09-4714-875d-a435f2577338', 'a6355884-037a-46e3-9763-d53f0db0dcd9', '23ab1723-68b3-4c19-aed3-5082111b3638', '2eaf5944-ba2c-4564-8343-ba833cc0433d', '89562bb4-4510-42e7-a479-110b2c0083ee', 'abc5a263-55c6-4d2a-9b3c-28be717663ec', '7139dbe6-cdbe-441f-9df7-0a0a5decd365', '89f14c69-044f-4959-ad97-1de41da6fbec', 'f538d79d-4d75-41a6-a2f8-615a87e355db', '3959f1c0-f30a-4438-be70-df7b7c3c24bc', '3bba1626-aa31-41d9-ab67-3322b2552fc4', '4a6e293e-2163-40a7-a364-3a5258deb92d', 'a29a1179-6908-4d3d-a04e-0ee51265417f', '5d941dee-007d-4ee2-bc50-e7f65c35bc88', 'c7dc091b-15ea-4ea4-a2b1-84d58e77d154', '3240b04f-e939-4437-a96f-a88b67ac19f6', 'bad23e12-7f9f-4695-acae-089d9d854c77', '997fdcc8-ab4f-4568-9818-152e7317ce5b', '5a4c858f-fffc-4d9f-bd80-a390acd0343e', '82957453-0665-40db-be0b-bddb7719031b', '6027ecb5-32db-438b-8f08-9fb6202805bb', 'cfccab1a-39fe-4c1d-806d-195e9369888d', '039f2de2-d2df-40d1-a5e8-5113bbde69bc', 'd7c1185c-d0f3-4cef-b2dc-83b175589cd9', 'd4716363-a7dd-4f2b-a78a-b6503cc32842', 'b63785a9-52fb-480a-a44a-e2216a86ce8c', '90dfd53e-4eb6-41f9-acce-f375ce7d8fe9', 'fa6e8c36-d4bb-4f26-a584-0bc7ba59ba1e', '2bb3396c-050f-4aaf-b0f0-37998fcc5a6a', '57af70e8-e148-4c14-bee8-122d6c720af6', '21046f3c-e0bd-415e-9be8-dd57ea7708bc', 'f3513c31-5688-404f-a0d6-2cfbbb808e3a', '597884b0-1750-4665-baa5-fbf431300d65', '4c7fc02b-ef0d-483e-b2dd-cc1f559252e9', 'ef36cecd-0101-4a56-8b75-1fccb434ae0c', 'ae44ff15-8d57-4d1f-9871-8ae0d5b806de', '5fca6739-f372-4114-b61a-a118bd810e49', 'b2fd1280-1f2a-4ed8-a2c7-c982594269ca', 
'07eadb7d-c147-43d1-a8f2-28dbe3fd8860', 'dd3faab7-d4d3-44db-89b7-13973c451e67', '1486189a-4ee9-44fb-80aa-9407f2b07150', 'af1fffe5-5acc-4d1c-bb81-2c6d87a760f3', '9b102876-af60-46c8-af70-0b7e6f6911da', 'f8b86bca-b6bc-4789-8a39-c88ecd4015ad', '7c69ecea-2493-4a1e-8da1-57596aed4051', 'd4702512-48aa-44a0-acac-3e6f1f8109e3', '247be6ec-cf48-4588-94d8-bf12c3cb2c8f', '15ae0bf9-2470-4f58-849e-6f43b09c2973', 'c45c18ef-4f86-45e4-8106-27b335db73d7', '202824d7-f6d3-46b0-8d11-0eba8b61c219', '1b9cf29a-ed52-436b-a8c7-0d7d69e6d63c', 'f2f693eb-0c05-4c8f-8e8c-4c0f84c74b06', 'f5de50d7-536b-4973-91fc-a0c371a87671', 'f663e8ba-35cb-46e7-b387-67b840d9f39d', '2e20e906-4745-400e-a3a3-cf54a0b1c893', '6cfe9246-8659-401f-98cc-e6318feae664', '1ef72e1a-dbee-4190-8321-812d51e7d686', 'ca1a97d3-54df-4c01-80f2-9a7ca7704315', 'c92d09b1-0228-4e65-aecc-bad1b5fa1d4b', '7f0c65ba-28da-4974-bb48-94b88bd73338', 'c224e0f8-8c5a-4c97-a51b-0a19474a30fb', 'b37c52d5-bc51-433d-b8de-3118b421d8e2', '7fe940e1-1771-472e-a840-2b542266a9a4', 'c7c6956e-0b9f-4b99-9e7b-5658f50c327a', 'de2e819b-4944-4c4c-9d05-deaef8aa3bd9', '507ca903-c5da-447c-97c7-6c7357558a4b', 'e565b9e1-ceb9-405f-b657-62a9f748ccb3', '6d0edc41-58bf-45ff-803f-b7d27ffc6ccd', '395bfb31-cd92-4a4c-9a3d-add53a752d4f', '5d5786e6-23a6-4d40-bdba-e86f1fd7118e', 'fb9ea444-03b1-4903-884e-9b26ff708732', 'ff9a7888-89a9-451c-b845-af7424351181', '8f9624dc-5d94-4c9e-a00b-31c6415d895a', '5e91cd98-0d63-4c86-b9f7-c8baa6458861', '7dfa18ea-e1e2-4470-bad0-01e4b5307feb', 'd8bf2357-de69-40d4-ad59-c063955f41f4', '4967c305-97e2-4eab-9476-b112de6781f3', '2db2da1f-4eaf-4e1d-8235-d9e6ef07b2c4', '03a21d40-95b2-4d4e-92f2-abf80a4c8238', '8b26b6dc-4d67-4a62-a514-50a6ac3c01f1', '802db244-ab90-4df3-bb29-7c1dd0165874', 'e4f00665-9b29-4ed4-a88b-5241656b12af', '3939a5f7-4995-4e90-b40b-d2bb4fc22b49', 'c79947a7-56c0-4cfc-ada7-3fd6e945f248', '835fb369-320c-4073-91dc-de0edf07e1bd', 'f297ff6c-1660-400d-ad4a-d1a1c62a1bc2', '1f68d81a-3492-48e0-b3a1-8586c63da175', 'ae4a9dfc-0e60-4667-9655-e39adcbca78f', 
'd21acb7f-64df-4673-9ce2-96666e7265fe', '8d64bd4d-8595-4ac1-939f-5bb3425a226f', '16732e86-4b6f-4df0-835c-0dd361a8dc62', '609bde74-281b-4427-952c-0b662f6a8a69', 'b4389ef7-afbf-492f-b479-337bd74c32e9', '55fb8a3f-4fd9-4660-9f94-2b6c29fc81a9', '57d539b2-42f5-4022-a0fc-b57409b482df', 'ae746ed6-1c6f-4249-8019-e17160c3848c', '923e39ba-b2df-494d-8197-aac3653d3b72', 'e96017a9-115c-4cbf-bbd0-9383e859eef3', 'd2f6bcc6-db57-4113-b660-9cf80d62c9ca', 'd330220a-aad0-4492-b624-b349f1cea454', 'c4ae012a-c1b7-4082-918e-257731b145fa', '74546286-bd40-432f-9f9d-575961e4dd45', '21c2b2fb-994c-487a-a625-ea269d4579ee', 'a1cfd3c1-f997-4cfa-adb0-54292bbd6b85', 'b9cb0ad2-3d9e-418a-9f5d-6566b8ed93d0', '98290691-5388-4ecb-9337-47147293b5fd', 'f1652b5d-9b6a-4d3b-b1ec-6ff963b3b477', '92b46f74-a073-429c-98b8-9e584478916f', '2ad3c93b-5a22-4d08-9cc2-6bf708e6c44e', '1da769da-20a7-4d93-a7ef-eec854bf00a6', 'efb4108d-6703-410f-8319-b213b4b154ea', 'd15effe7-2994-4e62-8303-22322d1da30a', '85ad8b85-ab16-4604-89b0-d718a9e51f87', '1900ff4e-b699-4dfe-8c99-8b958b5b71a6', 'c15430de-4d31-41b5-bc2b-e633ce072178', '138d822e-c11e-4598-972b-fc4ff509a399', '615d3a8f-f61d-49e5-9167-70533c1da7f2', 'd918e7df-7faf-48fe-9566-443022b8728f', 'f883a04b-2c5d-4e44-9e44-8b18d9aa8447', '16a55782-1dee-40a9-a9d3-adfa7ad95736', '6de79c13-5fc7-4d73-b7bb-019300d6d9fd', 'd7787324-8d3f-435f-8492-5bfa8a0e706a', '2da1e433-48ba-4149-a3d9-db8c3cf24785', '47846124-cee4-46f5-b4e1-bbbc91e0f6a8', '84dda06b-7227-413b-b8b1-6001a7aa2f6f', '90c04f9d-13da-49c5-8f32-3cd84f76025a', '10b71b21-a450-4170-9381-5ea888c3292e', '3a4f3e85-40bc-4a0b-be1b-9d0ec303a670', '33756a5a-9c8c-44d7-aff2-87a320990d14', '1fad5ce9-0ccb-40f7-911c-c43b98e3d12d', '75188c8a-fe3d-41bb-a093-d4a6d8eb729d', 'ea554bf7-b674-4745-aaca-c1def3c4af2f', '476d461e-88ab-4bfb-96bd-48a9ef2f31be', '3c10dc35-240b-4441-aa3a-bdb3a94bbbe3', '4fd9ec0b-fcb7-47ab-bfad-a0cecfcc475b', '123840a7-15f9-4fb2-9d5a-832b2f505d00', '31dde70a-4087-4f17-b395-89d02471bb3e', 'f5f973eb-21b1-45d8-8414-5bc12e950d01', 
'11a30795-1b48-4632-bff1-3caa01df8ef2', 'ddca540e-8358-47b4-8743-c3a44899cf72', '5514d8da-782b-4703-a90c-660d9c6231a8', '42ffef23-edbe-4751-be47-6e0d0726a6ab', '9324d62a-6668-435e-b8a9-198c3df52ba7', 'd92143c3-fb51-4ba4-8638-487dab3d6247', 'f2ff94ef-de8e-4f73-a37e-0bcaa347d99d', 'f60255ab-a4a2-49c1-8540-f992cab2cf9a', '38d16303-6a5c-4ac1-a32f-5173f58aaf74', 'a71ea415-d11f-4d25-9e8c-4ff2cf61e748', 'ccc108a9-a4c0-4b3e-bc77-8742c677eb40', 'e8470c51-fb5a-4d80-b268-a76244afa1cd', '4859471e-9b38-4d03-8595-6b42694b2754', '8ba99e2d-48d8-4cc5-b4d2-d44b0e007f87', '5c52c71f-79bb-4118-af46-9b3e1d14126e', 'f738e93b-f70f-4cc8-99ce-c6d37707e6d0', '9d032bfb-af2c-4e91-8e6d-2c023959875c', 'debe73b1-f5fc-46ac-b402-eb425e9c5fd0', '6d29f750-02f6-4b0f-8a38-6204a6fc92b7', 'e4398ba5-a39d-489e-9bba-2b92f429d97a', 'ea2ff417-ccbd-47d6-bd1c-af1a551d8af2', 'aaf0380e-4ea0-42a0-b1b1-7793fef5e2fe', 'ac2cb032-eb2d-46ff-a3db-852a6c81cda4', '6bc11515-5d3a-4e3c-bcab-8384543daabb', 'ff2e3046-836f-4b2a-b207-2b6d3dfa42ce', '6c2d5d19-df8e-4eb6-b614-8707e8cdf9a0', '1d8da1c9-8e2f-4b73-9b88-8fac9df1d231', 'c800dbc6-4da4-43d2-912f-9ce9921fbbc4', '8bda2d72-5128-47a6-830f-836074ebb065', '9a280b19-547e-428b-8f65-3777d0fb091b', '2d44fc49-10b5-48b1-8cc5-0f6389c3c0ba', '3d6c6485-cf6e-4f37-8554-4e3ba314ead9', 'a42971ac-5460-4131-bb1f-252b3bdc6ef3', '065d9070-44be-40b5-bdba-d457298805ca', 'e682a038-2cef-4195-82bc-2db6a0bfc190', 'e2be2967-49f3-4b45-a9d6-697e5ba28c19', 'cc14bfda-e671-45cd-92f9-726e3af73a75', 'f6a2bbb2-41aa-4535-98ef-fa2bc6690553', '0af5f6ba-88a0-429e-ab34-eda507ab7b2b', 'b50a6077-62dd-4370-838d-4504e756ec76', '4e46ecc8-95f4-4e71-8546-7eff67aa9674', 'f9cc3e05-a4e0-47d8-8f85-c536944550dd', '76cf6789-64f3-440e-83d3-ea3a785a5f18', '1ff7f628-9d32-4da8-ad05-5157f21941b4', '819f5622-c337-499d-a5fd-09a6645a0bac', 'c20836de-ba7b-4848-b301-02f9ee73c2c4', '654a3768-44be-4085-97b3-b0c81c5b1e25', '6078246d-c76f-473b-aa53-aa9811d85202', 'e39365be-501f-4175-8b96-7bc21822038b', '92d48f92-0ab0-4295-b566-020041776d81', 
'3ef430ae-20ea-4b90-b4e4-7a147feb538e', 'f0eaf582-15e0-4f4c-ad11-a238903e3398', '72fac284-ba6a-49fd-9c64-360e7916882f', 'd27abf6f-ba69-499f-b16d-aaf4977976c9', 'b647b47c-7ce3-4d07-ac00-04f19aeb813f', 'c870e45e-e8cf-4428-94e8-a6e445da34c9', '711f7051-020c-480c-8eb1-3e134684a53a', '0eb793d1-1ba9-4bd7-8266-e25a5293b290', '370765bb-1578-4b2d-a525-9b68eacb5d3d', '2b7f2ba9-d96b-4929-afc3-aca3d6815ff4', 'a017a9e9-7486-466d-be7f-86204f271ac6', '049f12de-f88b-4053-8f33-b0d2de666717', '8f465387-28a7-484d-bf80-63678548df43', 'a253ed92-8cce-4781-a737-25713b56d0f6', '6dbf6270-19ad-44d8-adb5-2afacbef528c', 'b75162b1-6df6-4307-ae15-5cd999c3f293', '4317f253-649d-4453-a2e2-33cc114b8d20', '45570cc0-6b65-4920-9cc0-2c8cc404cfd8', '84d7db8a-7bf4-4c5a-97a1-92079a5d3827', '2060d9f8-f893-44bf-8b10-1147abdd770e', '20b67577-5a56-44a4-8cd7-ca222f3ffe1e', '98f5e175-ea55-4f9a-a8e3-57f5e72c2b99', 'bdfd2ff2-bdaf-4b2a-93f9-f6059c48543d', '0931c0f2-a490-44e7-94e9-e697a8f14a25', '0c415312-f2c7-4cb7-8b0c-610e6022cebe', 'e99e6197-c87d-4997-9c1a-a7a773cd16a6', '7cb530a2-6707-44d4-8230-f6350eec6633', '662fc59e-1650-4f10-af4e-f517069299af', '68301a90-9ca9-4e35-b650-89e40cd83381', 'bd31a7c5-dc58-491f-9d46-a7e33df609a0', '2af17dee-840b-48b7-97ce-58ee3c30c636', '6b750930-cb18-4f47-b973-b3ae83a5670d', 'ac5a81a2-b1e7-48f7-8660-11d2ee4a8155', 'b5f8fdeb-b540-4698-9e80-5465aaa457dd', '78722194-4664-4d77-8049-55ab29c52019', 'ea956d74-6d2d-4c13-92d8-bc6dbbfb99b6', '1ebb6bce-9e77-43dc-ac99-25f473684dea', '3164e884-dbd6-4997-bc1f-7d7021d18621', '05a75e45-2205-4c6b-8848-de79593dd773', 'ce158a73-2efe-4176-925b-7cb31ee29eb1', 'e8aac383-e775-4c8f-9e71-dc83f1fdba72', '25a69d4e-393b-434e-8c31-595d65fb5ffa', '7cbe17cd-26e6-4b22-824c-7f8847fb66be', '7237c5fd-7944-4cee-b348-f66a7e4dc37f', '1f865aa0-f115-4746-8930-a96f43a9c940', '8f52221b-96de-4b55-aa3b-c8d8be25f227', '03b8eda4-0eaa-4f28-bbde-b43937177567', '3c793dca-6bb1-4a7e-ae2e-0211e901783e', '1fa9bc1a-dce7-4053-89d5-b15d561a4684', '622e7fb9-11eb-4391-a4bf-de7ac21c1318', 
'3ed2d80f-9191-48e5-8cd6-762b474cbb46', '5c297f97-ab24-4b4d-9fc0-91731eb53c47']\n", + "Embedding status: 401/1691 documents embedded\n", + "Embedding status: 571/1691 documents embedded\n", + "Embedding status: 741/1691 documents embedded\n", + "Embedding status: 941/1691 documents embedded\n", + "Embedding status: 1211/1691 documents embedded\n", + "Embedding status: 1431/1691 documents embedded\n", + "Embedding status: 1691/1691 documents embedded\n" + ] + } + ], + "source": [ + "# Let's add more content to the existing Collection\n", + "article_url = \"https://www.gutenberg.org/files/48320/48320-0.txt\"\n", + "loader = WebBaseLoader(article_url)\n", + "documents = loader.load()\n", + "\n", + "# split it into chunks\n", + "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "await vs.aadd_documents(docs)\n", + "\n", + "await wait_for_ready(collection_name)" + ] + }, + { + "cell_type": "markdown", + "id": "5b225f3ae1e61de8", + "metadata": { + "collapsed": false + }, + "source": [ + "We see results from both books. Note the `source` metadata" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "53700a9cd817cde4", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T03:10:49.644196Z", + "start_time": "2024-05-10T03:10:49.243453Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "of astronomy, and its kindred sciences, with the various arts dependent\r\n", + "on them. In none are computations more operose than those which\r\n", + "astronomy in particular requires;--in none are preparatory facilities\r\n", + "more needful;--in none is error more detrimental. 
The practical\r\n", + "astronomer is interrupted in his pursuit, and diverted from his task of\r\n", + "observation by the irksome labours of computation, or his diligence in\r\n", + "observing becomes ineffectual for want of yet greater industry of -> {'source': 'https://www.gutenberg.org/cache/epub/71292/pg71292.txt'} \n", + "====\n", + "\n", + "possess all knowledge which is likely to be useful to him in his work,\r\n", + "and this I have endeavored in my case to do. If I remember rightly, you\r\n", + "on one occasion, in the early days of our friendship, defined my limits\r\n", + "in a very precise fashion.”\r\n", + "\r\n", + "“Yes,” I answered, laughing. “It was a singular document. Philosophy,\r\n", + "astronomy, and politics were marked at zero, I remember. Botany\r\n", + "variable, geology profound as regards the mud-stains from any region -> {'source': 'https://www.gutenberg.org/files/48320/48320-0.txt'} \n", + "====\n", + "\n", + "easily admitted, that an assembly of eminent naturalists and physicians,\r\n", + "with a sprinkling of astronomers, and one or two abstract\r\n", + "mathematicians, were not precisely the persons best qualified to\r\n", + "appreciate such an instrument of mechanical investigation as we have\r\n", + "here described. 
We shall not therefore be understood as intending the\r\n", + "slightest disrespect for these distinguished persons, when we express\r\n", + "our regret, that a discovery of such paramount practical value, in a -> {'source': 'https://www.gutenberg.org/cache/epub/71292/pg71292.txt'} \n", + "====\n", + "\n" + ] + } + ], + "source": [ + "query = \"Was he interested in astronomy?\"\n", + "docs = await vs.asearch(query, search_type=\"similarity\", k=3)\n", + "\n", + "for d in docs:\n", + " print(d.page_content, \" -> \", d.metadata, \"\\n====\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "7b81d7cae351a1ec", + "metadata": { + "collapsed": false + }, + "source": [ + "Now, we set up a filter" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8f1bdcba03979d22", + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-10T03:10:53.663003Z", + "start_time": "2024-05-10T03:10:53.441327Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "possess all knowledge which is likely to be useful to him in his work,\r\n", + "and this I have endeavored in my case to do. If I remember rightly, you\r\n", + "on one occasion, in the early days of our friendship, defined my limits\r\n", + "in a very precise fashion.”\r\n", + "\r\n", + "“Yes,” I answered, laughing. “It was a singular document. Philosophy,\r\n", + "astronomy, and politics were marked at zero, I remember. Botany\r\n", + "variable, geology profound as regards the mud-stains from any region -> {'source': 'https://www.gutenberg.org/files/48320/48320-0.txt'} \n", + "====\n", + "\n", + "the evening than in the daylight, for he said that he hated to be\r\n", + "conspicuous. Very retiring and gentlemanly he was. Even his voice was\r\n", + "gentle. He’d had the quinsy and swollen glands when he was young, he\r\n", + "told me, and it had left him with a weak throat, and a hesitating,\r\n", + "whispering fashion of speech. 
He was always well dressed, very neat and\r\n", + "plain, but his eyes were weak, just as mine are, and he wore tinted\r\n", + "glasses against the glare.” -> {'source': 'https://www.gutenberg.org/files/48320/48320-0.txt'} \n", + "====\n", + "\n", + "which was characteristic of him. “It is perhaps less suggestive than\r\n", + "it might have been,” he remarked, “and yet there are a few inferences\r\n", + "which are very distinct, and a few others which represent at least a\r\n", + "strong balance of probability. That the man was highly intellectual\r\n", + "is of course obvious upon the face of it, and also that he was fairly\r\n", + "well-to-do within the last three years, although he has now fallen upon\r\n", + "evil days. He had foresight, but has less now than formerly, pointing -> {'source': 'https://www.gutenberg.org/files/48320/48320-0.txt'} \n", + "====\n", + "\n" + ] + } + ], + "source": [ + "filter = {\n", + " \"where\": {\n", + " \"jsonpath\": (\n", + " \"$[*] ? (@.source == 'https://www.gutenberg.org/files/48320/48320-0.txt')\"\n", + " )\n", + " },\n", + "}\n", + "\n", + "docs = await vs.asearch(query, search_type=\"similarity\", metadata=filter, k=3)\n", + "\n", + "for d in docs:\n", + " print(d.page_content, \" -> \", d.metadata, \"\\n====\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "96132aa6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/integrations/vectorstores/zilliz.ipynb b/docs/docs/integrations/vectorstores/zilliz.ipynb index 3a9b4bf2005..928d12fde36 
100644 --- a/docs/docs/integrations/vectorstores/zilliz.ipynb +++ b/docs/docs/integrations/vectorstores/zilliz.ipynb @@ -11,6 +11,8 @@ "\n", "This notebook shows how to use functionality related to the Zilliz Cloud managed vector database.\n", "\n", + "You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration\n", + "\n", "To run, you should have a `Zilliz Cloud` instance up and running. Here are the [installation instructions](https://zilliz.com/cloud)" ] }, diff --git a/docs/docs/tutorials/llm_chain.ipynb b/docs/docs/tutorials/llm_chain.ipynb index 2e82f9a5de7..6000e03d2f6 100644 --- a/docs/docs/tutorials/llm_chain.ipynb +++ b/docs/docs/tutorials/llm_chain.ipynb @@ -289,7 +289,7 @@ "source": [ "## Prompt Templates\n", "\n", - "Right now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually it constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input.\n", + "Right now we are passing a list of messages directly into the language model. Where does this list of messages come from? Usually, it is constructed from a combination of user input and application logic. This application logic usually takes the raw user input and transforms it into a list of messages ready to pass to the language model. Common transformations include adding a system message or formatting a template with the user input.\n", "\n", "PromptTemplates are a concept in LangChain designed to assist with this transformation. They take in raw user input and return data (a prompt) that is ready to pass into a language model. 
\n", "\n", diff --git a/docs/docs/tutorials/summarization.ipynb b/docs/docs/tutorials/summarization.ipynb index 2dd0904468b..e84216802a7 100644 --- a/docs/docs/tutorials/summarization.ipynb +++ b/docs/docs/tutorials/summarization.ipynb @@ -374,7 +374,7 @@ "outputs": [], "source": [ "# Note we can also get this from the prompt hub, as noted above\n", - "reduce_prompt = hub.pull(\"rlm/map-prompt\")" + "reduce_prompt = hub.pull(\"rlm/reduce-prompt\")" ] }, { diff --git a/docs/docs/versions/packages.mdx b/docs/docs/versions/packages.mdx index 8e0dbc1321f..1c48e153786 100644 --- a/docs/docs/versions/packages.mdx +++ b/docs/docs/versions/packages.mdx @@ -11,7 +11,7 @@ The different packages are versioned slightly differently. ## `langchain-core` -`langchain-core` is currently on version `0.1.x`. +`langchain-core` is currently on version `0.2.x`. As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything marked with the `beta` decorator (you can see this in the API reference and will see warnings when using such functionality). The reason for beta features is that given the rate of change of the field, being able to move quickly is still a priority. @@ -45,7 +45,7 @@ Patch version increases will occur for: `langchain-community` is currently on version `0.2.x` -All changes will be accompanied by the same type of version increase as changes in `langchain`. +The versions will increase following the same guidelines as `langchain`. ## `langchain-experimental` diff --git a/docs/docs/versions/v0_2/index.mdx b/docs/docs/versions/v0_2/index.mdx index c20a1a5dbac..076521e13e1 100644 --- a/docs/docs/versions/v0_2/index.mdx +++ b/docs/docs/versions/v0_2/index.mdx @@ -11,6 +11,7 @@ LangChain v0.2 was released in May 2024. 
This release includes a number of [brea :::note Reference - [Breaking Changes & Deprecations](/docs/versions/v0_2/deprecations) +- [Migrating to Astream Events v2](/docs/versions/v0_2/migrating_astream_events) ::: @@ -23,6 +24,7 @@ This documentation will help you upgrade your code to LangChain `0.2.x.`. To pre 3. Install a recent version of `langchain-cli` , and use the tool to replace old imports used by your code with the new imports. (See instructions below.) 4. Manually resolve any remaining deprecation warnings. 5. Re-run unit tests. +6. If you are using `astream_events`, please review how to [migrate to astream events v2](/docs/versions/v0_2/migrating_astream_events). ## Upgrade to new imports diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 83c6e022b9f..590518d18ac 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -83,6 +83,7 @@ const config = { /** @type {import('@docusaurus/preset-classic').Options} */ ({ docs: { + editUrl: "https://github.com/langchain-ai/langchain/edit/master/docs/", sidebarPath: require.resolve("./sidebars.js"), remarkPlugins: [ [require("@docusaurus/remark-plugin-npm2yarn"), { sync: true }], diff --git a/docs/scripts/arxiv_references.py b/docs/scripts/arxiv_references.py index 76e22526c33..914c58e7d6e 100644 --- a/docs/scripts/arxiv_references.py +++ b/docs/scripts/arxiv_references.py @@ -7,7 +7,7 @@ import os import re from dataclasses import dataclass from pathlib import Path -from typing import Any, Dict, List, Set +from typing import Any, Dict from pydantic.v1 import BaseModel, root_validator @@ -17,6 +17,7 @@ _ROOT_DIR = Path(os.path.abspath(__file__)).parents[2] DOCS_DIR = _ROOT_DIR / "docs" / "docs" CODE_DIR = _ROOT_DIR / "libs" TEMPLATES_DIR = _ROOT_DIR / "templates" +COOKBOOKS_DIR = _ROOT_DIR / "cookbook" ARXIV_ID_PATTERN = r"https://arxiv\.org/(abs|pdf)/(\d+\.\d+)" LANGCHAIN_PYTHON_URL = "python.langchain.com" @@ -29,6 +30,7 @@ class ArxivPaper: referencing_doc2url: dict[str, 
str] referencing_api_ref2url: dict[str, str] referencing_template2url: dict[str, str] + referencing_cookbook2url: dict[str, str] title: str authors: list[str] abstract: str @@ -50,7 +52,6 @@ def search_documentation_for_arxiv_references(docs_dir: Path) -> dict[str, set[s arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN) exclude_strings = {"file_path", "metadata", "link", "loader", "PyPDFLoader"} - # loop all the files (ipynb, mdx, md) in the docs folder files = ( p.resolve() for p in Path(docs_dir).glob("**/*") @@ -76,39 +77,6 @@ def search_documentation_for_arxiv_references(docs_dir: Path) -> dict[str, set[s return arxiv_id2file_names -def convert_module_name_and_members_to_urls( - arxiv_id2module_name_and_members: dict[str, set[str]], -) -> dict[str, set[str]]: - arxiv_id2urls = {} - for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items(): - urls = set() - for module_name_and_member in module_name_and_members: - module_name, type_and_member = module_name_and_member.split(":") - if "$" in type_and_member: - type, member = type_and_member.split("$") - else: - type = type_and_member - member = "" - _namespace_parts = module_name.split(".") - if type == "module": - first_namespace_part = _namespace_parts[0] - if first_namespace_part.startswith("langchain_"): - first_namespace_part = first_namespace_part.replace( - "langchain_", "" - ) - url = f"{first_namespace_part}_api_reference.html#module-{module_name}" - elif type in ["class", "function"]: - second_namespace_part = _namespace_parts[1] - url = f"{second_namespace_part}/{module_name}.{member}.html#{module_name}.{member}" - else: - raise ValueError( - f"Unknown type: {type} in the {module_name_and_member}." - ) - urls.add(url) - arxiv_id2urls[arxiv_id] = urls - return arxiv_id2urls - - def search_code_for_arxiv_references(code_dir: Path) -> dict[str, set[str]]: """Search the code for arXiv references. 
@@ -220,7 +188,6 @@ def search_code_for_arxiv_references(code_dir: Path) -> dict[str, set[str]]: def search_templates_for_arxiv_references(templates_dir: Path) -> dict[str, set[str]]: arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN) - # exclude_strings = {"file_path", "metadata", "link", "loader", "PyPDFLoader"} # loop all the Readme.md files since they are parsed into LangChain documentation # exclude the Readme.md in the root folder @@ -234,8 +201,6 @@ def search_templates_for_arxiv_references(templates_dir: Path) -> dict[str, set[ with open(file, "r", encoding="utf-8") as f: lines = f.readlines() for line in lines: - # if any(exclude_string in line for exclude_string in exclude_strings): - # continue matches = arxiv_url_pattern.search(line) if matches: arxiv_id = matches.group(2) @@ -247,6 +212,58 @@ def search_templates_for_arxiv_references(templates_dir: Path) -> dict[str, set[ return arxiv_id2template_names +def search_cookbooks_for_arxiv_references(cookbooks_dir: Path) -> dict[str, set[str]]: + arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN) + files = (p.resolve() for p in Path(cookbooks_dir).glob("**/*.ipynb")) + arxiv_id2cookbook_names: dict[str, set[str]] = {} + for file in files: + with open(file, "r", encoding="utf-8") as f: + lines = f.readlines() + for line in lines: + matches = arxiv_url_pattern.search(line) + if matches: + arxiv_id = matches.group(2) + cookbook_name = file.stem + if arxiv_id not in arxiv_id2cookbook_names: + arxiv_id2cookbook_names[arxiv_id] = {cookbook_name} + else: + arxiv_id2cookbook_names[arxiv_id].add(cookbook_name) + return arxiv_id2cookbook_names + + +def convert_module_name_and_members_to_urls( + arxiv_id2module_name_and_members: dict[str, set[str]], +) -> dict[str, set[str]]: + arxiv_id2urls = {} + for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items(): + urls = set() + for module_name_and_member in module_name_and_members: + module_name, type_and_member = module_name_and_member.split(":") + if 
"$" in type_and_member: + type_, member = type_and_member.split("$") + else: + type_ = type_and_member + member = "" + _namespace_parts = module_name.split(".") + if type_ == "module": + first_namespace_part = _namespace_parts[0] + if first_namespace_part.startswith("langchain_"): + first_namespace_part = first_namespace_part.replace( + "langchain_", "" + ) + url = f"{first_namespace_part}_api_reference.html#module-{module_name}" + elif type_ in ["class", "function"]: + second_namespace_part = _namespace_parts[1] + url = f"{second_namespace_part}/{module_name}.{member}.html#{module_name}.{member}" + else: + raise ValueError( + f"Unknown type: {type_} in the {module_name_and_member}." + ) + urls.add(url) + arxiv_id2urls[arxiv_id] = urls + return arxiv_id2urls + + def _get_doc_path(file_parts: tuple[str, ...], file_extension) -> str: """Get the relative path to the documentation page from the absolute path of the file. @@ -285,60 +302,6 @@ def _get_module_name(file_parts: tuple[str, ...]) -> str: return ".".join(ns_parts) -def compound_urls( - arxiv_id2file_names: dict[str, set[str]], - arxiv_id2code_urls: dict[str, set[str]], - arxiv_id2templates: dict[str, set[str]], -) -> dict[str, dict[str, set[str]]]: - # format urls and verify that the urls are correct - arxiv_id2file_names_new = {} - for arxiv_id, file_names in arxiv_id2file_names.items(): - key2urls = { - key: _format_doc_url(key) - for key in file_names - if _is_url_ok(_format_doc_url(key)) - } - if key2urls: - arxiv_id2file_names_new[arxiv_id] = key2urls - - arxiv_id2code_urls_new = {} - for arxiv_id, code_urls in arxiv_id2code_urls.items(): - key2urls = { - key: _format_api_ref_url(key) - for key in code_urls - if _is_url_ok(_format_api_ref_url(key)) - } - if key2urls: - arxiv_id2code_urls_new[arxiv_id] = key2urls - - arxiv_id2templates_new = {} - for arxiv_id, templates in arxiv_id2templates.items(): - key2urls = { - key: _format_template_url(key) - for key in templates - if 
_is_url_ok(_format_template_url(key)) - } - if key2urls: - arxiv_id2templates_new[arxiv_id] = key2urls - - arxiv_id2type2key2urls = dict.fromkeys( - arxiv_id2file_names_new | arxiv_id2code_urls_new | arxiv_id2templates_new - ) - arxiv_id2type2key2urls = {k: {} for k in arxiv_id2type2key2urls} - for arxiv_id, key2urls in arxiv_id2file_names_new.items(): - arxiv_id2type2key2urls[arxiv_id]["docs"] = key2urls - for arxiv_id, key2urls in arxiv_id2code_urls_new.items(): - arxiv_id2type2key2urls[arxiv_id]["apis"] = key2urls - for arxiv_id, key2urls in arxiv_id2templates_new.items(): - arxiv_id2type2key2urls[arxiv_id]["templates"] = key2urls - - # reverse sort by the arxiv_id (the newest papers first) - ret = dict( - sorted(arxiv_id2type2key2urls.items(), key=lambda item: item[0], reverse=True) - ) - return ret - - def _is_url_ok(url: str) -> bool: """Check if the url page is open without error.""" import requests @@ -389,7 +352,7 @@ class ArxivAPIWrapper(BaseModel): Returns: List of ArxivPaper objects. 
- """ # noqa: E501 + """ def cut_authors(authors: list) -> list[str]: if len(authors) > 3: @@ -424,6 +387,9 @@ class ArxivAPIWrapper(BaseModel): referencing_template2url=type2key2urls["templates"] if "templates" in type2key2urls else {}, + referencing_cookbook2url=type2key2urls["cookbooks"] + if "cookbooks" in type2key2urls + else {}, ) for result, type2key2urls in zip(results, arxiv_id2type2key2urls.values()) ] @@ -443,6 +409,10 @@ def _format_template_url(template_name: str) -> str: return f"https://{LANGCHAIN_PYTHON_URL}/docs/templates/{template_name}" +def _format_cookbook_url(cookbook_name: str) -> str: + return f"https://github.com/langchain-ai/langchain/blob/master/cookbook/{cookbook_name}.ipynb" + + def _compact_module_full_name(doc_path: str) -> str: # agents/langchain_core.agents.AgentAction.html#langchain_core.agents.AgentAction module = doc_path.split("#")[1].replace("module-", "") @@ -454,9 +424,79 @@ def _compact_module_full_name(doc_path: str) -> str: return module +def compound_urls( + arxiv_id2file_names: dict[str, set[str]], + arxiv_id2code_urls: dict[str, set[str]], + arxiv_id2templates: dict[str, set[str]], + arxiv_id2cookbooks: dict[str, set[str]], +) -> dict[str, dict[str, set[str]]]: + # format urls and verify that the urls are correct + arxiv_id2file_names_new = {} + for arxiv_id, file_names in arxiv_id2file_names.items(): + key2urls = { + key: _format_doc_url(key) + for key in file_names + if _is_url_ok(_format_doc_url(key)) + } + if key2urls: + arxiv_id2file_names_new[arxiv_id] = key2urls + + arxiv_id2code_urls_new = {} + for arxiv_id, code_urls in arxiv_id2code_urls.items(): + key2urls = { + key: _format_api_ref_url(key) + for key in code_urls + if _is_url_ok(_format_api_ref_url(key)) + } + if key2urls: + arxiv_id2code_urls_new[arxiv_id] = key2urls + + arxiv_id2templates_new = {} + for arxiv_id, templates in arxiv_id2templates.items(): + key2urls = { + key: _format_template_url(key) + for key in templates + if 
_is_url_ok(_format_template_url(key)) + } + if key2urls: + arxiv_id2templates_new[arxiv_id] = key2urls + + arxiv_id2cookbooks_new = {} + for arxiv_id, cookbooks in arxiv_id2cookbooks.items(): + key2urls = { + key: _format_cookbook_url(key) + for key in cookbooks + if _is_url_ok(_format_cookbook_url(key)) + } + if key2urls: + arxiv_id2cookbooks_new[arxiv_id] = key2urls + + arxiv_id2type2key2urls = dict.fromkeys( + arxiv_id2file_names_new + | arxiv_id2code_urls_new + | arxiv_id2templates_new + | arxiv_id2cookbooks_new + ) + arxiv_id2type2key2urls = {k: {} for k in arxiv_id2type2key2urls} + for arxiv_id, key2urls in arxiv_id2file_names_new.items(): + arxiv_id2type2key2urls[arxiv_id]["docs"] = key2urls + for arxiv_id, key2urls in arxiv_id2code_urls_new.items(): + arxiv_id2type2key2urls[arxiv_id]["apis"] = key2urls + for arxiv_id, key2urls in arxiv_id2templates_new.items(): + arxiv_id2type2key2urls[arxiv_id]["templates"] = key2urls + for arxiv_id, key2urls in arxiv_id2cookbooks_new.items(): + arxiv_id2type2key2urls[arxiv_id]["cookbooks"] = key2urls + + # reverse sort by the arxiv_id (the newest papers first) + ret = dict( + sorted(arxiv_id2type2key2urls.items(), key=lambda item: item[0], reverse=True) + ) + return ret + + def log_results(arxiv_id2type2key2urls): arxiv_ids = arxiv_id2type2key2urls.keys() - doc_number, api_number, templates_number = 0, 0, 0 + doc_number, api_number, templates_number, cookbooks_number = 0, 0, 0, 0 for type2key2url in arxiv_id2type2key2urls.values(): if "docs" in type2key2url: doc_number += len(type2key2url["docs"]) @@ -464,9 +504,11 @@ def log_results(arxiv_id2type2key2urls): api_number += len(type2key2url["apis"]) if "templates" in type2key2url: templates_number += len(type2key2url["templates"]) + if "cookbooks" in type2key2url: + cookbooks_number += len(type2key2url["cookbooks"]) logger.warning( f"Found {len(arxiv_ids)} arXiv references in the {doc_number} docs, {api_number} API Refs," - f" and {templates_number} Templates." 
+ f" {templates_number} Templates, and {cookbooks_number} Cookbooks." ) @@ -477,7 +519,7 @@ def generate_arxiv_references_page(file_name: Path, papers: list[ArxivPaper]) -> LangChain implements the latest research in the field of Natural Language Processing. This page contains `arXiv` papers referenced in the LangChain Documentation, API Reference, -and Templates. + Templates, and Cookbooks. ## Summary @@ -510,6 +552,14 @@ and Templates. for key, url in paper.referencing_template2url.items() ) ] + if paper.referencing_cookbook2url: + refs += [ + "`Cookbook:` " + + ", ".join( + f"[{key}]({url})" + for key, url in paper.referencing_cookbook2url.items() + ) + ] refs_str = ", ".join(refs) title_link = f"[{paper.title}]({paper.url})" @@ -533,8 +583,17 @@ and Templates. if paper.referencing_template2url else "" ) + cookbook_refs = ( + f" - **Cookbook:** {', '.join(f'[{key}]({url})' for key, url in paper.referencing_cookbook2url.items())}" + if paper.referencing_cookbook2url + else "" + ) refs = "\n".join( - [el for el in [docs_refs, api_ref_refs, template_refs] if el] + [ + el + for el in [docs_refs, api_ref_refs, template_refs, cookbook_refs] + if el + ] ) f.write(f""" ## {paper.title} @@ -562,8 +621,9 @@ def main(): ) arxiv_id2file_names = search_documentation_for_arxiv_references(DOCS_DIR) arxiv_id2templates = search_templates_for_arxiv_references(TEMPLATES_DIR) + arxiv_id2cookbooks = search_cookbooks_for_arxiv_references(COOKBOOKS_DIR) arxiv_id2type2key2urls = compound_urls( - arxiv_id2file_names, arxiv_id2code_urls, arxiv_id2templates + arxiv_id2file_names, arxiv_id2code_urls, arxiv_id2templates, arxiv_id2cookbooks ) log_results(arxiv_id2type2key2urls) diff --git a/docs/scripts/copy_templates.py b/docs/scripts/copy_templates.py index 015c01611ef..46a4a85fafe 100644 --- a/docs/scripts/copy_templates.py +++ b/docs/scripts/copy_templates.py @@ -27,6 +27,7 @@ if __name__ == "__main__": sidebar_hidden = """--- sidebar_class_name: hidden +custom_edit_url: --- """ diff 
--git a/docs/scripts/generate_api_reference_links.py b/docs/scripts/generate_api_reference_links.py index 31f4b22fa49..9084b381f5a 100644 --- a/docs/scripts/generate_api_reference_links.py +++ b/docs/scripts/generate_api_reference_links.py @@ -186,7 +186,7 @@ def replace_imports(file): data = code_block_re.sub(replacer, data) # if all_imports: - # print(f"Adding {len(all_imports)} links for imports in {file}") # noqa: T201 + # print(f"Adding {len(all_imports)} links for imports in {file}") with open(file, "w") as f: f.write(data) return all_imports diff --git a/docs/scripts/model_feat_table.py b/docs/scripts/model_feat_table.py index eb3ac8679af..3b047e55097 100644 --- a/docs/scripts/model_feat_table.py +++ b/docs/scripts/model_feat_table.py @@ -24,6 +24,7 @@ CHAT_MODEL_FEAT_TABLE = { "ChatMistralAI": { "tool_calling": True, "structured_output": True, + "json_model": True, "package": "langchain-mistralai", "link": "/docs/integrations/chat/mistralai/", }, @@ -80,6 +81,7 @@ CHAT_MODEL_FEAT_TABLE = { "link": "/docs/integrations/chat/bedrock/", }, "ChatHuggingFace": { + "tool_calling": True, "local": True, "package": "langchain-huggingface", "link": "/docs/integrations/chat/huggingface/", @@ -102,6 +104,7 @@ LLM_TEMPLATE = """\ sidebar_position: 1 sidebar_class_name: hidden keywords: [compatibility] +custom_edit_url: --- # LLMs @@ -116,13 +119,14 @@ Each LLM integration can optionally provide native implementations for async, st {table} -""" # noqa: E501 +""" CHAT_MODEL_TEMPLATE = """\ --- sidebar_position: 0 sidebar_class_name: hidden keywords: [compatibility, bind_tools, tool calling, function calling, structured output, with_structured_output, json mode, local model] +custom_edit_url: --- # Chat models @@ -133,7 +137,7 @@ The following table shows all the chat models that support one or more advanced {table} -""" # noqa: E501 +""" def get_llm_table(): diff --git a/docs/scripts/notebook_convert.py b/docs/scripts/notebook_convert.py index 2319e7dfbb5..ab868ee8adc 
100644 --- a/docs/scripts/notebook_convert.py +++ b/docs/scripts/notebook_convert.py @@ -112,15 +112,39 @@ def _process_path(tup: Tuple[Path, Path, Path]): notebook_path, intermediate_docs_dir, output_docs_dir = tup relative = notebook_path.relative_to(intermediate_docs_dir) output_path = output_docs_dir / relative.parent / (relative.stem + ".md") - _convert_notebook(notebook_path, output_path) + _convert_notebook(notebook_path, output_path, intermediate_docs_dir) -def _convert_notebook(notebook_path: Path, output_path: Path): +def _modify_frontmatter( + body: str, notebook_path: Path, intermediate_docs_dir: Path +) -> str: + # if frontmatter exists + rel_path = notebook_path.relative_to(intermediate_docs_dir).as_posix() + edit_url = ( + f"https://github.com/langchain-ai/langchain/edit/master/docs/docs/{rel_path}" + ) + if re.match(r"^[\s\n]*---\n", body): + # if custom_edit_url already exists, leave it + if re.match(r"custom_edit_url: ", body): + return body + else: + return re.sub( + r"^[\s\n]*---\n", f"---\ncustom_edit_url: {edit_url}\n", body, count=1 + ) + else: + return f"---\ncustom_edit_url: {edit_url}\n---\n{body}" + + +def _convert_notebook( + notebook_path: Path, output_path: Path, intermediate_docs_dir: Path +) -> Path: with open(notebook_path) as f: nb = nbformat.read(f, as_version=4) body, resources = exporter.from_notebook_node(nb) + body = _modify_frontmatter(body, notebook_path, intermediate_docs_dir) + output_path.parent.mkdir(parents=True, exist_ok=True) with open(output_path, "w") as f: diff --git a/docs/scripts/resolve_local_links.py b/docs/scripts/resolve_local_links.py index 99414e3c803..ee271d3ad57 100644 --- a/docs/scripts/resolve_local_links.py +++ b/docs/scripts/resolve_local_links.py @@ -13,8 +13,13 @@ def update_links(doc_path, docs_link): # replace relative links content = re.sub(r"\]\(\.\/", f"]({docs_link}", content) + frontmatter = """--- +custom_edit_url: +--- +""" + with open(DOCS_DIR / doc_path, "w") as f: - f.write(content) + 
f.write(frontmatter + content) if __name__ == "__main__": diff --git a/docs/src/theme/CodeBlock/index.js b/docs/src/theme/CodeBlock/index.js index 84da0b86ef5..ff8324981d0 100644 --- a/docs/src/theme/CodeBlock/index.js +++ b/docs/src/theme/CodeBlock/index.js @@ -16,18 +16,14 @@ function Imports({ imports }) { borderBottomRightRadius: "var(--ifm-code-border-radius)", }} > -

+ API Reference: -

-
    - {imports.map(({ imported, source, docs }) => ( -
  • - - {imported} - -
  • +
    + {imports.map(({ imported, source, docs }, index) => ( + + {imported}{index < imports.length - 1 ? ' | ' : ''} + ))} -
); } diff --git a/docs/src/theme/DocItem/Content/index.js b/docs/src/theme/DocItem/Content/index.js deleted file mode 100644 index 3e36dee744b..00000000000 --- a/docs/src/theme/DocItem/Content/index.js +++ /dev/null @@ -1,13 +0,0 @@ -import React from "react"; -import Content from "@theme-original/DocItem/Content"; -import Feedback from "../../Feedback"; - -export default function ContentWrapper(props) { - return ( - <> - {/* eslint-disable react/jsx-props-no-spreading */} - - - - ); -} diff --git a/docs/src/theme/DocItem/Paginator/index.js b/docs/src/theme/DocItem/Paginator/index.js new file mode 100644 index 00000000000..0d2093fecc1 --- /dev/null +++ b/docs/src/theme/DocItem/Paginator/index.js @@ -0,0 +1,12 @@ +import React from 'react'; +import Paginator from '@theme-original/DocItem/Paginator'; +import Feedback from "../../Feedback"; + +export default function PaginatorWrapper(props) { + return ( + <> + + + + ); +} diff --git a/docs/src/theme/Feedback.js b/docs/src/theme/Feedback.js index 26896baedc0..b07f1ec34a7 100644 --- a/docs/src/theme/Feedback.js +++ b/docs/src/theme/Feedback.js @@ -195,7 +195,7 @@ export default function Feedback() { }; const newGithubIssueURL = pathname - ? `https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CIssue+related+to+${pathname}%3E` + ? 
`https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CIssue+related+to+${pathname}%3E&url=https://python.langchain.com${pathname}` : "https://github.com/langchain-ai/langchain/issues/new?assignees=&labels=03+-+Documentation&projects=&template=documentation.yml&title=DOC%3A+%3CPlease+write+a+comprehensive+title+after+the+%27DOC%3A+%27+prefix%3E"; return ( diff --git a/docs/vercel.json b/docs/vercel.json index 658faaf554f..52ad6b46bc0 100644 --- a/docs/vercel.json +++ b/docs/vercel.json @@ -13,6 +13,10 @@ } ], "redirects": [ + { + "source": "/docs/how_to/tool_calls_multi_modal(/?)", + "destination": "/docs/how_to/multimodal_inputs/" + }, { "source": "/v0.2/docs/langsmith(/?)", "destination": "https://docs.smith.langchain.com/" diff --git a/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py b/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py index df191524542..8835b4dde45 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/chat_models.py @@ -1,4 +1,5 @@ """__ModuleName__ chat models.""" + from typing import Any, AsyncIterator, Iterator, List, Optional from langchain_core.callbacks import ( @@ -29,6 +30,7 @@ class Chat__ModuleName__(BaseChatModel): """Return type of chat model.""" return "chat-__package_name_short__" + # TODO: This method must be implemented to generate chat responses. def _generate( self, messages: List[BaseMessage], @@ -38,7 +40,7 @@ class Chat__ModuleName__(BaseChatModel): ) -> ChatResult: raise NotImplementedError - # TODO: Implement if __model_name__ supports streaming. Otherwise delete method. + # TODO: Implement if Chat__ModuleName__ supports streaming. Otherwise delete method. 
def _stream( self, messages: List[BaseMessage], @@ -48,7 +50,7 @@ class Chat__ModuleName__(BaseChatModel): ) -> Iterator[ChatGenerationChunk]: raise NotImplementedError - # TODO: Implement if __model_name__ supports async streaming. Otherwise delete + # TODO: Implement if Chat__ModuleName__ supports async streaming. Otherwise delete # method. async def _astream( self, @@ -59,7 +61,7 @@ class Chat__ModuleName__(BaseChatModel): ) -> AsyncIterator[ChatGenerationChunk]: raise NotImplementedError - # TODO: Implement if __model_name__ supports async generation. Otherwise delete + # TODO: Implement if Chat__ModuleName__ supports async generation. Otherwise delete # method. async def _agenerate( self, diff --git a/libs/cli/langchain_cli/integration_template/integration_template/llms.py b/libs/cli/langchain_cli/integration_template/integration_template/llms.py index 562fc0f4001..675e95f158d 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/llms.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/llms.py @@ -1,4 +1,5 @@ """__ModuleName__ large language models.""" + from typing import ( Any, AsyncIterator, @@ -32,6 +33,7 @@ class __ModuleName__LLM(BaseLLM): """Return type of LLM.""" return "__package_name_short__-llm" + # TODO: This method must be implemented to generate text completions. def _generate( self, prompts: List[str], @@ -41,7 +43,7 @@ class __ModuleName__LLM(BaseLLM): ) -> LLMResult: raise NotImplementedError - # TODO: Implement if __model_name__ supports async generation. Otherwise + # TODO: Implement if __ModuleName__LLM supports async generation. Otherwise # delete method. async def _agenerate( self, @@ -52,7 +54,7 @@ class __ModuleName__LLM(BaseLLM): ) -> LLMResult: raise NotImplementedError - # TODO: Implement if __model_name__ supports streaming. Otherwise delete method. + # TODO: Implement if __ModuleName__LLM supports streaming. Otherwise delete method. 
def _stream( self, prompt: str, @@ -62,7 +64,7 @@ class __ModuleName__LLM(BaseLLM): ) -> Iterator[GenerationChunk]: raise NotImplementedError - # TODO: Implement if __model_name__ supports async streaming. Otherwise delete + # TODO: Implement if __ModuleName__LLM supports async streaming. Otherwise delete # method. async def _astream( self, diff --git a/libs/cli/langchain_cli/integration_template/pyproject.toml b/libs/cli/langchain_cli/integration_template/pyproject.toml index 4b134979873..d7752864b0d 100644 --- a/libs/cli/langchain_cli/integration_template/pyproject.toml +++ b/libs/cli/langchain_cli/integration_template/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "__package_name__" -version = "0.0.1" +version = "0.1.0" description = "An integration package connecting __ModuleName__ and LangChain" authors = [] readme = "README.md" @@ -12,7 +12,7 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -langchain-core = ">=0.1,<0.3" +langchain-core = "^0.2.0" [tool.poetry.group.test] optional = true @@ -21,7 +21,7 @@ optional = true pytest = "^7.4.3" pytest-asyncio = "^0.23.2" pytest-socket = "^0.7.0" -langchain-core = {path = "../../core", develop = true} +langchain-core = { path = "../../core", develop = true } [tool.poetry.group.codespell] optional = true @@ -42,19 +42,19 @@ ruff = "^0.1.8" [tool.poetry.group.typing.dependencies] mypy = "^1.7.1" -langchain-core = {path = "../../core", develop = true} +langchain-core = { path = "../../core", develop = true } [tool.poetry.group.dev] optional = true [tool.poetry.group.dev.dependencies] -langchain-core = {path = "../../core", develop = true} +langchain-core = { path = "../../core", develop = true } [tool.ruff.lint] select = [ - "E", # pycodestyle - "F", # pyflakes - "I", # isort + "E", # pycodestyle + "F", # pyflakes + "I", # isort "T201", # print ] @@ -62,9 +62,7 @@ select = [ disallow_untyped_defs = "True" [tool.coverage.run] -omit = [ - "tests/*", -] +omit = ["tests/*"] [build-system] requires 
= ["poetry-core>=1.0.0"] diff --git a/libs/cli/langchain_cli/namespaces/app.py b/libs/cli/langchain_cli/namespaces/app.py index 03e8245c88d..9bf007652b4 100644 --- a/libs/cli/langchain_cli/namespaces/app.py +++ b/libs/cli/langchain_cli/namespaces/app.py @@ -123,7 +123,9 @@ def new( typer.echo(f" cd ./{app_name}\n") typer.echo("Then add templates with commands like:\n") typer.echo(" langchain app add extraction-openai-functions") - typer.echo(" langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n") + typer.echo( + " langchain app add git+ssh://git@github.com/efriis/simple-pirate.git\n\n" + ) @app_cli.command() diff --git a/libs/cli/pyproject.toml b/libs/cli/pyproject.toml index 079f84dce5b..03b1bda1fe8 100644 --- a/libs/cli/pyproject.toml +++ b/libs/cli/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-cli" -version = "0.0.22" +version = "0.0.23" description = "CLI for interacting with LangChain" authors = ["Erick Friis "] readme = "README.md" diff --git a/libs/cli/tests/unit_tests/migrate/test_replace_imports.py b/libs/cli/tests/unit_tests/migrate/test_replace_imports.py index c75b1db74f7..627acb49f2d 100644 --- a/libs/cli/tests/unit_tests/migrate/test_replace_imports.py +++ b/libs/cli/tests/unit_tests/migrate/test_replace_imports.py @@ -17,7 +17,7 @@ ReplaceImportsCodemod = generate_import_replacer( "langchain_to_core", "community_to_core", ] -) # type: ignore[attr-defined] # noqa: E501 +) # type: ignore[attr-defined] class TestReplaceImportsCommand(CodemodTest): diff --git a/libs/community/langchain_community/agent_toolkits/__init__.py b/libs/community/langchain_community/agent_toolkits/__init__.py index 45f715a17b5..e0e67479ffd 100644 --- a/libs/community/langchain_community/agent_toolkits/__init__.py +++ b/libs/community/langchain_community/agent_toolkits/__init__.py @@ -168,6 +168,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise 
AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/agents/__init__.py b/libs/community/langchain_community/agents/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/community/langchain_community/agents/openai_assistant/__init__.py b/libs/community/langchain_community/agents/openai_assistant/__init__.py new file mode 100644 index 00000000000..f7fdcbdd864 --- /dev/null +++ b/libs/community/langchain_community/agents/openai_assistant/__init__.py @@ -0,0 +1,3 @@ +from langchain_community.agents.openai_assistant.base import OpenAIAssistantV2Runnable + +__all__ = ["OpenAIAssistantV2Runnable"] diff --git a/libs/community/langchain_community/agents/openai_assistant/base.py b/libs/community/langchain_community/agents/openai_assistant/base.py new file mode 100644 index 00000000000..fc977d7c00c --- /dev/null +++ b/libs/community/langchain_community/agents/openai_assistant/base.py @@ -0,0 +1,523 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Optional, + Sequence, + Type, + Union, +) + +from langchain.agents.openai_assistant.base import OpenAIAssistantRunnable, OutputType +from langchain_core._api import beta +from langchain_core.callbacks import CallbackManager +from langchain_core.load import dumpd +from langchain_core.pydantic_v1 import BaseModel, Field, root_validator +from langchain_core.runnables import RunnableConfig, ensure_config +from langchain_core.tools import BaseTool +from langchain_core.utils.function_calling import convert_to_openai_tool + +if TYPE_CHECKING: + import openai + from openai._types import NotGiven + from openai.types.beta.assistant import ToolResources as AssistantToolResources + + +def _get_openai_client() -> openai.OpenAI: + try: + import openai + + return openai.OpenAI(default_headers={"OpenAI-Beta": "assistants=v2"}) + except ImportError as e: + raise 
ImportError( + "Unable to import openai, please install with `pip install openai`." + ) from e + except AttributeError as e: + raise AttributeError( + "Please make sure you are using a v1.23-compatible version of openai. You " + 'can install with `pip install "openai>=1.23"`.' + ) from e + + +def _get_openai_async_client() -> openai.AsyncOpenAI: + try: + import openai + + return openai.AsyncOpenAI(default_headers={"OpenAI-Beta": "assistants=v2"}) + except ImportError as e: + raise ImportError( + "Unable to import openai, please install with `pip install openai`." + ) from e + except AttributeError as e: + raise AttributeError( + "Please make sure you are using a v1.23-compatible version of openai. You " + 'can install with `pip install "openai>=1.23"`.' + ) from e + + +def _convert_file_ids_into_attachments(file_ids: list) -> list: + """ + Convert file_ids into attachments + File search and Code interpreter will be turned on by default + """ + attachments = [] + for id in file_ids: + attachments.append( + { + "file_id": id, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + } + ) + return attachments + + +def _is_assistants_builtin_tool( + tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool], +) -> bool: + """Determine if tool corresponds to OpenAI Assistants built-in.""" + assistants_builtin_tools = ("code_interpreter", "retrieval") + return ( + isinstance(tool, dict) + and ("type" in tool) + and (tool["type"] in assistants_builtin_tools) + ) + + +def _get_assistants_tool( + tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool], +) -> Dict[str, Any]: + """Convert a raw function/class to an OpenAI tool. + + Note that OpenAI assistants supports several built-in tools, + such as "code_interpreter" and "retrieval." 
+ """ + if _is_assistants_builtin_tool(tool): + return tool # type: ignore + else: + return convert_to_openai_tool(tool) + + +@beta() +class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable): + """Run an OpenAI Assistant. + + Example using OpenAI tools: + .. code-block:: python + + from langchain.agents.openai_assistant import OpenAIAssistantV2Runnable + + interpreter_assistant = OpenAIAssistantV2Runnable.create_assistant( + name="langchain assistant", + instructions="You are a personal math tutor. Write and run code to answer math questions.", + tools=[{"type": "code_interpreter"}], + model="gpt-4-1106-preview" + ) + output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"}) + + Example using custom tools and AgentExecutor: + .. code-block:: python + + from langchain.agents.openai_assistant import OpenAIAssistantV2Runnable + from langchain.agents import AgentExecutor + from langchain.tools import E2BDataAnalysisTool + + + tools = [E2BDataAnalysisTool(api_key="...")] + agent = OpenAIAssistantV2Runnable.create_assistant( + name="langchain assistant e2b tool", + instructions="You are a personal math tutor. Write and run code to answer math questions.", + tools=tools, + model="gpt-4-1106-preview", + as_agent=True + ) + + agent_executor = AgentExecutor(agent=agent, tools=tools) + agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"}) + + + Example using custom tools and custom execution: + .. code-block:: python + + from langchain.agents.openai_assistant import OpenAIAssistantV2Runnable + from langchain.agents import AgentExecutor + from langchain_core.agents import AgentFinish + from langchain.tools import E2BDataAnalysisTool + + + tools = [E2BDataAnalysisTool(api_key="...")] + agent = OpenAIAssistantV2Runnable.create_assistant( + name="langchain assistant e2b tool", + instructions="You are a personal math tutor. 
Write and run code to answer math questions.", + tools=tools, + model="gpt-4-1106-preview", + as_agent=True + ) + + def execute_agent(agent, tools, input): + tool_map = {tool.name: tool for tool in tools} + response = agent.invoke(input) + while not isinstance(response, AgentFinish): + tool_outputs = [] + for action in response: + tool_output = tool_map[action.tool].invoke(action.tool_input) + tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id}) + response = agent.invoke( + { + "tool_outputs": tool_outputs, + "run_id": action.run_id, + "thread_id": action.thread_id + } + ) + + return response + + response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"}) + next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}) + + """ # noqa: E501 + + client: Any = Field(default_factory=_get_openai_client) + """OpenAI or AzureOpenAI client.""" + async_client: Any = None + """OpenAI or AzureOpenAI async client.""" + assistant_id: str + """OpenAI assistant id.""" + check_every_ms: float = 1_000.0 + """Frequency with which to check run progress in ms.""" + as_agent: bool = False + """Use as a LangChain agent, compatible with the AgentExecutor.""" + + @root_validator() + def validate_async_client(cls, values: dict) -> dict: + if values["async_client"] is None: + import openai + + api_key = values["client"].api_key + values["async_client"] = openai.AsyncOpenAI(api_key=api_key) + return values + + @classmethod + def create_assistant( + cls, + name: str, + instructions: str, + tools: Sequence[Union[BaseTool, dict]], + model: str, + *, + client: Optional[Union[openai.OpenAI, openai.AzureOpenAI]] = None, + tool_resources: Optional[Union[AssistantToolResources, dict, NotGiven]] = None, + **kwargs: Any, + ) -> OpenAIAssistantRunnable: + """Create an OpenAI Assistant and instantiate the Runnable. + + Args: + name: Assistant name. + instructions: Assistant instructions. 
+ tools: Assistant tools. Can be passed in OpenAI format or as BaseTools. + tool_resources: Assistant tool resources. Can be passed in OpenAI format + model: Assistant model to use. + client: OpenAI or AzureOpenAI client. + Will create default OpenAI client (Assistant v2) if not specified. + + Returns: + OpenAIAssistantRunnable configured to run using the created assistant. + """ + + client = client or _get_openai_client() + if tool_resources is None: + from openai._types import NOT_GIVEN + + tool_resources = NOT_GIVEN + assistant = client.beta.assistants.create( + name=name, + instructions=instructions, + tools=[_get_assistants_tool(tool) for tool in tools], # type: ignore + tool_resources=tool_resources, + model=model, + ) + return cls(assistant_id=assistant.id, client=client, **kwargs) + + def invoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> OutputType: + """Invoke assistant. + + Args: + input: Runnable input dict that can have: + content: User message when starting a new run. + thread_id: Existing thread to use. + run_id: Existing run to use. Should only be supplied when providing + the tool output for a required action after an initial invocation. + file_ids: (deprecated) File ids to include in new run. Use + 'attachments' instead + attachments: Assistant files to include in new run. (v2 API). + message_metadata: Metadata to associate with new message. + thread_metadata: Metadata to associate with new thread. Only relevant + when new thread being created. + instructions: Additional run instructions. + model: Override Assistant model for this run. + tools: Override Assistant tools for this run. + tool_resources: Override Assistant tool resources for this run (v2 API). + run_metadata: Metadata to associate with new run. + config: Runnable config: + + Return: + If self.as_agent, will return + Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. 
Otherwise, + will return OpenAI types + Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. + """ + + config = ensure_config(config) + callback_manager = CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + inheritable_tags=config.get("tags"), + inheritable_metadata=config.get("metadata"), + ) + run_manager = callback_manager.on_chain_start( + dumpd(self), input, name=config.get("run_name") + ) + + files = _convert_file_ids_into_attachments(kwargs.get("file_ids", [])) + attachments = kwargs.get("attachments", []) + files + + try: + # Being run within AgentExecutor and there are tool outputs to submit. + if self.as_agent and input.get("intermediate_steps"): + tool_outputs = self._parse_intermediate_steps( + input["intermediate_steps"] + ) + run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs) + # Starting a new thread and a new run. + elif "thread_id" not in input: + thread = { + "messages": [ + { + "role": "user", + "content": input["content"], + "attachments": attachments, + "metadata": input.get("message_metadata"), + } + ], + "metadata": input.get("thread_metadata"), + } + run = self._create_thread_and_run(input, thread) + # Starting a new run in an existing thread. + elif "run_id" not in input: + _ = self.client.beta.threads.messages.create( + input["thread_id"], + content=input["content"], + role="user", + attachments=attachments, + metadata=input.get("message_metadata"), + ) + run = self._create_run(input) + # Submitting tool outputs to an existing run, outside the AgentExecutor + # framework. 
+ else: + run = self.client.beta.threads.runs.submit_tool_outputs(**input) + run = self._wait_for_run(run.id, run.thread_id) + except BaseException as e: + run_manager.on_chain_error(e) + raise e + try: + response = self._get_response(run) + except BaseException as e: + run_manager.on_chain_error(e, metadata=run.dict()) + raise e + else: + run_manager.on_chain_end(response) + return response + + @classmethod + async def acreate_assistant( + cls, + name: str, + instructions: str, + tools: Sequence[Union[BaseTool, dict]], + model: str, + *, + async_client: Optional[ + Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI] + ] = None, + tool_resources: Optional[Union[AssistantToolResources, dict, NotGiven]] = None, + **kwargs: Any, + ) -> OpenAIAssistantRunnable: + """Create an AsyncOpenAI Assistant and instantiate the Runnable. + + Args: + name: Assistant name. + instructions: Assistant instructions. + tools: Assistant tools. Can be passed in OpenAI format or as BaseTools. + tool_resources: Assistant tool resources. Can be passed in OpenAI format + model: Assistant model to use. + async_client: AsyncOpenAI client. + Will create default async_client if not specified. + + Returns: + AsyncOpenAIAssistantRunnable configured to run using the created assistant. + """ + async_client = async_client or _get_openai_async_client() + if tool_resources is None: + from openai._types import NOT_GIVEN + + tool_resources = NOT_GIVEN + openai_tools = [_get_assistants_tool(tool) for tool in tools] + + assistant = await async_client.beta.assistants.create( + name=name, + instructions=instructions, + tools=openai_tools, # type: ignore + tool_resources=tool_resources, + model=model, + ) + return cls(assistant_id=assistant.id, async_client=async_client, **kwargs) + + async def ainvoke( + self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any + ) -> OutputType: + """Async invoke assistant. 
+ + Args: + input: Runnable input dict that can have: + content: User message when starting a new run. + thread_id: Existing thread to use. + run_id: Existing run to use. Should only be supplied when providing + the tool output for a required action after an initial invocation. + file_ids: (deprecated) File ids to include in new run. Use + 'attachments' instead + attachments: Assistant files to include in new run. (v2 API). + message_metadata: Metadata to associate with new message. + thread_metadata: Metadata to associate with new thread. Only relevant + when new thread being created. + instructions: Additional run instructions. + model: Override Assistant model for this run. + tools: Override Assistant tools for this run. + tool_resources: Override Assistant tool resources for this run (v2 API). + run_metadata: Metadata to associate with new run. + config: Runnable config: + + Return: + If self.as_agent, will return + Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise, + will return OpenAI types + Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. + """ + + config = config or {} + callback_manager = CallbackManager.configure( + inheritable_callbacks=config.get("callbacks"), + inheritable_tags=config.get("tags"), + inheritable_metadata=config.get("metadata"), + ) + run_manager = callback_manager.on_chain_start( + dumpd(self), input, name=config.get("run_name") + ) + + files = _convert_file_ids_into_attachments(kwargs.get("file_ids", [])) + attachments = kwargs.get("attachments", []) + files + + try: + # Being run within AgentExecutor and there are tool outputs to submit. + if self.as_agent and input.get("intermediate_steps"): + tool_outputs = self._parse_intermediate_steps( + input["intermediate_steps"] + ) + run = await self.async_client.beta.threads.runs.submit_tool_outputs( + **tool_outputs + ) + # Starting a new thread and a new run. 
+ elif "thread_id" not in input: + thread = { + "messages": [ + { + "role": "user", + "content": input["content"], + "attachments": attachments, + "metadata": input.get("message_metadata"), + } + ], + "metadata": input.get("thread_metadata"), + } + run = await self._acreate_thread_and_run(input, thread) + # Starting a new run in an existing thread. + elif "run_id" not in input: + _ = await self.async_client.beta.threads.messages.create( + input["thread_id"], + content=input["content"], + role="user", + attachments=attachments, + metadata=input.get("message_metadata"), + ) + run = await self._acreate_run(input) + # Submitting tool outputs to an existing run, outside the AgentExecutor + # framework. + else: + run = await self.async_client.beta.threads.runs.submit_tool_outputs( + **input + ) + run = await self._await_for_run(run.id, run.thread_id) + except BaseException as e: + run_manager.on_chain_error(e) + raise e + try: + response = self._get_response(run) + except BaseException as e: + run_manager.on_chain_error(e, metadata=run.dict()) + raise e + else: + run_manager.on_chain_end(response) + return response + + def _create_run(self, input: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k in ("instructions", "model", "tools", "tool_resources", "run_metadata") + } + return self.client.beta.threads.runs.create( + input["thread_id"], + assistant_id=self.assistant_id, + **params, + ) + + def _create_thread_and_run(self, input: dict, thread: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k in ("instructions", "model", "tools", "tool_resources", "run_metadata") + } + run = self.client.beta.threads.create_and_run( + assistant_id=self.assistant_id, + thread=thread, + **params, + ) + return run + + async def _acreate_run(self, input: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k in ("instructions", "model", "tools", "tool_resources", "run_metadata") + } + return await self.async_client.beta.threads.runs.create(
+ input["thread_id"], + assistant_id=self.assistant_id, + **params, + ) + + async def _acreate_thread_and_run(self, input: dict, thread: dict) -> Any: + params = { + k: v + for k, v in input.items() + if k in ("instructions", "model", "tools", "tool_resources", "run_metadata") + } + run = await self.async_client.beta.threads.create_and_run( + assistant_id=self.assistant_id, + thread=thread, + **params, + ) + return run diff --git a/libs/community/langchain_community/callbacks/manager.py b/libs/community/langchain_community/callbacks/manager.py index f5b4530ea29..ba942084953 100644 --- a/libs/community/langchain_community/callbacks/manager.py +++ b/libs/community/langchain_community/callbacks/manager.py @@ -28,7 +28,7 @@ bedrock_anthropic_callback_var: (ContextVar)[ wandb_tracing_callback_var: ContextVar[Optional[WandbTracer]] = ContextVar( "tracing_wandb_callback", default=None ) -comet_tracing_callback_var: ContextVar[Optional[CometTracer]] = ContextVar( # noqa: E501 +comet_tracing_callback_var: ContextVar[Optional[CometTracer]] = ContextVar( "tracing_comet_callback", default=None ) diff --git a/libs/community/langchain_community/callbacks/openai_info.py b/libs/community/langchain_community/callbacks/openai_info.py index 532f133b269..a551b723ea8 100644 --- a/libs/community/langchain_community/callbacks/openai_info.py +++ b/libs/community/langchain_community/callbacks/openai_info.py @@ -1,4 +1,5 @@ """Callback Handler that prints to std out.""" + import threading from typing import Any, Dict, List @@ -87,13 +88,15 @@ MODEL_COST_PER_1K_TOKENS = { # Fine Tuned input "babbage-002-finetuned": 0.0016, "davinci-002-finetuned": 0.012, - "gpt-3.5-turbo-0613-finetuned": 0.012, - "gpt-3.5-turbo-1106-finetuned": 0.012, + "gpt-3.5-turbo-0613-finetuned": 0.003, + "gpt-3.5-turbo-1106-finetuned": 0.003, + "gpt-3.5-turbo-0125-finetuned": 0.003, # Fine Tuned output "babbage-002-finetuned-completion": 0.0016, "davinci-002-finetuned-completion": 0.012, - 
"gpt-3.5-turbo-0613-finetuned-completion": 0.016, - "gpt-3.5-turbo-1106-finetuned-completion": 0.016, + "gpt-3.5-turbo-0613-finetuned-completion": 0.006, + "gpt-3.5-turbo-1106-finetuned-completion": 0.006, + "gpt-3.5-turbo-0125-finetuned-completion": 0.006, # Azure Fine Tuned input "babbage-002-azure-finetuned": 0.0004, "davinci-002-azure-finetuned": 0.002, diff --git a/libs/community/langchain_community/callbacks/tracers/comet.py b/libs/community/langchain_community/callbacks/tracers/comet.py index 6695db45573..099f39f82bf 100644 --- a/libs/community/langchain_community/callbacks/tracers/comet.py +++ b/libs/community/langchain_community/callbacks/tracers/comet.py @@ -66,9 +66,11 @@ class CometTracer(BaseTracer): run_dict: Dict[str, Any] = run.dict() if not run.parent_run_id: # This is the first run, which maps to a chain + metadata = run_dict["extra"].get("metadata", None) + chain_: "Chain" = self._chain.Chain( inputs=run_dict["inputs"], - metadata=None, + metadata=metadata, experiment_info=self._experiment_info.get(), ) self._chains_map[run.id] = chain_ diff --git a/libs/community/langchain_community/chat_loaders/__init__.py b/libs/community/langchain_community/chat_loaders/__init__.py index 22576c417f8..255b9b1598d 100644 --- a/libs/community/langchain_community/chat_loaders/__init__.py +++ b/libs/community/langchain_community/chat_loaders/__init__.py @@ -81,6 +81,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/chat_loaders/langsmith.py b/libs/community/langchain_community/chat_loaders/langsmith.py index 02a16d895e4..7d099632c0b 100644 --- a/libs/community/langchain_community/chat_loaders/langsmith.py +++ b/libs/community/langchain_community/chat_loaders/langsmith.py @@ -143,7 +143,7 @@ class 
LangSmithDatasetChatLoader(BaseChatLoader): :return: Iterator of chat sessions containing messages. """ - from langchain_community.adapters import openai as oai_adapter # noqa: E402 + from langchain_community.adapters import openai as oai_adapter data = self.client.read_dataset_openai_finetuning( dataset_name=self.dataset_name diff --git a/libs/community/langchain_community/chat_message_histories/__init__.py b/libs/community/langchain_community/chat_message_histories/__init__.py index 524ddaad65f..99b4aae14fc 100644 --- a/libs/community/langchain_community/chat_message_histories/__init__.py +++ b/libs/community/langchain_community/chat_message_histories/__init__.py @@ -82,6 +82,9 @@ if TYPE_CHECKING: from langchain_community.chat_message_histories.zep import ( ZepChatMessageHistory, ) + from langchain_community.chat_message_histories.zep_cloud import ( + ZepCloudChatMessageHistory, + ) __all__ = [ "AstraDBChatMessageHistory", @@ -105,6 +108,7 @@ __all__ = [ "UpstashRedisChatMessageHistory", "XataChatMessageHistory", "ZepChatMessageHistory", + "ZepCloudChatMessageHistory", ] _module_lookup = { @@ -129,6 +133,7 @@ _module_lookup = { "UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories.upstash_redis", # noqa: E501 "XataChatMessageHistory": "langchain_community.chat_message_histories.xata", "ZepChatMessageHistory": "langchain_community.chat_message_histories.zep", + "ZepCloudChatMessageHistory": "langchain_community.chat_message_histories.zep_cloud", # noqa: E501 } @@ -137,6 +142,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/chat_message_histories/cassandra.py b/libs/community/langchain_community/chat_message_histories/cassandra.py index 4960f20a2b0..1017cbe3952 100644 --- 
a/libs/community/langchain_community/chat_message_histories/cassandra.py +++ b/libs/community/langchain_community/chat_message_histories/cassandra.py @@ -3,10 +3,13 @@ from __future__ import annotations import json import uuid -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence + +from langchain_community.utilities.cassandra import SetupMode if TYPE_CHECKING: from cassandra.cluster import Session + from cassio.table.table_types import RowType from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.messages import ( @@ -19,19 +22,14 @@ DEFAULT_TABLE_NAME = "message_store" DEFAULT_TTL_SECONDS = None +def _rows_to_messages(rows: Iterable[RowType]) -> List[BaseMessage]: + message_blobs = [row["body_blob"] for row in rows][::-1] + items = [json.loads(message_blob) for message_blob in message_blobs] + messages = messages_from_dict(items) + return messages + + class CassandraChatMessageHistory(BaseChatMessageHistory): - """Chat message history that stores history in Cassandra. - - Args: - session_id: arbitrary key that is used to store the messages - of a single chat session. - session: Cassandra driver session. If not provided, it is resolved from cassio. - keyspace: Cassandra key space. If not provided, it is resolved from cassio. - table_name: name of the table to use. - ttl_seconds: time-to-live (seconds) for automatic expiration - of stored entries. None (default) for no expiration. - """ - def __init__( self, session_id: str, @@ -39,7 +37,22 @@ class CassandraChatMessageHistory(BaseChatMessageHistory): keyspace: Optional[str] = None, table_name: str = DEFAULT_TABLE_NAME, ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS, + *, + setup_mode: SetupMode = SetupMode.SYNC, ) -> None: + """Chat message history that stores history in Cassandra. + + Args: + session_id: arbitrary key that is used to store the messages + of a single chat session. 
+ session: Cassandra driver session. + If not provided, it is resolved from cassio. + keyspace: Cassandra key space. If not provided, it is resolved from cassio. + table_name: name of the table to use. + ttl_seconds: time-to-live (seconds) for automatic expiration + of stored entries. None (default) for no expiration. + setup_mode: mode used to create the Cassandra table (SYNC, ASYNC or OFF). + """ try: from cassio.table import ClusteredCassandraTable except (ImportError, ModuleNotFoundError): @@ -49,6 +62,9 @@ class CassandraChatMessageHistory(BaseChatMessageHistory): ) self.session_id = session_id self.ttl_seconds = ttl_seconds + kwargs: Dict[str, Any] = {} + if setup_mode == SetupMode.ASYNC: + kwargs["async_setup"] = True self.table = ClusteredCassandraTable( session=session, keyspace=keyspace, @@ -56,21 +72,26 @@ class CassandraChatMessageHistory(BaseChatMessageHistory): ttl_seconds=ttl_seconds, primary_key_type=["TEXT", "TIMEUUID"], ordering_in_partition="DESC", + skip_provisioning=setup_mode == SetupMode.OFF, + **kwargs, ) @property def messages(self) -> List[BaseMessage]: # type: ignore """Retrieve all session messages from DB""" # The latest are returned, in chronological order - message_blobs = [ - row["body_blob"] - for row in self.table.get_partition( - partition_id=self.session_id, - ) - ][::-1] - items = [json.loads(message_blob) for message_blob in message_blobs] - messages = messages_from_dict(items) - return messages + rows = self.table.get_partition( + partition_id=self.session_id, + ) + return _rows_to_messages(rows) + + async def aget_messages(self) -> List[BaseMessage]: + """Retrieve all session messages from DB""" + # The latest are returned, in chronological order + rows = await self.table.aget_partition( + partition_id=self.session_id, + ) + return _rows_to_messages(rows) def add_message(self, message: BaseMessage) -> None: """Write a message to the table @@ -86,6 +107,20 @@ class CassandraChatMessageHistory(BaseChatMessageHistory): 
ttl_seconds=self.ttl_seconds, ) + async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: + for message in messages: + this_row_id = uuid.uuid1() + await self.table.aput( + partition_id=self.session_id, + row_id=this_row_id, + body_blob=json.dumps(message_to_dict(message)), + ttl_seconds=self.ttl_seconds, + ) + def clear(self) -> None: """Clear session memory from DB""" self.table.delete_partition(self.session_id) + + async def aclear(self) -> None: + """Clear session memory from DB""" + await self.table.adelete_partition(self.session_id) diff --git a/libs/community/langchain_community/chat_message_histories/cosmos_db.py b/libs/community/langchain_community/chat_message_histories/cosmos_db.py index 4210d7a7076..87fc1e8be50 100644 --- a/libs/community/langchain_community/chat_message_histories/cosmos_db.py +++ b/libs/community/langchain_community/chat_message_histories/cosmos_db.py @@ -62,7 +62,7 @@ class CosmosDBChatMessageHistory(BaseChatMessageHistory): self.messages: List[BaseMessage] = [] try: - from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501 + from azure.cosmos import ( # pylint: disable=import-outside-toplevel CosmosClient, ) except ImportError as exc: @@ -91,7 +91,7 @@ class CosmosDBChatMessageHistory(BaseChatMessageHistory): Use this function or the context manager to make sure your database is ready. 
""" try: - from azure.cosmos import ( # pylint: disable=import-outside-toplevel # noqa: E501 + from azure.cosmos import ( # pylint: disable=import-outside-toplevel PartitionKey, ) except ImportError as exc: @@ -128,7 +128,7 @@ class CosmosDBChatMessageHistory(BaseChatMessageHistory): if not self._container: raise ValueError("Container not initialized") try: - from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel # noqa: E501 + from azure.cosmos.exceptions import ( # pylint: disable=import-outside-toplevel CosmosHttpResponseError, ) except ImportError as exc: diff --git a/libs/community/langchain_community/chat_message_histories/zep_cloud.py b/libs/community/langchain_community/chat_message_histories/zep_cloud.py new file mode 100644 index 00000000000..a97cfcfbcc5 --- /dev/null +++ b/libs/community/langchain_community/chat_message_histories/zep_cloud.py @@ -0,0 +1,285 @@ +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence + +from langchain_core.chat_history import BaseChatMessageHistory +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, +) + +if TYPE_CHECKING: + from zep_cloud import ( + Memory, + MemoryGetRequestMemoryType, + MemorySearchResult, + Message, + NotFoundError, + RoleType, + SearchScope, + SearchType, + ) + +logger = logging.getLogger(__name__) + + +def condense_zep_memory_into_human_message(zep_memory: Memory) -> BaseMessage: + prompt = "" + if zep_memory.facts: + prompt = "\n".join(zep_memory.facts) + if zep_memory.summary and zep_memory.summary.content: + prompt += "\n" + zep_memory.summary.content + for msg in zep_memory.messages or []: + prompt += f"\n{msg.role or msg.role_type}: {msg.content}" + return HumanMessage(content=prompt) + + +def get_zep_message_role_type(role: str) -> RoleType: + if role == "human": + return "user" + elif role == "ai": + return "assistant" + elif role == "system": + return "system" + elif 
role == "function": + return "function" + elif role == "tool": + return "tool" + else: + return "system" + + +class ZepCloudChatMessageHistory(BaseChatMessageHistory): + """Chat message history that uses Zep Cloud as a backend. + + Recommended usage:: + + # Set up Zep Chat History + zep_chat_history = ZepChatMessageHistory( + session_id=session_id, + api_key=, + ) + + # Use a standard ConversationBufferMemory to encapsulate the Zep chat history + memory = ConversationBufferMemory( + memory_key="chat_history", chat_memory=zep_chat_history + ) + + Zep - Recall, understand, and extract data from chat histories. + Power personalized AI experiences. + + Zep is a long-term memory service for AI Assistant apps. + With Zep, you can provide AI assistants with the + ability to recall past conversations, + no matter how distant, + while also reducing hallucinations, latency, and cost. + + see Zep Cloud Docs: https://help.getzep.com + + This class is a thin wrapper around the zep-python package. Additional + Zep functionality is exposed via the `zep_summary`, `zep_messages` and `zep_facts` + properties. + + For more information on the zep-python package, see: + https://github.com/getzep/zep-python + """ + + def __init__( + self, + session_id: str, + api_key: str, + *, + memory_type: Optional[MemoryGetRequestMemoryType] = None, + lastn: Optional[int] = None, + ai_prefix: Optional[str] = None, + human_prefix: Optional[str] = None, + summary_instruction: Optional[str] = None, + ) -> None: + try: + from zep_cloud.client import AsyncZep, Zep + except ImportError: + raise ImportError( + "Could not import zep-cloud package. " + "Please install it with `pip install zep-cloud`." 
+ ) + + self.zep_client = Zep(api_key=api_key) + self.zep_client_async = AsyncZep(api_key=api_key) + self.session_id = session_id + + self.memory_type = memory_type or "perpetual" + self.lastn = lastn + self.ai_prefix = ai_prefix or "ai" + self.human_prefix = human_prefix or "human" + self.summary_instruction = summary_instruction + + @property + def messages(self) -> List[BaseMessage]: # type: ignore + """Retrieve messages from Zep memory""" + zep_memory: Optional[Memory] = self._get_memory() + if not zep_memory: + return [] + + return [condense_zep_memory_into_human_message(zep_memory)] + + @property + def zep_messages(self) -> List[Message]: + """Retrieve summary from Zep memory""" + zep_memory: Optional[Memory] = self._get_memory() + if not zep_memory: + return [] + + return zep_memory.messages or [] + + @property + def zep_summary(self) -> Optional[str]: + """Retrieve summary from Zep memory""" + zep_memory: Optional[Memory] = self._get_memory() + if not zep_memory or not zep_memory.summary: + return None + + return zep_memory.summary.content + + @property + def zep_facts(self) -> Optional[List[str]]: + """Retrieve conversation facts from Zep memory""" + if self.memory_type != "perpetual": + return None + zep_memory: Optional[Memory] = self._get_memory() + if not zep_memory or not zep_memory.facts: + return None + + return zep_memory.facts + + def _get_memory(self) -> Optional[Memory]: + """Retrieve memory from Zep""" + from zep_cloud import NotFoundError + + try: + zep_memory: Memory = self.zep_client.memory.get( + self.session_id, memory_type=self.memory_type, lastn=self.lastn + ) + except NotFoundError: + logger.warning( + f"Session {self.session_id} not found in Zep. Returning None" + ) + return None + return zep_memory + + def add_user_message( # type: ignore[override] + self, message: str, metadata: Optional[Dict[str, Any]] = None + ) -> None: + """Convenience method for adding a human message string to the store. 
+ + Args: + message: The string contents of a human message. + metadata: Optional metadata to attach to the message. + """ + self.add_message(HumanMessage(content=message), metadata=metadata) + + def add_ai_message( # type: ignore[override] + self, message: str, metadata: Optional[Dict[str, Any]] = None + ) -> None: + """Convenience method for adding an AI message string to the store. + + Args: + message: The string contents of an AI message. + metadata: Optional metadata to attach to the message. + """ + self.add_message(AIMessage(content=message), metadata=metadata) + + def add_message( + self, message: BaseMessage, metadata: Optional[Dict[str, Any]] = None + ) -> None: + """Append the message to the Zep memory history""" + from zep_cloud import Message + + self.zep_client.memory.add( + self.session_id, + messages=[ + Message( + content=str(message.content), + role=message.type, + role_type=get_zep_message_role_type(message.type), + metadata=metadata, + ) + ], + ) + + def add_messages(self, messages: Sequence[BaseMessage]) -> None: + """Append the messages to the Zep memory history""" + from zep_cloud import Message + + zep_messages = [ + Message( + content=str(message.content), + role=message.type, + role_type=get_zep_message_role_type(message.type), + metadata=message.additional_kwargs.get("metadata", None), + ) + for message in messages + ] + + self.zep_client.memory.add(self.session_id, messages=zep_messages) + + async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None: + """Append the messages to the Zep memory history asynchronously""" + from zep_cloud import Message + + zep_messages = [ + Message( + content=str(message.content), + role=message.type, + role_type=get_zep_message_role_type(message.type), + metadata=message.additional_kwargs.get("metadata", None), + ) + for message in messages + ] + + await self.zep_client_async.memory.add(self.session_id, messages=zep_messages) + + def search( + self, + query: str, + metadata: Optional[Dict] = 
None, + search_scope: SearchScope = "messages", + search_type: SearchType = "similarity", + mmr_lambda: Optional[float] = None, + limit: Optional[int] = None, + ) -> List[MemorySearchResult]: + """Search Zep memory for messages matching the query""" + + return self.zep_client.memory.search( + self.session_id, + text=query, + metadata=metadata, + search_scope=search_scope, + search_type=search_type, + mmr_lambda=mmr_lambda, + limit=limit, + ) + + def clear(self) -> None: + """Clear session memory from Zep. Note that Zep is long-term storage for memory + and this is not advised unless you have specific data retention requirements. + """ + try: + self.zep_client.memory.delete(self.session_id) + except NotFoundError: + logger.warning( + f"Session {self.session_id} not found in Zep. Skipping delete." + ) + + async def aclear(self) -> None: + """Clear session memory from Zep asynchronously. + Note that Zep is long-term storage for memory and this is not advised + unless you have specific data retention requirements. + """ + try: + await self.zep_client_async.memory.delete(self.session_id) + except NotFoundError: + logger.warning( + f"Session {self.session_id} not found in Zep. Skipping delete." 
+ ) diff --git a/libs/community/langchain_community/chat_models/__init__.py b/libs/community/langchain_community/chat_models/__init__.py index bfd52359962..ba9ee7c108d 100644 --- a/libs/community/langchain_community/chat_models/__init__.py +++ b/libs/community/langchain_community/chat_models/__init__.py @@ -120,6 +120,7 @@ if TYPE_CHECKING: from langchain_community.chat_models.mlx import ( ChatMLX, ) + from langchain_community.chat_models.octoai import ChatOctoAI from langchain_community.chat_models.ollama import ( ChatOllama, ) @@ -171,6 +172,7 @@ __all__ = [ "ChatBaichuan", "ChatCohere", "ChatCoze", + "ChatOctoAI", "ChatDatabricks", "ChatDeepInfra", "ChatEverlyAI", @@ -271,6 +273,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/chat_models/azure_openai.py b/libs/community/langchain_community/chat_models/azure_openai.py index 3834668c1f9..3238cabe746 100644 --- a/libs/community/langchain_community/chat_models/azure_openai.py +++ b/libs/community/langchain_community/chat_models/azure_openai.py @@ -85,7 +85,7 @@ class AzureChatOpenAI(ChatOpenAI): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. - """ # noqa: E501 + """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. 
diff --git a/libs/community/langchain_community/chat_models/azureml_endpoint.py b/libs/community/langchain_community/chat_models/azureml_endpoint.py index 5f2a3f59c34..2629c5a0ba2 100644 --- a/libs/community/langchain_community/chat_models/azureml_endpoint.py +++ b/libs/community/langchain_community/chat_models/azureml_endpoint.py @@ -233,7 +233,7 @@ class AzureMLChatOnlineEndpoint(BaseChatModel, AzureMLBaseEndpoint): endpoint_api_key="my-api-key", content_formatter=chat_content_formatter, ) - """ # noqa: E501 + """ @property def _identifying_params(self) -> Dict[str, Any]: diff --git a/libs/community/langchain_community/chat_models/javelin_ai_gateway.py b/libs/community/langchain_community/chat_models/javelin_ai_gateway.py index 6c90103fae9..c45b034e493 100644 --- a/libs/community/langchain_community/chat_models/javelin_ai_gateway.py +++ b/libs/community/langchain_community/chat_models/javelin_ai_gateway.py @@ -18,7 +18,7 @@ from langchain_core.outputs import ( ChatGeneration, ChatResult, ) -from langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr +from langchain_core.pydantic_v1 import BaseModel, Extra, Field, SecretStr logger = logging.getLogger(__name__) @@ -65,9 +65,14 @@ class ChatJavelinAIGateway(BaseChatModel): client: Any """javelin client.""" - javelin_api_key: Optional[SecretStr] = None + javelin_api_key: Optional[SecretStr] = Field(None, alias="api_key") """The API key for the Javelin AI Gateway.""" + class Config: + """Configuration for this pydantic object.""" + + allow_population_by_field_name = True + def __init__(self, **kwargs: Any): try: from javelin_sdk import ( diff --git a/libs/community/langchain_community/chat_models/mlflow.py b/libs/community/langchain_community/chat_models/mlflow.py index c09a3b13c74..b800d2221e3 100644 --- a/libs/community/langchain_community/chat_models/mlflow.py +++ b/libs/community/langchain_community/chat_models/mlflow.py @@ -174,9 +174,16 @@ class ChatMlflow(BaseChatModel): ) # TODO: check if 
`_client.predict_stream` is available. chunk_iter = self._client.predict_stream(endpoint=self.endpoint, inputs=data) + first_chunk_role = None for chunk in chunk_iter: choice = chunk["choices"][0] - chunk = ChatMlflow._convert_delta_to_message_chunk(choice["delta"]) + + chunk_delta = choice["delta"] + if first_chunk_role is None: + first_chunk_role = chunk_delta.get("role") + chunk = ChatMlflow._convert_delta_to_message_chunk( + chunk_delta, first_chunk_role + ) generation_info = {} if finish_reason := choice.get("finish_reason"): @@ -225,8 +232,10 @@ class ChatMlflow(BaseChatModel): return ChatMessage(content=content, role=role) @staticmethod - def _convert_delta_to_message_chunk(_dict: Mapping[str, Any]) -> BaseMessageChunk: - role = _dict["role"] + def _convert_delta_to_message_chunk( + _dict: Mapping[str, Any], default_role: str + ) -> BaseMessageChunk: + role = _dict.get("role", default_role) content = _dict["content"] if role == "user": return HumanMessageChunk(content=content) diff --git a/libs/community/langchain_community/chat_models/sparkllm.py b/libs/community/langchain_community/chat_models/sparkllm.py index 7d122f0a0c8..20dc1380c9d 100644 --- a/libs/community/langchain_community/chat_models/sparkllm.py +++ b/libs/community/langchain_community/chat_models/sparkllm.py @@ -43,6 +43,9 @@ from langchain_core.utils import ( logger = logging.getLogger(__name__) +SPARK_API_URL = "wss://spark-api.xf-yun.com/v3.5/chat" +SPARK_LLM_DOMAIN = "generalv3.5" + def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): @@ -108,7 +111,7 @@ class ChatSparkLLM(BaseChatModel): Extra infos: 1. Get app_id, api_key, api_secret from the iFlyTek Open Platform Console: https://console.xfyun.cn/services/bm35 - 2. By default, iFlyTek Spark LLM V3.0 is invoked. + 2. By default, iFlyTek Spark LLM V3.5 is invoked. 
If you need to invoke other versions, please configure the corresponding parameters(spark_api_url and spark_llm_domain) according to the document: https://www.xfyun.cn/doc/spark/Web.html @@ -134,17 +137,31 @@ class ChatSparkLLM(BaseChatModel): } client: Any = None #: :meta private: - spark_app_id: Optional[str] = None + spark_app_id: Optional[str] = Field(default=None, alias="app_id") + """Automatically inferred from env var `IFLYTEK_SPARK_APP_ID` + if not provided.""" spark_api_key: Optional[str] = Field(default=None, alias="api_key") - spark_api_secret: Optional[str] = None - spark_api_url: Optional[str] = None - spark_llm_domain: Optional[str] = None + """Automatically inferred from env var `IFLYTEK_SPARK_API_KEY` + if not provided.""" + spark_api_secret: Optional[str] = Field(default=None, alias="api_secret") + """Automatically inferred from env var `IFLYTEK_SPARK_API_SECRET` + if not provided.""" + spark_api_url: Optional[str] = Field(default=None, alias="api_url") + """Base URL path for API requests, leave blank if not using a proxy or service + emulator.""" + spark_llm_domain: Optional[str] = Field(default=None, alias="model") + """Model name to use.""" spark_user_id: str = "lc_user" streaming: bool = False + """Whether to stream the results or not.""" request_timeout: int = Field(30, alias="timeout") + """request timeout for chat http requests""" temperature: float = Field(default=0.5) + """What sampling temperature to use.""" top_k: int = 4 + """What search sampling control to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for API call not explicitly specified.""" class Config: """Configuration for this pydantic object.""" @@ -199,13 +216,13 @@ class ChatSparkLLM(BaseChatModel): values, "spark_api_url", "IFLYTEK_SPARK_API_URL", - "wss://spark-api.xf-yun.com/v3.1/chat", + SPARK_API_URL, ) values["spark_llm_domain"] = get_from_dict_or_env( values, "spark_llm_domain", "IFLYTEK_SPARK_LLM_DOMAIN", - 
"generalv3", + SPARK_LLM_DOMAIN, ) # put extra params into model_kwargs values["model_kwargs"]["temperature"] = values["temperature"] or cls.temperature @@ -307,12 +324,10 @@ class _SparkLLMClient: "Please install it with `pip install websocket-client`." ) - self.api_url = ( - "wss://spark-api.xf-yun.com/v3.1/chat" if not api_url else api_url - ) + self.api_url = SPARK_API_URL if not api_url else api_url self.app_id = app_id self.model_kwargs = model_kwargs - self.spark_domain = spark_domain or "generalv3" + self.spark_domain = spark_domain or SPARK_LLM_DOMAIN self.queue: Queue[Dict] = Queue() self.blocking_message = {"content": "", "role": "assistant"} self.api_key = api_key diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py index 10d50c0a373..225dfb7c9d4 100644 --- a/libs/community/langchain_community/chat_models/tongyi.py +++ b/libs/community/langchain_community/chat_models/tongyi.py @@ -241,8 +241,14 @@ class ChatTongyi(BaseChatModel): client: Any #: :meta private: model_name: str = Field(default="qwen-turbo", alias="model") - - """Model name to use.""" + """Model name to use. + callable multimodal model: + - qwen-vl-v1 + - qwen-vl-chat-v1 + - qwen-audio-turbo + - qwen-vl-plus + - qwen-vl-max + """ model_kwargs: Dict[str, Any] = Field(default_factory=dict) top_p: float = 0.8 @@ -280,18 +286,34 @@ class ChatTongyi(BaseChatModel): "Could not import dashscope python package. " "Please install it with `pip install dashscope --upgrade`." 
) - try: - if "vl" in values["model_name"]: + dashscope_multimodal_models = [ + "qwen-vl-v1", + "qwen-vl-chat-v1", + "qwen-audio-turbo", + "qwen-vl-plus", + "qwen-vl-max", + ] + if ( + values["model_name"] in dashscope_multimodal_models + or "vl" in values["model_name"] + ): + try: values["client"] = dashscope.MultiModalConversation - else: + except AttributeError: + raise ValueError( + "`dashscope` has no `MultiModalConversation` attribute, this is " + "likely due to an old version of the dashscope package. Try " + "upgrading it with `pip install --upgrade dashscope`." + ) + else: + try: values["client"] = dashscope.Generation - except AttributeError: - raise ValueError( - "`dashscope` has no `Generation` attribute, this is likely " - "due to an old version of the dashscope package. Try upgrading it " - "with `pip install --upgrade dashscope`." - ) - + except AttributeError: + raise ValueError( + "`dashscope` has no `Generation` attribute, this is likely " + "due to an old version of the dashscope package. Try upgrading it " + "with `pip install --upgrade dashscope`." 
+ ) return values @property diff --git a/libs/community/langchain_community/docstore/__init__.py b/libs/community/langchain_community/docstore/__init__.py index c7d2e87c97f..ea98f747acc 100644 --- a/libs/community/langchain_community/docstore/__init__.py +++ b/libs/community/langchain_community/docstore/__init__.py @@ -29,8 +29,6 @@ if TYPE_CHECKING: Wikipedia, ) -__all__ = ["DocstoreFn", "InMemoryDocstore", "Wikipedia"] - _module_lookup = { "DocstoreFn": "langchain_community.docstore.arbitrary_fn", "InMemoryDocstore": "langchain_community.docstore.in_memory", @@ -45,4 +43,4 @@ def __getattr__(name: str) -> Any: raise AttributeError(f"module {__name__} has no attribute {name}") -__all__ = list(_module_lookup.keys()) +__all__ = ["DocstoreFn", "InMemoryDocstore", "Wikipedia"] diff --git a/libs/community/langchain_community/document_compressors/__init__.py b/libs/community/langchain_community/document_compressors/__init__.py index b3241a9b114..27a15c151d2 100644 --- a/libs/community/langchain_community/document_compressors/__init__.py +++ b/libs/community/langchain_community/document_compressors/__init__.py @@ -6,7 +6,7 @@ if TYPE_CHECKING: FlashrankRerank, ) from langchain_community.document_compressors.jina_rerank import ( - JinaRerank, # noqa: F401 + JinaRerank, ) from langchain_community.document_compressors.llmlingua_filter import ( LLMLinguaCompressor, @@ -14,13 +14,16 @@ if TYPE_CHECKING: from langchain_community.document_compressors.openvino_rerank import ( OpenVINOReranker, ) + from langchain_community.document_compressors.rankllm_rerank import ( + RankLLMRerank, + ) -__all__ = ["LLMLinguaCompressor", "OpenVINOReranker", "FlashrankRerank"] _module_lookup = { "LLMLinguaCompressor": "langchain_community.document_compressors.llmlingua_filter", "OpenVINOReranker": "langchain_community.document_compressors.openvino_rerank", "JinaRerank": "langchain_community.document_compressors.jina_rerank", + "RankLLMRerank": 
"langchain_community.document_compressors.rankllm_rerank", "FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank", } @@ -32,4 +35,10 @@ def __getattr__(name: str) -> Any: raise AttributeError(f"module {__name__} has no attribute {name}") -__all__ = list(_module_lookup.keys()) +__all__ = [ + "LLMLinguaCompressor", + "OpenVINOReranker", + "FlashrankRerank", + "JinaRerank", + "RankLLMRerank", +] diff --git a/libs/community/langchain_community/document_compressors/rankllm_rerank.py b/libs/community/langchain_community/document_compressors/rankllm_rerank.py new file mode 100644 index 00000000000..2864df6fa33 --- /dev/null +++ b/libs/community/langchain_community/document_compressors/rankllm_rerank.py @@ -0,0 +1,124 @@ +from __future__ import annotations + +from copy import deepcopy +from enum import Enum +from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence + +from langchain.retrievers.document_compressors.base import BaseDocumentCompressor +from langchain_core.callbacks.manager import Callbacks +from langchain_core.documents import Document +from langchain_core.pydantic_v1 import Extra, Field, PrivateAttr, root_validator +from langchain_core.utils import get_from_dict_or_env + +if TYPE_CHECKING: + from rank_llm.data import Candidate, Query, Request +else: + # Avoid pydantic annotation issues when actually instantiating + # while keeping this import optional + try: + from rank_llm.data import Candidate, Query, Request + except ImportError: + pass + + +class RankLLMRerank(BaseDocumentCompressor): + """Document compressor using Flashrank interface.""" + + client: Any = None + """RankLLM client to use for compressing documents""" + top_n: int = Field(default=3) + """Top N documents to return.""" + model: str = Field(default="zephyr") + """Name of model to use for reranking.""" + step_size: int = Field(default=10) + """Step size for moving sliding window.""" + gpt_model: str = Field(default="gpt-3.5-turbo") + """OpenAI model name.""" + 
_retriever: Any = PrivateAttr() + + class Config: + """Configuration for this pydantic object.""" + + extra = Extra.forbid + arbitrary_types_allowed = True + + @root_validator(pre=True) + def validate_environment(cls, values: Dict) -> Dict: + """Validate python package exists in environment.""" + + if not values.get("client"): + client_name = values.get("model", "zephyr") + + try: + model_enum = ModelType(client_name.lower()) + except ValueError: + raise ValueError( + "Unsupported model type. Please use 'vicuna', 'zephyr', or 'gpt'." + ) + + try: + if model_enum == ModelType.VICUNA: + from rank_llm.rerank.vicuna_reranker import VicunaReranker + + values["client"] = VicunaReranker() + elif model_enum == ModelType.ZEPHYR: + from rank_llm.rerank.zephyr_reranker import ZephyrReranker + + values["client"] = ZephyrReranker() + elif model_enum == ModelType.GPT: + from rank_llm.rerank.rank_gpt import SafeOpenai + from rank_llm.rerank.reranker import Reranker + + openai_api_key = get_from_dict_or_env( + values, "open_api_key", "OPENAI_API_KEY" + ) + + agent = SafeOpenai( + model=values["gpt_model"], + context_size=4096, + keys=openai_api_key, + ) + values["client"] = Reranker(agent) + + except ImportError: + raise ImportError( + "Could not import rank_llm python package. " + "Please install it with `pip install rank_llm`." 
+ ) + + return values + + def compress_documents( + self, + documents: Sequence[Document], + query: str, + callbacks: Optional[Callbacks] = None, + ) -> Sequence[Document]: + request = Request( + query=Query(text=query, qid=1), + candidates=[ + Candidate(doc={"text": doc.page_content}, docid=index, score=1) + for index, doc in enumerate(documents) + ], + ) + + rerank_results = self.client.rerank( + request, + rank_end=len(documents), + window_size=min(20, len(documents)), + step=10, + ) + + final_results = [] + for res in rerank_results.candidates: + doc = documents[int(res.docid)] + doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata)) + final_results.append(doc_copy) + + return final_results[: self.top_n] + + +class ModelType(Enum): + VICUNA = "vicuna" + ZEPHYR = "zephyr" + GPT = "gpt" diff --git a/libs/community/langchain_community/document_loaders/__init__.py b/libs/community/langchain_community/document_loaders/__init__.py index 5a5f1e4c343..7f111b1aae9 100644 --- a/libs/community/langchain_community/document_loaders/__init__.py +++ b/libs/community/langchain_community/document_loaders/__init__.py @@ -214,7 +214,7 @@ if TYPE_CHECKING: GitHubIssuesLoader, ) from langchain_community.document_loaders.glue_catalog import ( - GlueCatalogLoader, # noqa: F401 + GlueCatalogLoader, ) from langchain_community.document_loaders.google_speech_to_text import ( GoogleSpeechToTextLoader, @@ -332,8 +332,8 @@ if TYPE_CHECKING: OracleAutonomousDatabaseLoader, ) from langchain_community.document_loaders.oracleai import ( - OracleDocLoader, # noqa: F401 - OracleTextSplitter, # noqa: F401 + OracleDocLoader, + OracleTextSplitter, ) from langchain_community.document_loaders.org_mode import ( UnstructuredOrgModeLoader, @@ -403,6 +403,9 @@ if TYPE_CHECKING: from langchain_community.document_loaders.s3_file import ( S3FileLoader, ) + from langchain_community.document_loaders.scrapfly import ( + ScrapflyLoader, + ) from langchain_community.document_loaders.sharepoint 
import ( SharePointLoader, ) @@ -654,6 +657,7 @@ _module_lookup = { "RocksetLoader": "langchain_community.document_loaders.rocksetdb", "S3DirectoryLoader": "langchain_community.document_loaders.s3_directory", "S3FileLoader": "langchain_community.document_loaders.s3_file", + "ScrapflyLoader": "langchain_community.document_loaders.scrapfly", "SQLDatabaseLoader": "langchain_community.document_loaders.sql_database", "SRTLoader": "langchain_community.document_loaders.srt", "SeleniumURLLoader": "langchain_community.document_loaders.url_selenium", @@ -854,6 +858,7 @@ __all__ = [ "RocksetLoader", "S3DirectoryLoader", "S3FileLoader", + "ScrapflyLoader", "SQLDatabaseLoader", "SRTLoader", "SeleniumURLLoader", diff --git a/libs/community/langchain_community/document_loaders/async_html.py b/libs/community/langchain_community/document_loaders/async_html.py index 832ef114446..5bb9d897512 100644 --- a/libs/community/langchain_community/document_loaders/async_html.py +++ b/libs/community/langchain_community/document_loaders/async_html.py @@ -64,6 +64,7 @@ class AsyncHtmlLoader(BaseLoader): ignore_load_errors: bool = False, *, preserve_order: bool = True, + trust_env: bool = False, ): """Initialize with a webpage path.""" @@ -104,6 +105,8 @@ class AsyncHtmlLoader(BaseLoader): self.ignore_load_errors = ignore_load_errors self.preserve_order = preserve_order + self.trust_env = trust_env + def _fetch_valid_connection_docs(self, url: str) -> Any: if self.ignore_load_errors: try: @@ -126,7 +129,7 @@ class AsyncHtmlLoader(BaseLoader): async def _fetch( self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5 ) -> str: - async with aiohttp.ClientSession() as session: + async with aiohttp.ClientSession(trust_env=self.trust_env) as session: for i in range(retries): try: async with session.get( diff --git a/libs/community/langchain_community/document_loaders/base_o365.py b/libs/community/langchain_community/document_loaders/base_o365.py index ddf95bdc764..33a7c5a818a 100644 
--- a/libs/community/langchain_community/document_loaders/base_o365.py +++ b/libs/community/langchain_community/document_loaders/base_o365.py @@ -1,4 +1,5 @@ """Base class for all loaders that uses O365 Package""" + from __future__ import annotations import logging @@ -6,8 +7,8 @@ import os import tempfile from abc import abstractmethod from enum import Enum -from pathlib import Path -from typing import TYPE_CHECKING, Dict, Iterable, List, Sequence, Union +from pathlib import Path, PurePath +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Sequence, Union from langchain_core.pydantic_v1 import ( BaseModel, @@ -108,14 +109,31 @@ class O365BaseLoader(BaseLoader, BaseModel): """ file_mime_types = self._fetch_mime_types items = folder.get_items() + metadata_dict: Dict[str, Dict[str, Any]] = {} with tempfile.TemporaryDirectory() as temp_dir: os.makedirs(os.path.dirname(temp_dir), exist_ok=True) for file in items: if file.is_file: if file.mime_type in list(file_mime_types.values()): file.download(to_path=temp_dir, chunk_size=self.chunk_size) + metadata_dict[file.name] = { + "source": file.web_url, + "mime_type": file.mime_type, + "created": file.created, + "modified": file.modified, + "created_by": str(file.created_by), + "modified_by": str(file.modified_by), + "description": file.description, + } + loader = FileSystemBlobLoader(path=temp_dir) - yield from loader.yield_blobs() + for blob in loader.yield_blobs(): + if not isinstance(blob.path, PurePath): + raise NotImplementedError("Expected blob path to be a PurePath") + if blob.path: + file_metadata_ = metadata_dict.get(str(blob.path), {}) + blob.metadata.update(file_metadata_) + yield blob if self.recursive: for subfolder in folder.get_child_folders(): yield from self._load_from_folder(subfolder) diff --git a/libs/community/langchain_community/document_loaders/blackboard.py b/libs/community/langchain_community/document_loaders/blackboard.py index 682eafb3ceb..0141f2af916 100644 --- 
a/libs/community/langchain_community/document_loaders/blackboard.py +++ b/libs/community/langchain_community/document_loaders/blackboard.py @@ -31,7 +31,7 @@ class BlackboardLoader(WebBaseLoader): ) documents = loader.load() - """ # noqa: E501 + """ def __init__( self, diff --git a/libs/community/langchain_community/document_loaders/blob_loaders/__init__.py b/libs/community/langchain_community/document_loaders/blob_loaders/__init__.py index 4fb20f10d00..95907e77ffc 100644 --- a/libs/community/langchain_community/document_loaders/blob_loaders/__init__.py +++ b/libs/community/langchain_community/document_loaders/blob_loaders/__init__.py @@ -4,6 +4,9 @@ from typing import TYPE_CHECKING, Any from langchain_core.document_loaders import Blob, BlobLoader if TYPE_CHECKING: + from langchain_community.document_loaders.blob_loaders.cloud_blob_loader import ( + CloudBlobLoader, + ) from langchain_community.document_loaders.blob_loaders.file_system import ( FileSystemBlobLoader, ) @@ -13,6 +16,9 @@ if TYPE_CHECKING: _module_lookup = { + "CloudBlobLoader": ( + "langchain_community.document_loaders.blob_loaders.cloud_blob_loader" + ), "FileSystemBlobLoader": ( "langchain_community.document_loaders.blob_loaders.file_system" ), @@ -32,6 +38,7 @@ def __getattr__(name: str) -> Any: __all__ = [ "BlobLoader", "Blob", + "CloudBlobLoader", "FileSystemBlobLoader", "YoutubeAudioLoader", ] diff --git a/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py b/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py new file mode 100644 index 00000000000..2fa866e86c6 --- /dev/null +++ b/libs/community/langchain_community/document_loaders/blob_loaders/cloud_blob_loader.py @@ -0,0 +1,295 @@ +"""Use to load blobs from the local file system.""" +import contextlib +import mimetypes +import tempfile +from io import BufferedReader, BytesIO +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Callable, + Generator, + Iterable, 
+ Iterator, + Optional, + Sequence, + TypeVar, + Union, +) +from urllib.parse import urlparse + +if TYPE_CHECKING: + from cloudpathlib import AnyPath + +from langchain_community.document_loaders.blob_loaders.schema import ( + Blob, + BlobLoader, +) + +T = TypeVar("T") + + +class _CloudBlob(Blob): + def as_string(self) -> str: + """Read data as a string.""" + from cloudpathlib import AnyPath + + if self.data is None and self.path: + return AnyPath(self.path).read_text(encoding=self.encoding) # type: ignore + elif isinstance(self.data, bytes): + return self.data.decode(self.encoding) + elif isinstance(self.data, str): + return self.data + else: + raise ValueError(f"Unable to get string for blob {self}") + + def as_bytes(self) -> bytes: + """Read data as bytes.""" + from cloudpathlib import AnyPath + + if isinstance(self.data, bytes): + return self.data + elif isinstance(self.data, str): + return self.data.encode(self.encoding) + elif self.data is None and self.path: + return AnyPath(self.path).read_bytes() # type: ignore + else: + raise ValueError(f"Unable to get bytes for blob {self}") + + @contextlib.contextmanager + def as_bytes_io(self) -> Generator[Union[BytesIO, BufferedReader], None, None]: + """Read data as a byte stream.""" + from cloudpathlib import AnyPath + + if isinstance(self.data, bytes): + yield BytesIO(self.data) + elif self.data is None and self.path: + yield BytesIO(AnyPath(self.path).read_bytes()) # type: ignore + else: + raise NotImplementedError(f"Unable to convert blob {self}") + + +def _url_to_filename(url: str) -> str: + """ + Convert file:, s3:, az: or gs: url to localfile. + If the file is not here, download it in a temporary file.
+ """ + from cloudpathlib import AnyPath + + url_parsed = urlparse(url) + suffix = Path(url_parsed.path).suffix + if url_parsed.scheme in ["s3", "az", "gs"]: + with AnyPath(url).open("rb") as f: # type: ignore + temp_file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) + while True: + buf = f.read() + if not buf: + break + temp_file.write(buf) + temp_file.close() + file_path = temp_file.name + elif url_parsed.scheme in ["file", ""]: + file_path = url_parsed.path + else: + raise ValueError(f"Scheme {url_parsed.scheme} not supported") + return file_path + + +def _make_iterator( + length_func: Callable[[], int], show_progress: bool = False +) -> Callable[[Iterable[T]], Iterator[T]]: + """Create a function that optionally wraps an iterable in tqdm.""" + if show_progress: + try: + from tqdm.auto import tqdm + except ImportError: + raise ImportError( + "You must install tqdm to use show_progress=True." + "You can install tqdm with `pip install tqdm`." + ) + + # Make sure to provide `total` here so that tqdm can show + # a progress bar that takes into account the total number of files. + def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]: + """Wrap an iterable in a tqdm progress bar.""" + return tqdm(iterable, total=length_func()) + + iterator = _with_tqdm + else: + iterator = iter # type: ignore + + return iterator + + +# PUBLIC API + + +class CloudBlobLoader(BlobLoader): + """Load blobs from cloud URL or file:. + + Example: + + .. code-block:: python + + loader = CloudBlobLoader("s3://mybucket/id") + + for blob in loader.yield_blobs(): + print(blob) + """ # noqa: E501 + + def __init__( + self, + url: Union[str, "AnyPath"], + *, + glob: str = "**/[!.]*", + exclude: Sequence[str] = (), + suffixes: Optional[Sequence[str]] = None, + show_progress: bool = False, + ) -> None: + """Initialize with a url and how to glob over it. + + Use [CloudPathLib](https://cloudpathlib.drivendata.org/). + + Args: + url: Cloud URL to load from. 
+ Supports s3://, az://, gs://, file:// schemes. + If no scheme is provided, it is assumed to be a local file. + If a path to a file is provided, glob/exclude/suffixes are ignored. + glob: Glob pattern relative to the specified path + by default set to pick up all non-hidden files + exclude: patterns to exclude from results, use glob syntax + suffixes: Provide to keep only files with these suffixes + Useful when wanting to keep files with different suffixes + Suffixes must include the dot, e.g. ".txt" + show_progress: If true, will show a progress bar as the files are loaded. + This forces an iteration through all matching files + to count them prior to loading them. + + Examples: + + .. code-block:: python + from langchain_community.document_loaders.blob_loaders import CloudBlobLoader + + # Load a single file. + loader = CloudBlobLoader("s3://mybucket/id") # az:// + + # Recursively load all text files in a directory. + loader = CloudBlobLoader("az://mybucket/id", glob="**/*.txt") + + # Recursively load all non-hidden files in a directory. + loader = CloudBlobLoader("gs://mybucket/id", glob="**/[!.]*") + + # Load all files in a directory without recursion. + loader = CloudBlobLoader("s3://mybucket/id", glob="*") + + # Recursively load all files in a directory, except for py or pyc files. 
+ loader = CloudBlobLoader( + "s3://mybucket/id", + glob="**/*.txt", + exclude=["**/*.py", "**/*.pyc"] + ) + """ # noqa: E501 + from cloudpathlib import AnyPath + + url_parsed = urlparse(str(url)) + + if url_parsed.scheme == "file": + url = url_parsed.path + + if isinstance(url, str): + self.path = AnyPath(url) + else: + self.path = url + + self.glob = glob + self.suffixes = set(suffixes or []) + self.show_progress = show_progress + self.exclude = exclude + + def yield_blobs( + self, + ) -> Iterable[Blob]: + """Yield blobs that match the requested pattern.""" + iterator = _make_iterator( + length_func=self.count_matching_files, show_progress=self.show_progress + ) + + for path in iterator(self._yield_paths()): + # yield Blob.from_path(path) + yield self.from_path(path) + + def _yield_paths(self) -> Iterable["AnyPath"]: + """Yield paths that match the requested pattern.""" + if self.path.is_file(): # type: ignore + yield self.path + return + + paths = self.path.glob(self.glob) + for path in paths: + if self.exclude: + if any(path.match(glob) for glob in self.exclude): + continue + if path.is_file(): + if self.suffixes and path.suffix not in self.suffixes: + continue # FIXME + yield path + + def count_matching_files(self) -> int: + """Count files that match the pattern without loading them.""" + # Carry out a full iteration to count the files without + # materializing anything expensive in memory. + num = 0 + for _ in self._yield_paths(): + num += 1 + return num + + @classmethod + def from_path( + cls, + path: "AnyPath", + *, + encoding: str = "utf-8", + mime_type: Optional[str] = None, + guess_type: bool = True, + metadata: Optional[dict] = None, + ) -> Blob: + """Load the blob from a path like object. + + Args: + path: path like object to file to be read + Supports s3://, az://, gs://, file:// schemes. + If no scheme is provided, it is assumed to be a local file. 
+ encoding: Encoding to use if decoding the bytes into a string + mime_type: if provided, will be set as the mime-type of the data + guess_type: If True, the mimetype will be guessed from the file extension, + if a mime-type was not provided + metadata: Metadata to associate with the blob + + Returns: + Blob instance + """ + if mime_type is None and guess_type: + _mimetype = mimetypes.guess_type(path)[0] if guess_type else None # type: ignore + else: + _mimetype = mime_type + + url_parsed = urlparse(str(path)) + if url_parsed.scheme in ["file", ""]: + if url_parsed.scheme == "file": + local_path = url_parsed.path + else: + local_path = str(path) + return Blob( + data=None, + mimetype=_mimetype, + encoding=encoding, + path=local_path, + metadata=metadata if metadata is not None else {}, + ) + + return _CloudBlob( + data=None, + mimetype=_mimetype, + encoding=encoding, + path=str(path), + metadata=metadata if metadata is not None else {}, + ) diff --git a/libs/community/langchain_community/document_loaders/cassandra.py b/libs/community/langchain_community/document_loaders/cassandra.py index 78c598ffa9a..8c31df029e6 100644 --- a/libs/community/langchain_community/document_loaders/cassandra.py +++ b/libs/community/langchain_community/document_loaders/cassandra.py @@ -14,7 +14,7 @@ from typing import ( from langchain_core.documents import Document from langchain_community.document_loaders.base import BaseLoader -from langchain_community.utilities.cassandra import wrapped_response_future +from langchain_community.utilities.cassandra import aexecute_cql _NOT_SET = object() @@ -118,11 +118,7 @@ class CassandraLoader(BaseLoader): ) async def alazy_load(self) -> AsyncIterator[Document]: - for row in await wrapped_response_future( - self.session.execute_async, - self.query, - **self.query_kwargs, - ): + for row in await aexecute_cql(self.session, self.query, **self.query_kwargs): metadata = self.metadata.copy() metadata.update(self.metadata_mapper(row)) yield Document( diff 
--git a/libs/community/langchain_community/document_loaders/csv_loader.py b/libs/community/langchain_community/document_loaders/csv_loader.py index fca2f1f0f9b..37e6f565531 100644 --- a/libs/community/langchain_community/document_loaders/csv_loader.py +++ b/libs/community/langchain_community/document_loaders/csv_loader.py @@ -97,7 +97,9 @@ class CSVLoader(BaseLoader): f"Source column '{self.source_column}' not found in CSV file." ) content = "\n".join( - f"{k.strip()}: {v.strip() if v is not None else v}" + f"""{k.strip() if k is not None else k}: {v.strip() + if isinstance(v, str) else ','.join(map(str.strip, v)) + if isinstance(v, list) else v}""" for k, v in row.items() if k not in self.metadata_columns ) diff --git a/libs/community/langchain_community/document_loaders/evernote.py b/libs/community/langchain_community/document_loaders/evernote.py index 07ade0ce5d8..296e4bcd7e7 100644 --- a/libs/community/langchain_community/document_loaders/evernote.py +++ b/libs/community/langchain_community/document_loaders/evernote.py @@ -34,7 +34,7 @@ class EverNoteLoader(BaseLoader): notes into a single long Document. If this is set to True (default) then the only metadata on the document will be the 'source' which contains the file name of the export. - """ # noqa: E501 + """ def __init__(self, file_path: Union[str, Path], load_single_document: bool = True): """Initialize with file path.""" diff --git a/libs/community/langchain_community/document_loaders/firecrawl.py b/libs/community/langchain_community/document_loaders/firecrawl.py index ad09f532311..da87466d3de 100644 --- a/libs/community/langchain_community/document_loaders/firecrawl.py +++ b/libs/community/langchain_community/document_loaders/firecrawl.py @@ -25,7 +25,7 @@ class FireCrawlLoader(BaseLoader): Args: url: The url to be crawled. api_key: The Firecrawl API key. If not specified will be read from env var - FIREWALL_API_KEY. Get an API key + FIRECRAWL_API_KEY. Get an API key mode: The mode to run the loader in. 
Default is "crawl". Options include "scrape" (single url) and "crawl" (all accessible sub pages). @@ -44,7 +44,7 @@ class FireCrawlLoader(BaseLoader): raise ValueError( f"Unrecognized mode '{mode}'. Expected one of 'crawl', 'scrape'." ) - api_key = api_key or get_from_env("api_key", "FIREWALL_API_KEY") + api_key = api_key or get_from_env("api_key", "FIRECRAWL_API_KEY") self.firecrawl = FirecrawlApp(api_key=api_key) self.url = url self.mode = mode diff --git a/libs/community/langchain_community/document_loaders/parsers/__init__.py b/libs/community/langchain_community/document_loaders/parsers/__init__.py index c425f8b546c..13261622d18 100644 --- a/libs/community/langchain_community/document_loaders/parsers/__init__.py +++ b/libs/community/langchain_community/document_loaders/parsers/__init__.py @@ -1,5 +1,36 @@ import importlib -from typing import Any +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.audio import ( + OpenAIWhisperParser, + ) + from langchain_community.document_loaders.parsers.doc_intelligence import ( + AzureAIDocumentIntelligenceParser, + ) + from langchain_community.document_loaders.parsers.docai import ( + DocAIParser, + ) + from langchain_community.document_loaders.parsers.grobid import ( + GrobidParser, + ) + from langchain_community.document_loaders.parsers.html import ( + BS4HTMLParser, + ) + from langchain_community.document_loaders.parsers.language import ( + LanguageParser, + ) + from langchain_community.document_loaders.parsers.pdf import ( + PDFMinerParser, + PDFPlumberParser, + PyMuPDFParser, + PyPDFium2Parser, + PyPDFParser, + ) + from langchain_community.document_loaders.parsers.vsdx import ( + VsdxParser, + ) + _module_lookup = { "AzureAIDocumentIntelligenceParser": "langchain_community.document_loaders.parsers.doc_intelligence", # noqa: E501 @@ -24,4 +55,17 @@ def __getattr__(name: str) -> Any: raise AttributeError(f"module {__name__} has no attribute {name}") -__all__ = 
list(_module_lookup.keys()) +__all__ = [ + "AzureAIDocumentIntelligenceParser", + "BS4HTMLParser", + "DocAIParser", + "GrobidParser", + "LanguageParser", + "OpenAIWhisperParser", + "PDFMinerParser", + "PDFPlumberParser", + "PyMuPDFParser", + "PyPDFParser", + "PyPDFium2Parser", + "VsdxParser", +] diff --git a/libs/community/langchain_community/document_loaders/pebblo.py b/libs/community/langchain_community/document_loaders/pebblo.py index 4bc3aa1db5d..8a710a9a5b2 100644 --- a/libs/community/langchain_community/document_loaders/pebblo.py +++ b/libs/community/langchain_community/document_loaders/pebblo.py @@ -208,9 +208,9 @@ class PebbloSafeLoader(BaseLoader): if loading_end is True: payload["loading_end"] = "true" if "loader_details" in payload: - payload["loader_details"]["source_aggregate_size"] = ( # noqa - self.source_aggregate_size - ) + payload["loader_details"][ + "source_aggregate_size" + ] = self.source_aggregate_size payload = Doc(**payload).dict(exclude_unset=True) load_doc_url = f"{self.classifier_url}{LOADER_DOC_URL}" classified_docs = [] diff --git a/libs/community/langchain_community/document_loaders/quip.py b/libs/community/langchain_community/document_loaders/quip.py index 0d9c4474fb0..540ef8f945c 100644 --- a/libs/community/langchain_community/document_loaders/quip.py +++ b/libs/community/langchain_community/document_loaders/quip.py @@ -1,10 +1,9 @@ import logging import re -import xml.etree.cElementTree -import xml.sax.saxutils +import xml.etree.cElementTree # OK: user-must-opt-in from io import BytesIO from typing import List, Optional, Sequence -from xml.etree.ElementTree import ElementTree +from xml.etree.ElementTree import ElementTree # OK: user-must-opt-in from langchain_core.documents import Document @@ -22,14 +21,20 @@ class QuipLoader(BaseLoader): """ def __init__( - self, api_url: str, access_token: str, request_timeout: Optional[int] = 60 + self, + api_url: str, + access_token: str, + request_timeout: Optional[int] = 60, + *, + 
allow_dangerous_xml_parsing: bool = False, ): """ Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: - https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs + https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs request_timeout: timeout of request, default 60s. + allow_dangerous_xml_parsing: Allow dangerous XML parsing, defaults to False """ try: from quip_api.quip import QuipClient @@ -42,6 +47,17 @@ class QuipLoader(BaseLoader): access_token=access_token, base_url=api_url, request_timeout=request_timeout ) + if not allow_dangerous_xml_parsing: + raise ValueError( + "The quip client uses the built-in XML parser which may cause " + "security issues when parsing XML data in some cases. " + "Please see " + "https://docs.python.org/3/library/xml.html#xml-vulnerabilities " + "For more information, set `allow_dangerous_xml_parsing` as True " + "if you are sure that your distribution of the standard library " + "is not vulnerable to XML vulnerabilities." + ) + def load( self, folder_ids: Optional[List[str]] = None, diff --git a/libs/community/langchain_community/document_loaders/scrapfly.py b/libs/community/langchain_community/document_loaders/scrapfly.py new file mode 100644 index 00000000000..b774d46aded --- /dev/null +++ b/libs/community/langchain_community/document_loaders/scrapfly.py @@ -0,0 +1,69 @@ +"""Scrapfly Web Reader.""" +import logging +from typing import Iterator, List, Literal, Optional + +from langchain_core.document_loaders import BaseLoader +from langchain_core.documents import Document +from langchain_core.utils import get_from_env + +logger = logging.getLogger(__file__) + + +class ScrapflyLoader(BaseLoader): + """Turn a url to llm accessible markdown with `Scrapfly.io`.
+ + For further details, visit: https://scrapfly.io/docs/sdk/python + """ + + def __init__( + self, + urls: List[str], + *, + api_key: Optional[str] = None, + scrape_format: Literal["markdown", "text"] = "markdown", + scrape_config: Optional[dict] = None, + continue_on_failure: bool = True, + ) -> None: + """Initialize client. + + Args: + urls: List of urls to scrape. + api_key: The Scrapfly API key. If not specified must have env var + SCRAPFLY_API_KEY set. + scrape_format: Scrape result format, one of "markdown" or "text". + scrape_config: Dictionary of ScrapFly scrape config object. + continue_on_failure: Whether to continue if scraping a url fails. + """ + try: + from scrapfly import ScrapflyClient + except ImportError: + raise ImportError( + "`scrapfly` package not found, please run `pip install scrapfly-sdk`" + ) + if not urls: + raise ValueError("URLs must be provided.") + api_key = api_key or get_from_env("api_key", "SCRAPFLY_API_KEY") + self.scrapfly = ScrapflyClient(key=api_key) + self.urls = urls + self.scrape_format = scrape_format + self.scrape_config = scrape_config + self.continue_on_failure = continue_on_failure + + def lazy_load(self) -> Iterator[Document]: + from scrapfly import ScrapeConfig + + scrape_config = self.scrape_config if self.scrape_config is not None else {} + for url in self.urls: + try: + response = self.scrapfly.scrape( + ScrapeConfig(url, format=self.scrape_format, **scrape_config) + ) + yield Document( + page_content=response.scrape_result["content"], + metadata={"url": url}, + ) + except Exception as e: + if self.continue_on_failure: + logger.error(f"Error fetching data from {url}, exception: {e}") + else: + raise e diff --git a/libs/community/langchain_community/document_loaders/sharepoint.py b/libs/community/langchain_community/document_loaders/sharepoint.py index f4d57d66d42..03674f92f68 100644 --- a/libs/community/langchain_community/document_loaders/sharepoint.py +++
b/libs/community/langchain_community/document_loaders/sharepoint.py @@ -1,8 +1,13 @@ """Loader that loads data from Sharepoint Document Library""" + from __future__ import annotations -from typing import Iterator, List, Optional, Sequence +import json +from pathlib import Path +from typing import Any, Iterator, List, Optional, Sequence +import requests +from langchain_core.document_loaders import BaseLoader from langchain_core.documents import Document from langchain_core.pydantic_v1 import Field @@ -13,7 +18,7 @@ from langchain_community.document_loaders.base_o365 import ( from langchain_community.document_loaders.parsers.registry import get_parser -class SharePointLoader(O365BaseLoader): +class SharePointLoader(O365BaseLoader, BaseLoader): """Load from `SharePoint`.""" document_library_id: str = Field(...) @@ -24,6 +29,14 @@ class SharePointLoader(O365BaseLoader): """ The IDs of the objects to load data from.""" folder_id: Optional[str] = None """ The ID of the folder to load data from.""" + load_auth: Optional[bool] = False + """ Whether to load authorization identities.""" + token_path: Path = Path.home() / ".credentials" / "o365_token.txt" + """ The path to the token to make api calls""" + file_id: Optional[str] = None + """ The ID of the file for which we need auth identities""" + site_id: Optional[str] = None + """ The ID of the Sharepoint site of the user where the file is present """ @property def _file_types(self) -> Sequence[_FileType]: @@ -52,7 +65,13 @@ class SharePointLoader(O365BaseLoader): if not isinstance(target_folder, Folder): raise ValueError(f"There isn't a folder with path {self.folder_path}.") for blob in self._load_from_folder(target_folder): - yield from blob_parser.lazy_parse(blob) + if self.load_auth is True: + for parsed_blob in blob_parser.lazy_parse(blob): + auth_identities = self.authorized_identities() + parsed_blob.metadata["authorized_identities"] = auth_identities + yield parsed_blob + else: + yield from 
blob_parser.lazy_parse(blob) if self.folder_id: target_folder = drive.get_item(self.folder_id) if not isinstance(target_folder, Folder): @@ -67,4 +86,42 @@ class SharePointLoader(O365BaseLoader): if not isinstance(target_folder, Folder): raise ValueError("Unable to fetch root folder") for blob in self._load_from_folder(target_folder): - yield from blob_parser.lazy_parse(blob) + for blob_part in blob_parser.lazy_parse(blob): + blob_part.metadata.update(blob.metadata) + yield blob_part + + def authorized_identities(self) -> List: + data = self._fetch_access_token() + access_token = data.get("access_token") + url = ( + f"https://graph.microsoft.com/v1.0/sites/{self.site_id}/" + f"drives/{self.document_library_id}/items/{self.file_id}/permissions" + ) + headers = {"Authorization": f"Bearer {access_token}"} + response = requests.request("GET", url, headers=headers, data={}) + groups_list = response.json() + + group_names = [] + + for group_data in groups_list.get("value"): + if group_data.get("grantedToV2"): + if group_data.get("grantedToV2").get("siteGroup"): + site_data = group_data.get("grantedToV2").get("siteGroup") + # print(group_data) + group_names.append(site_data.get("displayName")) + elif group_data.get("grantedToV2").get("group") or ( + group_data.get("grantedToV2").get("user") + ): + site_data = group_data.get("grantedToV2").get("group") or ( + group_data.get("grantedToV2").get("user") + ) + # print(group_data) + group_names.append(site_data.get("displayName")) + + return group_names + + def _fetch_access_token(self) -> Any: + with open(self.token_path) as f: + s = f.read() + data = json.loads(s) + return data diff --git a/libs/community/langchain_community/document_loaders/web_base.py b/libs/community/langchain_community/document_loaders/web_base.py index b07f904c5fc..b925f792e57 100644 --- a/libs/community/langchain_community/document_loaders/web_base.py +++ b/libs/community/langchain_community/document_loaders/web_base.py @@ -134,6 +134,8 @@ class 
WebBaseLoader(BaseLoader): ssl=None if self.session.verify else False, cookies=self.session.cookies.get_dict(), ) as response: + if self.raise_for_status: + response.raise_for_status() return await response.text() except aiohttp.ClientConnectionError as e: if i == retries - 1: diff --git a/libs/community/langchain_community/document_transformers/__init__.py b/libs/community/langchain_community/document_transformers/__init__.py index 160946e8cf9..14aa448841e 100644 --- a/libs/community/langchain_community/document_transformers/__init__.py +++ b/libs/community/langchain_community/document_transformers/__init__.py @@ -81,7 +81,7 @@ _module_lookup = { "GoogleTranslateTransformer": "langchain_community.document_transformers.google_translate", # noqa: E501 "Html2TextTransformer": "langchain_community.document_transformers.html2text", "LongContextReorder": "langchain_community.document_transformers.long_context_reorder", # noqa: E501 - "MarkdownifyTransformer": "langchain_community.document_transformers.markdownify", # noqa: E501 + "MarkdownifyTransformer": "langchain_community.document_transformers.markdownify", "NucliaTextTransformer": "langchain_community.document_transformers.nuclia_text_transform", # noqa: E501 "OpenAIMetadataTagger": "langchain_community.document_transformers.openai_functions", # noqa: E501 "get_stateful_documents": "langchain_community.document_transformers.embeddings_redundant_filter", # noqa: E501 @@ -93,6 +93,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/document_transformers/openai_functions.py b/libs/community/langchain_community/document_transformers/openai_functions.py index 5c9dec22e61..ccad6ab1ec3 100644 --- a/libs/community/langchain_community/document_transformers/openai_functions.py +++ 
b/libs/community/langchain_community/document_transformers/openai_functions.py @@ -57,7 +57,7 @@ class OpenAIMetadataTagger(BaseDocumentTransformer, BaseModel): new_documents = [] for document in documents: - extracted_metadata: Dict = self.tagging_chain.run(document.page_content) # type: ignore[assignment] # noqa: E501 + extracted_metadata: Dict = self.tagging_chain.run(document.page_content) # type: ignore[assignment] new_document = Document( page_content=document.page_content, metadata={**extracted_metadata, **document.metadata}, diff --git a/libs/community/langchain_community/embeddings/__init__.py b/libs/community/langchain_community/embeddings/__init__.py index 499f79453a2..5f72f232997 100644 --- a/libs/community/langchain_community/embeddings/__init__.py +++ b/libs/community/langchain_community/embeddings/__init__.py @@ -43,6 +43,9 @@ if TYPE_CHECKING: from langchain_community.embeddings.clarifai import ( ClarifaiEmbeddings, ) + from langchain_community.embeddings.clova import ( + ClovaEmbeddings, + ) from langchain_community.embeddings.cohere import ( CohereEmbeddings, ) @@ -170,7 +173,7 @@ if TYPE_CHECKING: QuantizedBiEncoderEmbeddings, ) from langchain_community.embeddings.oracleai import ( - OracleEmbeddings, # noqa: F401 + OracleEmbeddings, ) from langchain_community.embeddings.premai import ( PremAIEmbeddings, @@ -203,6 +206,9 @@ if TYPE_CHECKING: from langchain_community.embeddings.tensorflow_hub import ( TensorflowHubEmbeddings, ) + from langchain_community.embeddings.titan_takeoff import ( + TitanTakeoffEmbed, + ) from langchain_community.embeddings.vertexai import ( VertexAIEmbeddings, ) @@ -229,6 +235,7 @@ __all__ = [ "BedrockEmbeddings", "BookendEmbeddings", "ClarifaiEmbeddings", + "ClovaEmbeddings", "CohereEmbeddings", "DashScopeEmbeddings", "DatabricksEmbeddings", @@ -288,6 +295,7 @@ __all__ = [ "SpacyEmbeddings", "SparkLLMTextEmbeddings", "TensorflowHubEmbeddings", + "TitanTakeoffEmbed", "VertexAIEmbeddings", "VolcanoEmbeddings", 
"VoyageEmbeddings", @@ -305,6 +313,7 @@ _module_lookup = { "BedrockEmbeddings": "langchain_community.embeddings.bedrock", "BookendEmbeddings": "langchain_community.embeddings.bookend", "ClarifaiEmbeddings": "langchain_community.embeddings.clarifai", + "ClovaEmbeddings": "langchain_community.embeddings.clova", "CohereEmbeddings": "langchain_community.embeddings.cohere", "DashScopeEmbeddings": "langchain_community.embeddings.dashscope", "DatabricksEmbeddings": "langchain_community.embeddings.databricks", @@ -380,8 +389,6 @@ def __getattr__(name: str) -> Any: raise AttributeError(f"module {__name__} has no attribute {name}") -__all__ = list(_module_lookup.keys()) - logger = logging.getLogger(__name__) diff --git a/libs/community/langchain_community/embeddings/azure_openai.py b/libs/community/langchain_community/embeddings/azure_openai.py index c8eb14ddac6..f8eeca2b2cd 100644 --- a/libs/community/langchain_community/embeddings/azure_openai.py +++ b/libs/community/langchain_community/embeddings/azure_openai.py @@ -44,7 +44,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. - """ # noqa: E501 + """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. 
diff --git a/libs/community/langchain_community/embeddings/clova.py b/libs/community/langchain_community/embeddings/clova.py new file mode 100644 index 00000000000..59b28782e33 --- /dev/null +++ b/libs/community/langchain_community/embeddings/clova.py @@ -0,0 +1,134 @@ +from __future__ import annotations + +from typing import Dict, List, Optional, cast + +import requests +from langchain_core.embeddings import Embeddings +from langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr, root_validator +from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env + + +class ClovaEmbeddings(BaseModel, Embeddings): + """ + Clova's embedding service. + + To use this service, + + you should have the following environment variables + set with your API tokens and application ID, + or pass them as named parameters to the constructor: + + - ``CLOVA_EMB_API_KEY``: API key for accessing Clova's embedding service. + - ``CLOVA_EMB_APIGW_API_KEY``: API gateway key for enhanced security. + - ``CLOVA_EMB_APP_ID``: Application ID for identifying your application. + + Example: + .. code-block:: python + + from langchain_community.embeddings import ClovaEmbeddings + embeddings = ClovaEmbeddings( + clova_emb_api_key='your_clova_emb_api_key', + clova_emb_apigw_api_key='your_clova_emb_apigw_api_key', + app_id='your_app_id' + ) + + query_text = "This is a test query." + query_result = embeddings.embed_query(query_text) + + document_text = "This is a test document." 
+ document_result = embeddings.embed_documents([document_text]) + + """ + + endpoint_url: str = ( + "https://clovastudio.apigw.ntruss.com/testapp/v1/api-tools/embedding" + ) + """Endpoint URL to use.""" + model: str = "clir-emb-dolphin" + """Embedding model name to use.""" + clova_emb_api_key: Optional[SecretStr] = None + """API key for accessing Clova's embedding service.""" + clova_emb_apigw_api_key: Optional[SecretStr] = None + """API gateway key for enhanced security.""" + app_id: Optional[SecretStr] = None + """Application ID for identifying your application.""" + + class Config: + extra = Extra.forbid + + @root_validator(pre=True, allow_reuse=True) + def validate_environment(cls, values: Dict) -> Dict: + """Validate api key exists in environment.""" + values["clova_emb_api_key"] = convert_to_secret_str( + get_from_dict_or_env(values, "clova_emb_api_key", "CLOVA_EMB_API_KEY") + ) + values["clova_emb_apigw_api_key"] = convert_to_secret_str( + get_from_dict_or_env( + values, "clova_emb_apigw_api_key", "CLOVA_EMB_APIGW_API_KEY" + ) + ) + values["app_id"] = convert_to_secret_str( + get_from_dict_or_env(values, "app_id", "CLOVA_EMB_APP_ID") + ) + return values + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """ + Embed a list of texts and return their embeddings. + + Args: + texts: The list of texts to embed. + + Returns: + List of embeddings, one for each text. + """ + embeddings = [] + for text in texts: + embeddings.append(self._embed_text(text)) + return embeddings + + def embed_query(self, text: str) -> List[float]: + """ + Embed a single query text and return its embedding. + + Args: + text: The text to embed. + + Returns: + Embeddings for the text. + """ + return self._embed_text(text) + + def _embed_text(self, text: str) -> List[float]: + """ + Internal method to call the embedding API and handle the response. 
+ """ + payload = {"text": text} + + # HTTP headers for authorization + headers = { + "X-NCP-CLOVASTUDIO-API-KEY": cast( + SecretStr, self.clova_emb_api_key + ).get_secret_value(), + "X-NCP-APIGW-API-KEY": cast( + SecretStr, self.clova_emb_apigw_api_key + ).get_secret_value(), + "Content-Type": "application/json", + } + + # send request + app_id = cast(SecretStr, self.app_id).get_secret_value() + response = requests.post( + f"{self.endpoint_url}/{self.model}/{app_id}", + headers=headers, + json=payload, + ) + + # check for errors + if response.status_code == 200: + response_data = response.json() + if "result" in response_data and "embedding" in response_data["result"]: + return response_data["result"]["embedding"] + raise ValueError( + f"API request failed with status {response.status_code}: {response.text}" + ) diff --git a/libs/community/langchain_community/embeddings/elasticsearch.py b/libs/community/langchain_community/embeddings/elasticsearch.py index 80596b6fd92..ea080ab9aa9 100644 --- a/libs/community/langchain_community/embeddings/elasticsearch.py +++ b/libs/community/langchain_community/embeddings/elasticsearch.py @@ -25,7 +25,7 @@ class ElasticsearchEmbeddings(Embeddings): In Elasticsearch you need to have an embedding model loaded and deployed. 
- https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html - """ # noqa: E501 + """ def __init__( self, diff --git a/libs/community/langchain_community/embeddings/infinity.py b/libs/community/langchain_community/embeddings/infinity.py index f5068e64f63..9af640baf62 100644 --- a/libs/community/langchain_community/embeddings/infinity.py +++ b/libs/community/langchain_community/embeddings/infinity.py @@ -182,7 +182,7 @@ class TinyAsyncOpenAIInfinityEmbeddingClient: #: :meta private: length_sorted_idx = np.argsort([-sorter(sen) for sen in texts]) texts_sorted = [texts[idx] for idx in length_sorted_idx] - return texts_sorted, lambda unsorted_embeddings: [ # noqa E731 + return texts_sorted, lambda unsorted_embeddings: [ # E731 unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx) ] diff --git a/libs/community/langchain_community/embeddings/localai.py b/libs/community/langchain_community/embeddings/localai.py index b5a926e8fe2..fe620e9a6fa 100644 --- a/libs/community/langchain_community/embeddings/localai.py +++ b/libs/community/langchain_community/embeddings/localai.py @@ -254,7 +254,7 @@ class LocalAIEmbeddings(BaseModel, Embeddings): openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, - } # type: ignore[assignment] # noqa: E501 + } # type: ignore[assignment] return openai_args def _embedding_func(self, text: str, *, engine: str) -> List[float]: diff --git a/libs/community/langchain_community/embeddings/openai.py b/libs/community/langchain_community/embeddings/openai.py index 3edff9055e8..22ddac802a4 100644 --- a/libs/community/langchain_community/embeddings/openai.py +++ b/libs/community/langchain_community/embeddings/openai.py @@ -390,7 +390,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): openai.proxy = { "http": self.openai_proxy, "https": self.openai_proxy, - } # type: ignore[assignment] # noqa: E501 + } # type: 
ignore[assignment] return openai_args # please refer to diff --git a/libs/community/langchain_community/embeddings/titan_takeoff.py b/libs/community/langchain_community/embeddings/titan_takeoff.py index 81966c6739e..bf82f54936e 100644 --- a/libs/community/langchain_community/embeddings/titan_takeoff.py +++ b/libs/community/langchain_community/embeddings/titan_takeoff.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Any, List, Optional, Set, Union +from typing import Any, Dict, List, Optional, Set, Union from langchain_core.embeddings import Embeddings from langchain_core.pydantic_v1 import BaseModel @@ -142,11 +142,12 @@ class TitanTakeoffEmbed(Embeddings): def _embed( self, input: Union[List[str], str], consumer_group: Optional[str] - ) -> dict: + ) -> Dict[str, Any]: """Embed text. Args: - input (List[str]): prompt/document or list of prompts/documents to embed + input (Union[List[str], str]): prompt/document or list of prompts/documents + to embed consumer_group (Optional[str]): what consumer group to send the embedding request to. If not specified and there is only one consumer group specified during initialization, it will be used. 
If there diff --git a/libs/community/langchain_community/embeddings/yandex.py b/libs/community/langchain_community/embeddings/yandex.py index 603ca919430..f396239d4fd 100644 --- a/libs/community/langchain_community/embeddings/yandex.py +++ b/libs/community/langchain_community/embeddings/yandex.py @@ -107,13 +107,13 @@ class YandexGPTEmbeddings(BaseModel, Embeddings): raise ValueError("'doc_model_uri' or 'folder_id' must be provided.") values[ "doc_model_uri" - ] = f"emb://{values['folder_id']}/{values['doc_model_name']}/{values['model_version']}" # noqa: E501 + ] = f"emb://{values['folder_id']}/{values['doc_model_name']}/{values['model_version']}" if not values.get("model_uri"): if values["folder_id"] == "": raise ValueError("'model_uri' or 'folder_id' must be provided.") values[ "model_uri" - ] = f"emb://{values['folder_id']}/{values['model_name']}/{values['model_version']}" # noqa: E501 + ] = f"emb://{values['folder_id']}/{values['model_name']}/{values['model_version']}" if values["disable_request_logging"]: values["_grpc_metadata"].append( ( diff --git a/libs/community/langchain_community/graphs/__init__.py b/libs/community/langchain_community/graphs/__init__.py index fd9fec8ef41..37bbf71b040 100644 --- a/libs/community/langchain_community/graphs/__init__.py +++ b/libs/community/langchain_community/graphs/__init__.py @@ -29,6 +29,8 @@ if TYPE_CHECKING: Neo4jGraph, ) from langchain_community.graphs.neptune_graph import ( + BaseNeptuneGraph, + NeptuneAnalyticsGraph, NeptuneGraph, ) from langchain_community.graphs.neptune_rdf_graph import ( @@ -53,11 +55,13 @@ __all__ = [ "GremlinGraph", "HugeGraph", "KuzuGraph", + "BaseNeptuneGraph", "MemgraphGraph", "NebulaGraph", "Neo4jGraph", "NeptuneGraph", "NeptuneRdfGraph", + "NeptuneAnalyticsGraph", "NetworkxEntityGraph", "OntotextGraphDBGraph", "RdfGraph", @@ -89,6 +93,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module 
{__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/graphs/arangodb_graph.py b/libs/community/langchain_community/graphs/arangodb_graph.py index b9e4530058e..dd2ad16614f 100644 --- a/libs/community/langchain_community/graphs/arangodb_graph.py +++ b/libs/community/langchain_community/graphs/arangodb_graph.py @@ -174,9 +174,9 @@ def get_arangodb_client( "Unable to import arango, please install with `pip install python-arango`." ) from e - _url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529") # type: ignore[assignment] # noqa: E501 - _dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system") # type: ignore[assignment] # noqa: E501 - _username: str = username or os.environ.get("ARANGODB_USERNAME", "root") # type: ignore[assignment] # noqa: E501 - _password: str = password or os.environ.get("ARANGODB_PASSWORD", "") # type: ignore[assignment] # noqa: E501 + _url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529") # type: ignore[assignment] + _dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system") # type: ignore[assignment] + _username: str = username or os.environ.get("ARANGODB_USERNAME", "root") # type: ignore[assignment] + _password: str = password or os.environ.get("ARANGODB_PASSWORD", "") # type: ignore[assignment] return ArangoClient(_url).db(_dbname, _username, _password, verify=True) diff --git a/libs/community/langchain_community/graphs/neo4j_graph.py b/libs/community/langchain_community/graphs/neo4j_graph.py index 697b2221cb2..a8b7bf6b0bd 100644 --- a/libs/community/langchain_community/graphs/neo4j_graph.py +++ b/libs/community/langchain_community/graphs/neo4j_graph.py @@ -151,7 +151,7 @@ def _format_schema(schema: Dict, is_enhanced: bool) -> str: formatted_node_props.append(f"- **{node_type}**") for prop in properties: example = "" - if prop["type"] == "STRING": + if prop["type"] == "STRING" and prop.get("values"): if 
prop.get("distinct_count", 11) > DISTINCT_VALUE_LIMIT: example = ( f'Example: "{clean_string_values(prop["values"][0])}"' @@ -400,7 +400,7 @@ class Neo4jGraph(GraphStore): """ Refreshes the Neo4j graph schema information. """ - from neo4j.exceptions import ClientError + from neo4j.exceptions import ClientError, CypherTypeError node_properties = [ el["output"] @@ -461,10 +461,14 @@ class Neo4jGraph(GraphStore): enhanced_cypher = self._enhanced_schema_cypher( node["name"], node_props, node["count"] < EXHAUSTIVE_SEARCH_LIMIT ) - enhanced_info = self.query(enhanced_cypher)[0]["output"] - for prop in node_props: - if prop["property"] in enhanced_info: - prop.update(enhanced_info[prop["property"]]) + # Due to schema-flexible nature of neo4j errors can happen + try: + enhanced_info = self.query(enhanced_cypher)[0]["output"] + for prop in node_props: + if prop["property"] in enhanced_info: + prop.update(enhanced_info[prop["property"]]) + except CypherTypeError: + continue # Update rel info for rel in schema_counts[0]["relationships"]: # Skip bloom labels @@ -479,10 +483,14 @@ class Neo4jGraph(GraphStore): rel["count"] < EXHAUSTIVE_SEARCH_LIMIT, is_relationship=True, ) - enhanced_info = self.query(enhanced_cypher)[0]["output"] - for prop in rel_props: - if prop["property"] in enhanced_info: - prop.update(enhanced_info[prop["property"]]) + try: + enhanced_info = self.query(enhanced_cypher)[0]["output"] + for prop in rel_props: + if prop["property"] in enhanced_info: + prop.update(enhanced_info[prop["property"]]) + # Due to schema-flexible nature of neo4j errors can happen + except CypherTypeError: + continue schema = _format_schema(self.structured_schema, self._enhanced_schema) @@ -587,8 +595,8 @@ class Neo4jGraph(GraphStore): if prop_type == "STRING": with_clauses.append( ( - f"collect(distinct substring(n.`{prop_name}`, 0, 50)) " - f"AS `{prop_name}_values`" + f"collect(distinct substring(toString(n.`{prop_name}`)" + f", 0, 50)) AS `{prop_name}_values`" ) ) 
return_clauses.append( @@ -664,8 +672,8 @@ class Neo4jGraph(GraphStore): else: with_clauses.append( ( - f"collect(distinct substring(n.`{prop_name}`, 0, 50)) " - f"AS `{prop_name}_values`" + f"collect(distinct substring(toString(n.`{prop_name}`)" + f", 0, 50)) AS `{prop_name}_values`" ) ) return_clauses.append(f"values: `{prop_name}_values`") diff --git a/libs/community/langchain_community/llms/azureml_endpoint.py b/libs/community/langchain_community/llms/azureml_endpoint.py index 925c43661d1..f1edd988514 100644 --- a/libs/community/langchain_community/llms/azureml_endpoint.py +++ b/libs/community/langchain_community/llms/azureml_endpoint.py @@ -496,7 +496,7 @@ class AzureMLOnlineEndpoint(BaseLLM, AzureMLBaseEndpoint): timeout=120, content_formatter=content_formatter, ) - """ # noqa: E501 + """ @property def _identifying_params(self) -> Mapping[str, Any]: diff --git a/libs/community/langchain_community/llms/openai.py b/libs/community/langchain_community/llms/openai.py index 0d891c299f1..757e3d85ba2 100644 --- a/libs/community/langchain_community/llms/openai.py +++ b/libs/community/langchain_community/llms/openai.py @@ -604,7 +604,7 @@ class BaseOpenAI(BaseLLM): if self.openai_proxy: import openai - openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501 + openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] return {**openai_creds, **self._default_params} @property @@ -800,7 +800,7 @@ class AzureOpenAI(BaseOpenAI): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. - """ # noqa: E501 + """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. 
@@ -1055,7 +1055,7 @@ class OpenAIChat(BaseLLM): if openai_organization: openai.organization = openai_organization if openai_proxy: - openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501 + openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] except ImportError: raise ImportError( "Could not import openai python package. " diff --git a/libs/community/langchain_community/llms/symblai_nebula.py b/libs/community/langchain_community/llms/symblai_nebula.py index afe6598f238..9e0b2687a2f 100644 --- a/libs/community/langchain_community/llms/symblai_nebula.py +++ b/libs/community/langchain_community/llms/symblai_nebula.py @@ -41,7 +41,7 @@ class Nebula(LLM): nebula_service_path="NEBULA_SERVICE_PATH", nebula_api_key="NEBULA_API_KEY", ) - """ # noqa: E501 + """ """Key/value arguments to pass to the model. Reserved for future use""" model_kwargs: Optional[dict] = None diff --git a/libs/community/langchain_community/memory/zep_cloud_memory.py b/libs/community/langchain_community/memory/zep_cloud_memory.py new file mode 100644 index 00000000000..24ddb04677c --- /dev/null +++ b/libs/community/langchain_community/memory/zep_cloud_memory.py @@ -0,0 +1,124 @@ +from __future__ import annotations + +from typing import Any, Dict, Optional + +from langchain_community.chat_message_histories import ZepCloudChatMessageHistory + +try: + from langchain.memory import ConversationBufferMemory + from zep_cloud import MemoryGetRequestMemoryType + + class ZepCloudMemory(ConversationBufferMemory): + """Persist your chain history to the Zep MemoryStore. + + Documentation: https://help.getzep.com + + Example: + .. 
code-block:: python + + memory = ZepCloudMemory( + session_id=session_id, # Identifies your user or a user's session + api_key=, # Your Zep Project API key + memory_key="history", # Ensure this matches the key used in + # chain's prompt template + return_messages=True, # Does your prompt template expect a string + # or a list of Messages? + ) + chain = LLMChain(memory=memory,...) # Configure your chain to use the ZepMemory + instance + + + Note: + To persist metadata alongside your chat history, your will need to create a + custom Chain class that overrides the `prep_outputs` method to include the metadata + in the call to `self.memory.save_context`. + + + Zep - Recall, understand, and extract data from chat histories. Power personalized AI experiences. + ========= + Zep is a long-term memory service for AI Assistant apps. With Zep, you can provide AI assistants with the ability to recall past conversations, + no matter how distant, while also reducing hallucinations, latency, and cost. + + For more information on the zep-python package, see: + https://github.com/getzep/zep-python + + """ # noqa: E501 + + chat_memory: ZepCloudChatMessageHistory + + def __init__( + self, + session_id: str, + api_key: str, + memory_type: Optional[MemoryGetRequestMemoryType] = None, + lastn: Optional[int] = None, + output_key: Optional[str] = None, + input_key: Optional[str] = None, + return_messages: bool = False, + human_prefix: str = "Human", + ai_prefix: str = "AI", + memory_key: str = "history", + ): + """Initialize ZepMemory. + + Args: + session_id (str): Identifies your user or a user's session + api_key (str): Your Zep Project key. + memory_type (Optional[MemoryGetRequestMemoryType], optional): Zep Memory Type, defaults to perpetual + lastn (Optional[int], optional): Number of messages to retrieve. Will add the last summary generated prior to the nth oldest message. Defaults to 6 + output_key (Optional[str], optional): The key to use for the output message. + Defaults to None. 
+ input_key (Optional[str], optional): The key to use for the input message. + Defaults to None. + return_messages (bool, optional): Does your prompt template expect a string + or a list of Messages? Defaults to False + i.e. return a string. + human_prefix (str, optional): The prefix to use for human messages. + Defaults to "Human". + ai_prefix (str, optional): The prefix to use for AI messages. + Defaults to "AI". + memory_key (str, optional): The key to use for the memory. + Defaults to "history". + Ensure that this matches the key used in + chain's prompt template. + """ # noqa: E501 + chat_message_history = ZepCloudChatMessageHistory( + session_id=session_id, + memory_type=memory_type, + lastn=lastn, + api_key=api_key, + ) + super().__init__( + chat_memory=chat_message_history, + output_key=output_key, + input_key=input_key, + return_messages=return_messages, + human_prefix=human_prefix, + ai_prefix=ai_prefix, + memory_key=memory_key, + ) + + def save_context( + self, + inputs: Dict[str, Any], + outputs: Dict[str, str], + metadata: Optional[Dict[str, Any]] = None, + ) -> None: + """Save context from this conversation to buffer. + + Args: + inputs (Dict[str, Any]): The inputs to the chain. + outputs (Dict[str, str]): The outputs from the chain. + metadata (Optional[Dict[str, Any]], optional): Any metadata to save with + the context. 
Defaults to None + + Returns: + None + """ + input_str, output_str = self._get_input_output(inputs, outputs) + self.chat_memory.add_user_message(input_str, metadata=metadata) + self.chat_memory.add_ai_message(output_str, metadata=metadata) +except ImportError: + # Placeholder object + class ZepCloudMemory: # type: ignore[no-redef] + pass diff --git a/libs/community/langchain_community/output_parsers/ernie_functions.py b/libs/community/langchain_community/output_parsers/ernie_functions.py index 223284649f3..93fe9771cb2 100644 --- a/libs/community/langchain_community/output_parsers/ernie_functions.py +++ b/libs/community/langchain_community/output_parsers/ernie_functions.py @@ -164,7 +164,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): else: fn_name = _result["name"] _args = _result["arguments"] - pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore # noqa: E501 + pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore return pydantic_args diff --git a/libs/community/langchain_community/retrievers/__init__.py b/libs/community/langchain_community/retrievers/__init__.py index b62e9185594..2382a3f56df 100644 --- a/libs/community/langchain_community/retrievers/__init__.py +++ b/libs/community/langchain_community/retrievers/__init__.py @@ -136,6 +136,9 @@ if TYPE_CHECKING: from langchain_community.retrievers.zep import ( ZepRetriever, ) + from langchain_community.retrievers.zep_cloud import ( + ZepCloudRetriever, + ) from langchain_community.retrievers.zilliz import ( ZillizRetriever, ) @@ -183,6 +186,7 @@ _module_lookup = { "WikipediaRetriever": "langchain_community.retrievers.wikipedia", "YouRetriever": "langchain_community.retrievers.you", "ZepRetriever": "langchain_community.retrievers.zep", + "ZepCloudRetriever": "langchain_community.retrievers.zep_cloud", "ZillizRetriever": "langchain_community.retrievers.zilliz", "NeuralDBRetriever": "langchain_community.retrievers.thirdai_neuraldb", } @@ -238,5 +242,6 
@@ __all__ = [ "WikipediaRetriever", "YouRetriever", "ZepRetriever", + "ZepCloudRetriever", "ZillizRetriever", ] diff --git a/libs/community/langchain_community/retrievers/google_cloud_documentai_warehouse.py b/libs/community/langchain_community/retrievers/google_cloud_documentai_warehouse.py index e0a403b87e2..b989cef5b02 100644 --- a/libs/community/langchain_community/retrievers/google_cloud_documentai_warehouse.py +++ b/libs/community/langchain_community/retrievers/google_cloud_documentai_warehouse.py @@ -32,7 +32,7 @@ class GoogleDocumentAIWarehouseRetriever(BaseRetriever): Documents should be created and documents should be uploaded in a separate flow, and this retriever uses only Document AI - schema_id provided to search for revelant documents. + schema_id provided to search for relevant documents. More info: https://cloud.google.com/document-ai-warehouse. """ diff --git a/libs/community/langchain_community/retrievers/google_vertex_ai_search.py b/libs/community/langchain_community/retrievers/google_vertex_ai_search.py index 7c4351bc3cd..11cb6c58a77 100644 --- a/libs/community/langchain_community/retrievers/google_vertex_ai_search.py +++ b/libs/community/langchain_community/retrievers/google_vertex_ai_search.py @@ -190,7 +190,7 @@ class _BaseGoogleVertexAISearchRetriever(BaseModel): print( # noqa: T201 "Make sure that your data store is using Advanced Website " "Indexing.\n" - "https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing" # noqa: E501 + "https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing" ) return documents diff --git a/libs/community/langchain_community/retrievers/web_research.py b/libs/community/langchain_community/retrievers/web_research.py index 9003f51740a..2541d21c4e8 100644 --- a/libs/community/langchain_community/retrievers/web_research.py +++ b/libs/community/langchain_community/retrievers/web_research.py @@ -75,6 +75,11 @@ class 
WebResearchRetriever(BaseRetriever): url_database: List[str] = Field( default_factory=list, description="List of processed URLs" ) + trust_env: bool = Field( + False, + description="Whether to use the http_proxy/https_proxy env variables or " + "check .netrc for proxy configuration", + ) @classmethod def from_llm( @@ -87,6 +92,7 @@ class WebResearchRetriever(BaseRetriever): text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter( chunk_size=1500, chunk_overlap=150 ), + trust_env: bool = False, ) -> "WebResearchRetriever": """Initialize from llm using default template. @@ -97,6 +103,8 @@ class WebResearchRetriever(BaseRetriever): prompt: prompt to generating search questions num_search_results: Number of pages per Google search text_splitter: Text splitter for splitting web pages into chunks + trust_env: Whether to use the http_proxy/https_proxy env variables + or check .netrc for proxy configuration Returns: WebResearchRetriever @@ -124,6 +132,7 @@ class WebResearchRetriever(BaseRetriever): search=search, num_search_results=num_search_results, text_splitter=text_splitter, + trust_env=trust_env, ) def clean_search_query(self, query: str) -> str: @@ -191,7 +200,9 @@ class WebResearchRetriever(BaseRetriever): logger.info(f"New URLs to load: {new_urls}") # Load, split, and add new urls to vectorstore if new_urls: - loader = AsyncHtmlLoader(new_urls, ignore_load_errors=True) + loader = AsyncHtmlLoader( + new_urls, ignore_load_errors=True, trust_env=self.trust_env + ) html2text = Html2TextTransformer() logger.info("Indexing new urls...") docs = loader.load() diff --git a/libs/community/langchain_community/retrievers/zep_cloud.py b/libs/community/langchain_community/retrievers/zep_cloud.py new file mode 100644 index 00000000000..96758c71d98 --- /dev/null +++ b/libs/community/langchain_community/retrievers/zep_cloud.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from 
langchain_core.callbacks import ( + AsyncCallbackManagerForRetrieverRun, + CallbackManagerForRetrieverRun, +) +from langchain_core.documents import Document +from langchain_core.pydantic_v1 import root_validator +from langchain_core.retrievers import BaseRetriever + +if TYPE_CHECKING: + from zep_cloud import MemorySearchResult, SearchScope, SearchType + from zep_cloud.client import AsyncZep, Zep + + +class ZepCloudRetriever(BaseRetriever): + """`Zep Cloud` MemoryStore Retriever. + + Search your user's long-term chat history with Zep. + + Zep offers both simple semantic search and Maximal Marginal Relevance (MMR) + reranking of search results. + + Note: You will need to provide the user's `session_id` to use this retriever. + + Args: + api_key: Your Zep API key + session_id: Identifies your user or a user's session (required) + top_k: Number of documents to return (default: 3, optional) + search_type: Type of search to perform (similarity / mmr) + (default: similarity, optional) + mmr_lambda: Lambda value for MMR search. Defaults to 0.5 (optional) + + Zep - Recall, understand, and extract data from chat histories. + Power personalized AI experiences. + ========= + Zep is a long-term memory service for AI Assistant apps. + With Zep, you can provide AI assistants with the ability + to recall past conversations, + no matter how distant, while also reducing hallucinations, latency, and cost. + + see Zep Cloud Docs: https://help.getzep.com + """ + + api_key: str + """Your Zep API key.""" + zep_client: Zep + """Zep client used for making API requests.""" + zep_client_async: AsyncZep + """Async Zep client used for making API requests.""" + session_id: str + """Zep session ID.""" + top_k: Optional[int] + """Number of items to return.""" + search_scope: SearchScope = "messages" + """Which documents to search. 
Messages or Summaries?""" + search_type: SearchType = "similarity" + """Type of search to perform (similarity / mmr)""" + mmr_lambda: Optional[float] = None + """Lambda value for MMR search.""" + + @root_validator(pre=True) + def create_client(cls, values: dict) -> dict: + try: + from zep_cloud.client import AsyncZep, Zep + except ImportError: + raise ImportError( + "Could not import zep-cloud package. " + "Please install it with `pip install zep-cloud`." + ) + if values.get("api_key") is None: + raise ValueError("Zep API key is required.") + values["zep_client"] = Zep(api_key=values.get("api_key")) + values["zep_client_async"] = AsyncZep(api_key=values.get("api_key")) + return values + + def _messages_search_result_to_doc( + self, results: List[MemorySearchResult] + ) -> List[Document]: + return [ + Document( + page_content=str(r.message.content), + metadata={ + "score": r.score, + "uuid": r.message.uuid_, + "created_at": r.message.created_at, + "token_count": r.message.token_count, + "role": r.message.role or r.message.role_type, + }, + ) + for r in results or [] + if r.message + ] + + def _summary_search_result_to_doc( + self, results: List[MemorySearchResult] + ) -> List[Document]: + return [ + Document( + page_content=str(r.summary.content), + metadata={ + "score": r.score, + "uuid": r.summary.uuid_, + "created_at": r.summary.created_at, + "token_count": r.summary.token_count, + }, + ) + for r in results + if r.summary + ] + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, + metadata: Optional[Dict[str, Any]] = None, + ) -> List[Document]: + if not self.zep_client: + raise RuntimeError("Zep client not initialized.") + + results = self.zep_client.memory.search( + self.session_id, + text=query, + metadata=metadata, + search_scope=self.search_scope, + search_type=self.search_type, + mmr_lambda=self.mmr_lambda, + limit=self.top_k, + ) + + if self.search_scope == "summary": + return 
self._summary_search_result_to_doc(results) + + return self._messages_search_result_to_doc(results) + + async def _aget_relevant_documents( + self, + query: str, + *, + run_manager: AsyncCallbackManagerForRetrieverRun, + metadata: Optional[Dict[str, Any]] = None, + ) -> List[Document]: + if not self.zep_client_async: + raise RuntimeError("Zep client not initialized.") + + results = await self.zep_client_async.memory.search( + self.session_id, + text=query, + metadata=metadata, + search_scope=self.search_scope, + search_type=self.search_type, + mmr_lambda=self.mmr_lambda, + limit=self.top_k, + ) + + if self.search_scope == "summary": + return self._summary_search_result_to_doc(results) + + return self._messages_search_result_to_doc(results) diff --git a/libs/community/langchain_community/storage/__init__.py b/libs/community/langchain_community/storage/__init__.py index 0c689327a71..9a73d49110a 100644 --- a/libs/community/langchain_community/storage/__init__.py +++ b/libs/community/langchain_community/storage/__init__.py @@ -22,6 +22,9 @@ if TYPE_CHECKING: AstraDBByteStore, AstraDBStore, ) + from langchain_community.storage.cassandra import ( + CassandraByteStore, + ) from langchain_community.storage.mongodb import ( MongoDBStore, ) @@ -36,6 +39,7 @@ if TYPE_CHECKING: __all__ = [ "AstraDBByteStore", "AstraDBStore", + "CassandraByteStore", "MongoDBStore", "RedisStore", "UpstashRedisByteStore", @@ -45,6 +49,7 @@ __all__ = [ _module_lookup = { "AstraDBByteStore": "langchain_community.storage.astradb", "AstraDBStore": "langchain_community.storage.astradb", + "CassandraByteStore": "langchain_community.storage.cassandra", "MongoDBStore": "langchain_community.storage.mongodb", "RedisStore": "langchain_community.storage.redis", "UpstashRedisByteStore": "langchain_community.storage.upstash_redis", @@ -57,6 +62,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has 
no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/storage/cassandra.py b/libs/community/langchain_community/storage/cassandra.py new file mode 100644 index 00000000000..280ce5b3a5a --- /dev/null +++ b/libs/community/langchain_community/storage/cassandra.py @@ -0,0 +1,188 @@ +from __future__ import annotations + +import asyncio +from asyncio import InvalidStateError, Task +from typing import ( + TYPE_CHECKING, + AsyncIterator, + Iterator, + List, + Optional, + Sequence, + Tuple, +) + +from langchain_core.stores import ByteStore + +from langchain_community.utilities.cassandra import SetupMode, aexecute_cql + +if TYPE_CHECKING: + from cassandra.cluster import Session + from cassandra.query import PreparedStatement + +CREATE_TABLE_CQL_TEMPLATE = """ + CREATE TABLE IF NOT EXISTS {keyspace}.{table} + (row_id TEXT, body_blob BLOB, PRIMARY KEY (row_id)); +""" +SELECT_TABLE_CQL_TEMPLATE = ( + """SELECT row_id, body_blob FROM {keyspace}.{table} WHERE row_id IN ?;""" +) +SELECT_ALL_TABLE_CQL_TEMPLATE = """SELECT row_id, body_blob FROM {keyspace}.{table};""" +INSERT_TABLE_CQL_TEMPLATE = ( + """INSERT INTO {keyspace}.{table} (row_id, body_blob) VALUES (?, ?);""" +) +DELETE_TABLE_CQL_TEMPLATE = """DELETE FROM {keyspace}.{table} WHERE row_id IN ?;""" + + +class CassandraByteStore(ByteStore): + def __init__( + self, + table: str, + *, + session: Optional[Session] = None, + keyspace: Optional[str] = None, + setup_mode: SetupMode = SetupMode.SYNC, + ) -> None: + if not session or not keyspace: + try: + from cassio.config import check_resolve_keyspace, check_resolve_session + + self.keyspace = keyspace or check_resolve_keyspace(keyspace) + self.session = session or check_resolve_session() + except (ImportError, ModuleNotFoundError): + raise ImportError( + "Could not import a recent cassio package. " + "Please install it with `pip install --upgrade cassio`."
+ ) + else: + self.keyspace = keyspace + self.session = session + self.table = table + self.select_statement = None + self.insert_statement = None + self.delete_statement = None + + create_cql = CREATE_TABLE_CQL_TEMPLATE.format( + keyspace=self.keyspace, + table=self.table, + ) + self.db_setup_task: Optional[Task[None]] = None + if setup_mode == SetupMode.ASYNC: + self.db_setup_task = asyncio.create_task( + aexecute_cql(self.session, create_cql) + ) + else: + self.session.execute(create_cql) + + def ensure_db_setup(self) -> None: + if self.db_setup_task: + try: + self.db_setup_task.result() + except InvalidStateError: + raise ValueError( + "Asynchronous setup of the DB not finished. " + "NB: Cassandra components sync methods shouldn't be called from the " + "event loop. Consider using their async equivalents." + ) + + async def aensure_db_setup(self) -> None: + if self.db_setup_task: + await self.db_setup_task + + def get_select_statement(self) -> PreparedStatement: + if not self.select_statement: + self.select_statement = self.session.prepare( + SELECT_TABLE_CQL_TEMPLATE.format( + keyspace=self.keyspace, table=self.table + ) + ) + return self.select_statement + + def get_insert_statement(self) -> PreparedStatement: + if not self.insert_statement: + self.insert_statement = self.session.prepare( + INSERT_TABLE_CQL_TEMPLATE.format( + keyspace=self.keyspace, table=self.table + ) + ) + return self.insert_statement + + def get_delete_statement(self) -> PreparedStatement: + if not self.delete_statement: + self.delete_statement = self.session.prepare( + DELETE_TABLE_CQL_TEMPLATE.format( + keyspace=self.keyspace, table=self.table + ) + ) + return self.delete_statement + + def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]: + from cassandra.query import ValueSequence + + self.ensure_db_setup() + docs_dict = {} + for row in self.session.execute( + self.get_select_statement(), [ValueSequence(keys)] + ): + docs_dict[row.row_id] = row.body_blob + return
[docs_dict.get(key) for key in keys] + + async def amget(self, keys: Sequence[str]) -> List[Optional[bytes]]: + from cassandra.query import ValueSequence + + await self.aensure_db_setup() + docs_dict = {} + for row in await aexecute_cql( + self.session, self.get_select_statement(), parameters=[ValueSequence(keys)] + ): + docs_dict[row.row_id] = row.body_blob + return [docs_dict.get(key) for key in keys] + + def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None: + self.ensure_db_setup() + insert_statement = self.get_insert_statement() + for k, v in key_value_pairs: + self.session.execute(insert_statement, (k, v)) + + async def amset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None: + await self.aensure_db_setup() + insert_statement = self.get_insert_statement() + for k, v in key_value_pairs: + await aexecute_cql(self.session, insert_statement, parameters=(k, v)) + + def mdelete(self, keys: Sequence[str]) -> None: + from cassandra.query import ValueSequence + + self.ensure_db_setup() + self.session.execute(self.get_delete_statement(), [ValueSequence(keys)]) + + async def amdelete(self, keys: Sequence[str]) -> None: + from cassandra.query import ValueSequence + + await self.aensure_db_setup() + await aexecute_cql( + self.session, self.get_delete_statement(), parameters=[ValueSequence(keys)] + ) + + def yield_keys(self, *, prefix: Optional[str] = None) -> Iterator[str]: + self.ensure_db_setup() + for row in self.session.execute( + SELECT_ALL_TABLE_CQL_TEMPLATE.format( + keyspace=self.keyspace, table=self.table + ) + ): + key = row.row_id + if not prefix or key.startswith(prefix): + yield key + + async def ayield_keys(self, *, prefix: Optional[str] = None) -> AsyncIterator[str]: + await self.aensure_db_setup() + for row in await aexecute_cql( + self.session, + SELECT_ALL_TABLE_CQL_TEMPLATE.format( + keyspace=self.keyspace, table=self.table + ), + ): + key = row.row_id + if not prefix or key.startswith(prefix): + yield key diff --git 
a/libs/community/langchain_community/tools/__init__.py b/libs/community/langchain_community/tools/__init__.py index f426b8f0ec9..71445fa6cc9 100644 --- a/libs/community/langchain_community/tools/__init__.py +++ b/libs/community/langchain_community/tools/__init__.py @@ -90,6 +90,7 @@ if TYPE_CHECKING: from langchain_community.tools.convert_to_openai import ( format_tool_to_openai_function, ) + from langchain_community.tools.dataherald import DataheraldTextToSQL from langchain_community.tools.ddg_search.tool import ( DuckDuckGoSearchResults, DuckDuckGoSearchRun, @@ -356,6 +357,7 @@ __all__ = [ "CopyFileTool", "CurrentWebPageTool", "DeleteFileTool", + "DataheraldTextToSQL", "DuckDuckGoSearchResults", "DuckDuckGoSearchRun", "E2BDataAnalysisTool", @@ -610,6 +612,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/tools/bearly/tool.py b/libs/community/langchain_community/tools/bearly/tool.py index 286e024f64a..cd20e80a375 100644 --- a/libs/community/langchain_community/tools/bearly/tool.py +++ b/libs/community/langchain_community/tools/bearly/tool.py @@ -59,7 +59,7 @@ If you have any files outputted write them to "output/" relative to the executio path. Output can only be read from the directory, stdout, and stdin. \ Do not use things like plot.show() as it will \ not work instead write them out `output/` and a link to the file will be returned. 
\ -print() any output and results so you can capture the output.""" # noqa: T201 +print() any output and results so you can capture the output.""" class FileInfo(BaseModel): diff --git a/libs/community/langchain_community/tools/openapi/utils/api_models.py b/libs/community/langchain_community/tools/openapi/utils/api_models.py index 968f85dfa63..7b02655f4c1 100644 --- a/libs/community/langchain_community/tools/openapi/utils/api_models.py +++ b/libs/community/langchain_community/tools/openapi/utils/api_models.py @@ -55,6 +55,7 @@ class APIPropertyLocation(Enum): _SUPPORTED_MEDIA_TYPES = ("application/json",) SUPPORTED_LOCATIONS = { + APIPropertyLocation.HEADER, APIPropertyLocation.QUERY, APIPropertyLocation.PATH, } diff --git a/libs/community/langchain_community/tools/powerbi/tool.py b/libs/community/langchain_community/tools/powerbi/tool.py index 9f54ec453d4..77c1c482667 100644 --- a/libs/community/langchain_community/tools/powerbi/tool.py +++ b/libs/community/langchain_community/tools/powerbi/tool.py @@ -51,7 +51,7 @@ class QueryPowerBITool(BaseTool): for var in llm_chain.prompt.input_variables: if var not in ["tool_input", "tables", "schemas", "examples"]: raise ValueError( - "LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s", # noqa: C0301 E501 # pylint: disable=C0301 + "LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s", # noqa: E501 # pylint: disable=C0301 llm_chain.prompt.input_variables, ) return llm_chain diff --git a/libs/community/langchain_community/tools/wikipedia/tool.py b/libs/community/langchain_community/tools/wikipedia/tool.py index a74d437538d..66af94e41a0 100644 --- a/libs/community/langchain_community/tools/wikipedia/tool.py +++ b/libs/community/langchain_community/tools/wikipedia/tool.py @@ -1,13 +1,20 @@ """Tool for the Wikipedia API.""" -from typing import Optional +from typing import Optional, Type from 
langchain_core.callbacks import CallbackManagerForToolRun +from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.tools import BaseTool from langchain_community.utilities.wikipedia import WikipediaAPIWrapper +class WikipediaQueryInput(BaseModel): + """Input for the WikipediaQuery tool.""" + + query: str = Field(description="query to look up on wikipedia") + + class WikipediaQueryRun(BaseTool): """Tool that searches the Wikipedia API.""" @@ -20,6 +27,8 @@ class WikipediaQueryRun(BaseTool): ) api_wrapper: WikipediaAPIWrapper + args_schema: Type[BaseModel] = WikipediaQueryInput + def _run( self, query: str, diff --git a/libs/community/langchain_community/utilities/__init__.py b/libs/community/langchain_community/utilities/__init__.py index 6a069557b71..3a5184e7c03 100644 --- a/libs/community/langchain_community/utilities/__init__.py +++ b/libs/community/langchain_community/utilities/__init__.py @@ -35,6 +35,7 @@ if TYPE_CHECKING: from langchain_community.utilities.brave_search import ( BraveSearchWrapper, ) + from langchain_community.utilities.dataherald import DataheraldAPIWrapper from langchain_community.utilities.dria_index import ( DriaAPIWrapper, ) @@ -104,7 +105,7 @@ if TYPE_CHECKING: OpenWeatherMapAPIWrapper, ) from langchain_community.utilities.oracleai import ( - OracleSummary, # noqa: F401 + OracleSummary, ) from langchain_community.utilities.outline import ( OutlineAPIWrapper, @@ -124,6 +125,7 @@ if TYPE_CHECKING: from langchain_community.utilities.python import ( PythonREPL, ) + from langchain_community.utilities.rememberizer import RememberizerAPIWrapper from langchain_community.utilities.requests import ( Requests, RequestsWrapper, @@ -182,6 +184,7 @@ __all__ = [ "BibtexparserWrapper", "BingSearchAPIWrapper", "BraveSearchWrapper", + "DataheraldAPIWrapper", "DriaAPIWrapper", "DuckDuckGoSearchAPIWrapper", "GoldenQueryAPIWrapper", @@ -213,13 +216,14 @@ __all__ = [ "PowerBIDataset", "PubMedAPIWrapper", "PythonREPL", + 
"RememberizerAPIWrapper", "Requests", "RequestsWrapper", "RivaASR", "RivaTTS", - "SQLDatabase", "SceneXplainAPIWrapper", "SearchApiAPIWrapper", + "SQLDatabase", "SearxSearchWrapper", "SerpAPIWrapper", "SparkSQL", @@ -304,6 +308,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/utilities/arxiv.py b/libs/community/langchain_community/utilities/arxiv.py index 7b920415623..bab2df8061b 100644 --- a/libs/community/langchain_community/utilities/arxiv.py +++ b/libs/community/langchain_community/utilities/arxiv.py @@ -103,7 +103,7 @@ class ArxivAPIWrapper(BaseModel): Args: query: a plaintext search query - """ # noqa: E501 + """ try: if self.is_arxiv_identifier(query): results = self.arxiv_search( @@ -142,7 +142,7 @@ class ArxivAPIWrapper(BaseModel): Args: query: a plaintext search query - """ # noqa: E501 + """ try: if self.is_arxiv_identifier(query): results = self.arxiv_search( diff --git a/libs/community/langchain_community/utilities/awslambda.py b/libs/community/langchain_community/utilities/awslambda.py index 1b497dd5dd2..8dbaca563ae 100644 --- a/libs/community/langchain_community/utilities/awslambda.py +++ b/libs/community/langchain_community/utilities/awslambda.py @@ -60,7 +60,7 @@ class LambdaWrapper(BaseModel): query: an input to passed to the lambda function as the ``body`` of a JSON object. 
- """ # noqa: E501 + """ res = self.lambda_client.invoke( FunctionName=self.function_name, InvocationType="RequestResponse", diff --git a/libs/community/langchain_community/utilities/cassandra.py b/libs/community/langchain_community/utilities/cassandra.py index cd588508965..52b0963c896 100644 --- a/libs/community/langchain_community/utilities/cassandra.py +++ b/libs/community/langchain_community/utilities/cassandra.py @@ -5,7 +5,7 @@ from enum import Enum from typing import TYPE_CHECKING, Any, Callable if TYPE_CHECKING: - from cassandra.cluster import ResponseFuture + from cassandra.cluster import ResponseFuture, Session async def wrapped_response_future( @@ -35,6 +35,10 @@ async def wrapped_response_future( return await asyncio_future +async def aexecute_cql(session: Session, query: str, **kwargs: Any) -> Any: + return await wrapped_response_future(session.execute_async, query, **kwargs) + + class SetupMode(Enum): SYNC = 1 ASYNC = 2 diff --git a/libs/community/langchain_community/utilities/powerbi.py b/libs/community/langchain_community/utilities/powerbi.py index 93d48867968..085c9b8a0ab 100644 --- a/libs/community/langchain_community/utilities/powerbi.py +++ b/libs/community/langchain_community/utilities/powerbi.py @@ -61,7 +61,7 @@ class PowerBIDataset(BaseModel): """Get the request url.""" if self.group_id: return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301 - return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301 + return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries" # pylint: disable=C0301 @property def headers(self) -> Dict[str, str]: diff --git a/libs/community/langchain_community/utilities/tavily_search.py b/libs/community/langchain_community/utilities/tavily_search.py index 97dc45363f2..95eb52266ab 100644 --- a/libs/community/langchain_community/utilities/tavily_search.py +++ 
b/libs/community/langchain_community/utilities/tavily_search.py @@ -97,7 +97,7 @@ class TavilySearchAPIWrapper(BaseModel): content: The content of the result. score: The score of the result. raw_content: The raw content of the result. - """ # noqa: E501 + """ raw_search_results = self.raw_results( query, max_results=max_results, diff --git a/libs/community/langchain_community/utilities/twilio.py b/libs/community/langchain_community/utilities/twilio.py index a3ff0b23696..d1c3fce5827 100644 --- a/libs/community/langchain_community/utilities/twilio.py +++ b/libs/community/langchain_community/utilities/twilio.py @@ -40,7 +40,7 @@ class TwilioAPIWrapper(BaseModel): Twilio also work here. You cannot, for example, spoof messages from a private cell phone number. If you are using `messaging_service_sid`, this parameter must be empty. - """ # noqa: E501 + """ class Config: """Configuration for this pydantic object.""" @@ -77,6 +77,6 @@ class TwilioAPIWrapper(BaseModel): SMS/MMS or [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses) for other 3rd-party channels. 
- """ # noqa: E501 + """ message = self.client.messages.create(to, from_=self.from_number, body=body) return message.sid diff --git a/libs/community/langchain_community/vectorstores/__init__.py b/libs/community/langchain_community/vectorstores/__init__.py index bdd0bb96bd4..3856d67881f 100644 --- a/libs/community/langchain_community/vectorstores/__init__.py +++ b/libs/community/langchain_community/vectorstores/__init__.py @@ -153,6 +153,10 @@ if TYPE_CHECKING: from langchain_community.vectorstores.llm_rails import ( LLMRails, ) + from langchain_community.vectorstores.manticore_search import ( + ManticoreSearch, + ManticoreSearchSettings, + ) from langchain_community.vectorstores.marqo import ( Marqo, ) @@ -182,7 +186,7 @@ if TYPE_CHECKING: OpenSearchVectorSearch, ) from langchain_community.vectorstores.oraclevs import ( - OracleVS, # noqa: F401 + OracleVS, ) from langchain_community.vectorstores.pathway import ( PathwayVectorClient, @@ -290,6 +294,9 @@ if TYPE_CHECKING: from langchain_community.vectorstores.zep import ( ZepVectorStore, ) + from langchain_community.vectorstores.zep_cloud import ( + ZepCloudVectorStore, + ) from langchain_community.vectorstores.zilliz import ( Zilliz, ) @@ -341,6 +348,8 @@ __all__ = [ "LLMRails", "LanceDB", "Lantern", + "ManticoreSearch", + "ManticoreSearchSettings", "Marqo", "MatchingEngine", "Meilisearch", @@ -389,6 +398,7 @@ __all__ = [ "Weaviate", "Yellowbrick", "ZepVectorStore", + "ZepCloudVectorStore", "Zilliz", ] @@ -439,6 +449,8 @@ _module_lookup = { "LLMRails": "langchain_community.vectorstores.llm_rails", "LanceDB": "langchain_community.vectorstores.lancedb", "Lantern": "langchain_community.vectorstores.lantern", + "ManticoreSearch": "langchain_community.vectorstores.manticore_search", + "ManticoreSearchSettings": "langchain_community.vectorstores.manticore_search", "Marqo": "langchain_community.vectorstores.marqo", "MatchingEngine": "langchain_community.vectorstores.matching_engine", "Meilisearch": 
"langchain_community.vectorstores.meilisearch", @@ -448,7 +460,7 @@ _module_lookup = { "MyScale": "langchain_community.vectorstores.myscale", "MyScaleSettings": "langchain_community.vectorstores.myscale", "Neo4jVector": "langchain_community.vectorstores.neo4j_vector", - "NeuralDBClientVectorStore": "langchain_community.vectorstores.thirdai_neuraldb", # noqa: E501 + "NeuralDBClientVectorStore": "langchain_community.vectorstores.thirdai_neuraldb", "NeuralDBVectorStore": "langchain_community.vectorstores.thirdai_neuraldb", "OpenSearchVectorSearch": "langchain_community.vectorstores.opensearch_vector_search", # noqa: E501 "OracleVS": "langchain_community.vectorstores.oraclevs", @@ -487,6 +499,7 @@ _module_lookup = { "Weaviate": "langchain_community.vectorstores.weaviate", "Yellowbrick": "langchain_community.vectorstores.yellowbrick", "ZepVectorStore": "langchain_community.vectorstores.zep", + "ZepCloudVectorStore": "langchain_community.vectorstores.zep_cloud", "Zilliz": "langchain_community.vectorstores.zilliz", } @@ -496,6 +509,3 @@ def __getattr__(name: str) -> Any: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}") - - -__all__ = list(_module_lookup.keys()) diff --git a/libs/community/langchain_community/vectorstores/azuresearch.py b/libs/community/langchain_community/vectorstores/azuresearch.py index a49bf03984a..1ba6bbf1b80 100644 --- a/libs/community/langchain_community/vectorstores/azuresearch.py +++ b/libs/community/langchain_community/vectorstores/azuresearch.py @@ -1,6 +1,7 @@ from __future__ import annotations import base64 +import itertools import json import logging import uuid @@ -29,10 +30,12 @@ from langchain_core.retrievers import BaseRetriever from langchain_core.utils import get_from_env from langchain_core.vectorstores import VectorStore +from langchain_community.vectorstores.utils import maximal_marginal_relevance + logger = logging.getLogger() if 
TYPE_CHECKING: - from azure.search.documents import SearchClient + from azure.search.documents import SearchClient, SearchItemPaged from azure.search.documents.indexes.models import ( CorsOptions, ScoringProfile, @@ -236,6 +239,8 @@ class AzureSearch(VectorStore): scoring_profiles: Optional[List[ScoringProfile]] = None, default_scoring_profile: Optional[str] = None, cors_options: Optional[CorsOptions] = None, + *, + vector_search_dimensions: Optional[int] = None, **kwargs: Any, ): from azure.search.documents.indexes.models import ( @@ -269,7 +274,8 @@ class AzureSearch(VectorStore): name=FIELDS_CONTENT_VECTOR, type=SearchFieldDataType.Collection(SearchFieldDataType.Single), searchable=True, - vector_search_dimensions=len(self.embed_query("Text")), + vector_search_dimensions=vector_search_dimensions + or len(self.embed_query("Text")), vector_search_profile_name="myHnswProfile", ), SearchableField( @@ -311,7 +317,6 @@ class AzureSearch(VectorStore): ) -> List[str]: """Add texts data to an existing index.""" keys = kwargs.get("keys") - ids = [] # batching support if embedding function is an Embeddings object if isinstance(self.embedding_function, Embeddings): @@ -326,9 +331,21 @@ class AzureSearch(VectorStore): logger.debug("Nothing to insert, skipping.") return [] + return self.add_embeddings(zip(texts, embeddings), metadatas, keys=keys) + + def add_embeddings( + self, + text_embeddings: Iterable[Tuple[str, List[float]]], + metadatas: Optional[List[dict]] = None, + *, + keys: Optional[List[str]] = None, + ) -> List[str]: + """Add embeddings to an existing index.""" + ids = [] + # Write data to index data = [] - for i, text in enumerate(texts): + for i, (text, embedding) in enumerate(text_embeddings): # Use provided key otherwise use default key key = keys[i] if keys else str(uuid.uuid4()) # Encoding key for Azure Search valid characters @@ -340,9 +357,7 @@ class AzureSearch(VectorStore): "@search.action": "upload", FIELDS_ID: key, FIELDS_CONTENT: text, - 
FIELDS_CONTENT_VECTOR: np.array( - embeddings[i], dtype=np.float32 - ).tolist(), + FIELDS_CONTENT_VECTOR: np.array(embedding, dtype=np.float32).tolist(), FIELDS_METADATA: json.dumps(metadata), } if metadata: @@ -358,7 +373,7 @@ class AzureSearch(VectorStore): if len(data) == MAX_UPLOAD_BATCH_SIZE: response = self.client.upload_documents(documents=data) # Check if all documents were successfully uploaded - if not all([r.succeeded for r in response]): + if not all(r.succeeded for r in response): raise Exception(response) # Reset data data = [] @@ -370,7 +385,7 @@ class AzureSearch(VectorStore): # Upload data to index response = self.client.upload_documents(documents=data) # Check if all documents were successfully uploaded - if all([r.succeeded for r in response]): + if all(r.succeeded for r in response): return ids else: raise Exception(response) @@ -433,48 +448,61 @@ class AzureSearch(VectorStore): return [doc for doc, _ in docs_and_scores] def vector_search_with_score( - self, query: str, k: int = 4, filters: Optional[str] = None + self, + query: str, + k: int = 4, + filters: Optional[str] = None, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: - query: Text to look up documents similar to. - k: Number of Documents to return. Defaults to 4. + query (str): Text to look up documents similar to. + k (int, optional): Number of Documents to return. Defaults to 4. + filters (str, optional): Filtering expression. Defaults to None. 
Returns: - List of Documents most similar to the query and score for each + List[Tuple[Document, float]]: List of Documents most similar + to the query and score for each """ + embedding = self.embed_query(query) + results = self._simple_search(embedding, "", k, filters=filters, **kwargs) - from azure.search.documents.models import VectorizedQuery + return _results_to_documents(results) - results = self.client.search( - search_text="", - vector_queries=[ - VectorizedQuery( - vector=np.array(self.embed_query(query), dtype=np.float32).tolist(), - k_nearest_neighbors=k, - fields=FIELDS_CONTENT_VECTOR, - ) - ], - filter=filters, - top=k, + def max_marginal_relevance_search_with_score( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + *, + filters: Optional[str] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Perform a search and return results that are reordered by MMR. + + Args: + query (str): Text to look up documents similar to. + k (int, optional): How many results to give. Defaults to 4. + fetch_k (int, optional): Total results to select k from. + Defaults to 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5 + filters (str, optional): Filtering expression. Defaults to None. 
+ + Returns: + List[Tuple[Document, float]]: List of Documents most similar + to the query and score for each + """ + embedding = self.embed_query(query) + results = self._simple_search(embedding, "", fetch_k, filters=filters, **kwargs) + + return _reorder_results_with_maximal_marginal_relevance( + results, query_embedding=np.array(embedding), lambda_mult=lambda_mult, k=k ) - # Convert results to Document objects - docs = [ - ( - Document( - page_content=result.pop(FIELDS_CONTENT), - metadata=json.loads(result[FIELDS_METADATA]) - if FIELDS_METADATA in result - else { - k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR - }, - ), - float(result["@search.score"]), - ) - for result in results - ] - return docs def hybrid_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]: """ @@ -487,13 +515,15 @@ class AzureSearch(VectorStore): Returns: List[Document]: A list of documents that are most similar to the query text. """ - docs_and_scores = self.hybrid_search_with_score( - query, k=k, filters=kwargs.get("filters", None) - ) + docs_and_scores = self.hybrid_search_with_score(query, k=k, **kwargs) return [doc for doc, _ in docs_and_scores] def hybrid_search_with_score( - self, query: str, k: int = 4, filters: Optional[str] = None + self, + query: str, + k: int = 4, + filters: Optional[str] = None, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query with a hybrid query. 
@@ -504,36 +534,11 @@ class AzureSearch(VectorStore): Returns: List of Documents most similar to the query and score for each """ - from azure.search.documents.models import VectorizedQuery - results = self.client.search( - search_text=query, - vector_queries=[ - VectorizedQuery( - vector=np.array(self.embed_query(query), dtype=np.float32).tolist(), - k_nearest_neighbors=k, - fields=FIELDS_CONTENT_VECTOR, - ) - ], - filter=filters, - top=k, - ) - # Convert results to Document objects - docs = [ - ( - Document( - page_content=result.pop(FIELDS_CONTENT), - metadata=json.loads(result[FIELDS_METADATA]) - if FIELDS_METADATA in result - else { - k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR - }, - ), - float(result["@search.score"]), - ) - for result in results - ] - return docs + embedding = self.embed_query(query) + results = self._simple_search(embedding, query, k, filters=filters, **kwargs) + + return _results_to_documents(results) def hybrid_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any @@ -546,6 +551,79 @@ class AzureSearch(VectorStore): else [r for r in result if r[1] >= score_threshold] ) + def hybrid_max_marginal_relevance_search_with_score( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + *, + filters: Optional[str] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Return docs most similar to query with a hybrid query + and reorder results by MMR. + + Args: + query (str): Text to look up documents similar to. + k (int, optional): Number of Documents to return. Defaults to 4. + fetch_k (int, optional): Total results to select k from. + Defaults to 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5 + filters (str, optional): Filtering expression. Defaults to None. 
+ + Returns: + List of Documents most similar to the query and score for each + """ + + embedding = self.embed_query(query) + results = self._simple_search( + embedding, query, fetch_k, filters=filters, **kwargs + ) + + return _reorder_results_with_maximal_marginal_relevance( + results, query_embedding=np.array(embedding), lambda_mult=lambda_mult, k=k + ) + + def _simple_search( + self, + embedding: List[float], + text_query: str, + k: int, + *, + filters: Optional[str] = None, + **kwargs: Any, + ) -> SearchItemPaged[dict]: + """Perform vector or hybrid search in the Azure search index. + + Args: + embedding: A vector embedding to search in the vector space. + text_query: A full-text search query expression; + Use "*" or omit this parameter to perform only vector search. + k: Number of documents to return. + filters: Filtering expression. + Returns: + Search items + """ + from azure.search.documents.models import VectorizedQuery + + return self.client.search( + search_text=text_query, + vector_queries=[ + VectorizedQuery( + vector=np.array(embedding, dtype=np.float32).tolist(), + k_nearest_neighbors=k, + fields=FIELDS_CONTENT_VECTOR, + ) + ], + filter=filters, + top=k, + **kwargs, + ) + def semantic_hybrid_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: @@ -555,12 +633,13 @@ class AzureSearch(VectorStore): Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. + filters: Filtering expression. Returns: List[Document]: A list of documents that are most similar to the query text. """ docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank( - query, k=k, filters=kwargs.get("filters", None) + query, k=k, **kwargs ) return [doc for doc, _, _ in docs_and_scores] @@ -579,6 +658,7 @@ class AzureSearch(VectorStore): k (int): The number of documents to return. Default is 4. score_type: Must either be "score" or "reranker_score". Defaulted to "score". 
+ filters: Filtering expression. Returns: List[Tuple[Document, float]]: A list of documents and their @@ -586,7 +666,7 @@ class AzureSearch(VectorStore): """ score_threshold = kwargs.pop("score_threshold", None) docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank( - query, k=k, filters=kwargs.get("filters", None) + query, k=k, **kwargs ) if score_type == "score": return [ @@ -602,13 +682,14 @@ class AzureSearch(VectorStore): ] def semantic_hybrid_search_with_score_and_rerank( - self, query: str, k: int = 4, filters: Optional[str] = None + self, query: str, k: int = 4, *, filters: Optional[str] = None, **kwargs: Any ) -> List[Tuple[Document, float, float]]: """Return docs most similar to query with a hybrid query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. + filters: Filtering expression. Returns: List of Documents most similar to the query and score for each @@ -630,6 +711,7 @@ class AzureSearch(VectorStore): query_caption="extractive", query_answer="extractive", top=k, + **kwargs, ) # Get Semantic Answers semantic_answers = results.get_answers() or [] @@ -696,10 +778,66 @@ class AzureSearch(VectorStore): index_name, embedding, fields=fields, + **kwargs, ) azure_search.add_texts(texts, metadatas, **kwargs) return azure_search + @classmethod + async def afrom_embeddings( + cls: Type[AzureSearch], + text_embeddings: Iterable[Tuple[str, List[float]]], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + *, + azure_search_endpoint: str = "", + azure_search_key: str = "", + index_name: str = "langchain-index", + fields: Optional[List[SearchField]] = None, + **kwargs: Any, + ) -> AzureSearch: + return cls.from_embeddings( + text_embeddings, + embedding, + metadatas=metadatas, + azure_search_endpoint=azure_search_endpoint, + azure_search_key=azure_search_key, + index_name=index_name, + fields=fields, + **kwargs, + ) + + @classmethod + def from_embeddings( + cls: Type[AzureSearch], + 
text_embeddings: Iterable[Tuple[str, List[float]]], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + *, + azure_search_endpoint: str = "", + azure_search_key: str = "", + index_name: str = "langchain-index", + fields: Optional[List[SearchField]] = None, + **kwargs: Any, + ) -> AzureSearch: + # Creating a new Azure Search instance + text_embeddings, first_text_embedding = _peek(text_embeddings) + if first_text_embedding is None: + raise ValueError("Cannot create AzureSearch from empty embeddings.") + vector_search_dimensions = len(first_text_embedding[1]) + + azure_search = cls( + azure_search_endpoint=azure_search_endpoint, + azure_search_key=azure_search_key, + index_name=index_name, + embedding_function=embedding, + fields=fields, + vector_search_dimensions=vector_search_dimensions, + **kwargs, + ) + azure_search.add_embeddings(text_embeddings, metadatas, **kwargs) + return azure_search + def as_retriever(self, **kwargs: Any) -> AzureSearchVectorStoreRetriever: # type: ignore """Return AzureSearchVectorStoreRetriever initialized from this VectorStore. @@ -710,7 +848,6 @@ class AzureSearch(VectorStore): "semantic_hybrid". search_kwargs (Optional[Dict]): Keyword arguments to pass to the search function. Can include things like: - k: Amount of documents to return (Default: 4) score_threshold: Minimum relevance threshold for similarity_score_threshold fetch_k: Amount of documents to pass to MMR algorithm (Default: 20) @@ -737,6 +874,16 @@ class AzureSearchVectorStoreRetriever(BaseRetriever): or "semantic_hybrid_score_threshold".""" k: int = 4 """Number of documents to return.""" + search_kwargs: dict = {} + """Search params. + score_threshold: Minimum relevance threshold + for similarity_score_threshold + fetch_k: Amount of documents to pass to MMR algorithm (Default: 20) + lambda_mult: Diversity of results returned by MMR; + 1 for minimum diversity and 0 for maximum. 
(Default: 0.5) + filter: Filter by document metadata + """ + allowed_search_types: ClassVar[Collection[str]] = ( "similarity", "similarity_score_threshold", @@ -769,33 +916,102 @@ class AzureSearchVectorStoreRetriever(BaseRetriever): run_manager: CallbackManagerForRetrieverRun, **kwargs: Any, ) -> List[Document]: + params = {**self.search_kwargs, **kwargs} + if self.search_type == "similarity": - docs = self.vectorstore.vector_search(query, k=self.k, **kwargs) + docs = self.vectorstore.vector_search(query, k=self.k, **params) elif self.search_type == "similarity_score_threshold": docs = [ doc for doc, _ in self.vectorstore.similarity_search_with_relevance_scores( - query, k=self.k, **kwargs + query, k=self.k, **params ) ] elif self.search_type == "hybrid": - docs = self.vectorstore.hybrid_search(query, k=self.k, **kwargs) + docs = self.vectorstore.hybrid_search(query, k=self.k, **params) elif self.search_type == "hybrid_score_threshold": docs = [ doc for doc, _ in self.vectorstore.hybrid_search_with_relevance_scores( - query, k=self.k, **kwargs + query, k=self.k, **params ) ] elif self.search_type == "semantic_hybrid": - docs = self.vectorstore.semantic_hybrid_search(query, k=self.k, **kwargs) + docs = self.vectorstore.semantic_hybrid_search(query, k=self.k, **params) elif self.search_type == "semantic_hybrid_score_threshold": docs = [ doc for doc, _ in self.vectorstore.semantic_hybrid_search_with_score( - query, k=self.k, **kwargs + query, k=self.k, **params ) ] else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs + + +def _results_to_documents( + results: SearchItemPaged[Dict], +) -> List[Tuple[Document, float]]: + docs = [ + ( + _result_to_document(result), + float(result["@search.score"]), + ) + for result in results + ] + return docs + + +def _reorder_results_with_maximal_marginal_relevance( + results: SearchItemPaged[Dict], + query_embedding: np.ndarray, + lambda_mult: float = 0.5, + k: int = 4, +) -> List[Tuple[Document, 
float]]: + # Convert results to Document objects + docs = [ + ( + _result_to_document(result), + float(result["@search.score"]), + result[FIELDS_CONTENT_VECTOR], + ) + for result in results + ] + documents, scores, vectors = map(list, zip(*docs)) + + # Get the new order of results. + new_ordering = maximal_marginal_relevance( + query_embedding, vectors, k=k, lambda_mult=lambda_mult + ) + + # Reorder the values and return. + ret: List[Tuple[Document, float]] = [] + for x in new_ordering: + # Function can return -1 index + if x == -1: + break + ret.append((documents[x], scores[x])) # type: ignore + + return ret + + +def _result_to_document(result: Dict) -> Document: + return Document( + page_content=result.pop(FIELDS_CONTENT), + metadata=json.loads(result[FIELDS_METADATA]) + if FIELDS_METADATA in result + else { + key: value for key, value in result.items() if key != FIELDS_CONTENT_VECTOR + }, + ) + + +def _peek(iterable: Iterable, default: Optional[Any] = None) -> Tuple[Iterable, Any]: + try: + iterator = iter(iterable) + value = next(iterator) + iterable = itertools.chain([value], iterator) + return iterable, value + except StopIteration: + return iterable, default diff --git a/libs/community/langchain_community/vectorstores/clickhouse.py b/libs/community/langchain_community/vectorstores/clickhouse.py index e2083deb47e..d950a541be8 100644 --- a/libs/community/langchain_community/vectorstores/clickhouse.py +++ b/libs/community/langchain_community/vectorstores/clickhouse.py @@ -39,6 +39,7 @@ class ClickhouseSettings(BaseSettings): port (int) : URL port to connect with HTTP. Defaults to 8443. username (str) : Username to login. Defaults to None. password (str) : Password to login. Defaults to None. + secure (bool) : Connect to server over secure connection. Defaults to False. index_type (str): index type string. index_param (list): index build parameter. index_query_params(dict): index query parameters. 
@@ -72,6 +73,8 @@ class ClickhouseSettings(BaseSettings): username: Optional[str] = None password: Optional[str] = None + secure: bool = False + index_type: Optional[str] = "annoy" # Annoy supports L2Distance and cosineDistance. index_param: Optional[Union[List, Dict]] = ["'L2Distance'", 100] @@ -193,6 +196,7 @@ class Clickhouse(VectorStore): port=self.config.port, username=self.config.username, password=self.config.password, + secure=self.config.secure, **kwargs, ) # Enable JSON type diff --git a/libs/community/langchain_community/vectorstores/duckdb.py b/libs/community/langchain_community/vectorstores/duckdb.py index dd3b1611e87..e949d6ac1ab 100644 --- a/libs/community/langchain_community/vectorstores/duckdb.py +++ b/libs/community/langchain_community/vectorstores/duckdb.py @@ -2,13 +2,23 @@ from __future__ import annotations import json +import logging import uuid +import warnings from typing import Any, Iterable, List, Optional, Type from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VST, VectorStore +logger = logging.getLogger(__name__) + +DEFAULT_VECTOR_KEY = "embedding" +DEFAULT_ID_KEY = "id" +DEFAULT_TEXT_KEY = "text" +DEFAULT_TABLE_NAME = "embeddings" +SIMILARITY_ALIAS = "similarity_score" + class DuckDB(VectorStore): """`DuckDB` vector store. 
@@ -76,10 +86,10 @@ class DuckDB(VectorStore): *, connection: Optional[Any] = None, embedding: Embeddings, - vector_key: str = "embedding", - id_key: str = "id", - text_key: str = "text", - table_name: str = "vectorstore", + vector_key: str = DEFAULT_VECTOR_KEY, + id_key: str = DEFAULT_ID_KEY, + text_key: str = DEFAULT_TEXT_KEY, + table_name: str = DEFAULT_TABLE_NAME, ): """Initialize with DuckDB connection and setup for vector storage.""" try: @@ -100,8 +110,6 @@ class DuckDB(VectorStore): raise ValueError("An embedding function or model must be provided.") if connection is None: - import warnings - warnings.warn( "No DuckDB connection provided. A new connection will be created." "This connection is running in memory and no data will be persisted." @@ -138,6 +146,17 @@ class DuckDB(VectorStore): Returns: List of ids of the added texts. """ + have_pandas = False + try: + import pandas as pd + + have_pandas = True + except ImportError: + logger.info( + "Unable to import pandas. " + "Install it with `pip install -U pandas` " + "to improve performance of add_texts()." 
+ ) # Extract ids from kwargs or generate new ones if not provided ids = kwargs.pop("ids", [str(uuid.uuid4()) for _ in texts]) @@ -145,6 +164,7 @@ class DuckDB(VectorStore): # Embed texts and create documents ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self._embedding.embed_documents(list(texts)) + data = [] for idx, text in enumerate(texts): embedding = embeddings[idx] # Serialize metadata if present, else default to None @@ -153,9 +173,26 @@ class DuckDB(VectorStore): if metadatas and idx < len(metadatas) else None ) + if have_pandas: + data.append( + { + self._id_key: ids[idx], + self._text_key: text, + self._vector_key: embedding, + "metadata": metadata, + } + ) + else: + self._connection.execute( + f"INSERT INTO {self._table_name} VALUES (?,?,?,?)", + [ids[idx], text, embedding, metadata], + ) + + if have_pandas: + # noinspection PyUnusedLocal + df = pd.DataFrame.from_dict(data) # noqa: F841 self._connection.execute( - f"INSERT INTO {self._table_name} VALUES (?,?,?,?)", - [ids[idx], text, embedding, metadata], + f"INSERT INTO {self._table_name} SELECT * FROM df", ) return ids @@ -181,20 +218,21 @@ class DuckDB(VectorStore): self._table.select( *[ self.duckdb.StarExpression(exclude=[]), - list_cosine_similarity.alias("similarity"), + list_cosine_similarity.alias(SIMILARITY_ALIAS), ] ) - .order("similarity desc") + .order(f"{SIMILARITY_ALIAS} desc") .limit(k) - .select( - self.duckdb.StarExpression(exclude=["similarity", self._vector_key]) - ) .fetchdf() ) return [ Document( page_content=docs[self._text_key][idx], - metadata=json.loads(docs["metadata"][idx]) + metadata={ + **json.loads(docs["metadata"][idx]), + # using underscore prefix to avoid conflicts with user metadata keys + f"_{SIMILARITY_ALIAS}": docs[SIMILARITY_ALIAS][idx], + } if docs["metadata"][idx] else {}, ) @@ -231,10 +269,10 @@ class DuckDB(VectorStore): # Extract kwargs for DuckDB instance creation connection = kwargs.get("connection", None) - vector_key = 
kwargs.get("vector_key", "vector") - id_key = kwargs.get("id_key", "id") - text_key = kwargs.get("text_key", "text") - table_name = kwargs.get("table_name", "embeddings") + vector_key = kwargs.get("vector_key", DEFAULT_VECTOR_KEY) + id_key = kwargs.get("id_key", DEFAULT_ID_KEY) + text_key = kwargs.get("text_key", DEFAULT_TEXT_KEY) + table_name = kwargs.get("table_name", DEFAULT_TABLE_NAME) # Create an instance of DuckDB instance = DuckDB( diff --git a/libs/community/langchain_community/vectorstores/hanavector.py b/libs/community/langchain_community/vectorstores/hanavector.py index ca595dec935..724c3d93b2a 100644 --- a/libs/community/langchain_community/vectorstores/hanavector.py +++ b/libs/community/langchain_community/vectorstores/hanavector.py @@ -1,4 +1,5 @@ """SAP HANA Cloud Vector Engine""" + from __future__ import annotations import importlib.util @@ -85,6 +86,8 @@ class HanaDB(VectorStore): metadata_column: str = default_metadata_column, vector_column: str = default_vector_column, vector_column_length: int = default_vector_column_length, + *, + specific_metadata_columns: Optional[List[str]] = None, ): # Check if the hdbcli package is installed if importlib.util.find_spec("hdbcli") is None: @@ -110,6 +113,9 @@ class HanaDB(VectorStore): self.metadata_column = HanaDB._sanitize_name(metadata_column) self.vector_column = HanaDB._sanitize_name(vector_column) self.vector_column_length = HanaDB._sanitize_int(vector_column_length) + self.specific_metadata_columns = HanaDB._sanitize_specific_metadata_columns( + specific_metadata_columns or [] + ) # Check if the table exists, and eventually create it if not self._table_exists(self.table_name): @@ -139,6 +145,8 @@ class HanaDB(VectorStore): ["REAL_VECTOR"], self.vector_column_length, ) + for column_name in self.specific_metadata_columns: + self._check_column(self.table_name, column_name) def _table_exists(self, table_name) -> bool: # type: ignore[no-untyped-def] sql_str = ( @@ -156,7 +164,9 @@ class 
HanaDB(VectorStore): cur.close() return False - def _check_column(self, table_name, column_name, column_type, column_length=None): # type: ignore[no-untyped-def] + def _check_column( # type: ignore[no-untyped-def] + self, table_name, column_name, column_type=None, column_length=None + ): sql_str = ( "SELECT DATA_TYPE_NAME, LENGTH FROM SYS.TABLE_COLUMNS WHERE " "SCHEMA_NAME = CURRENT_SCHEMA " @@ -170,10 +180,11 @@ class HanaDB(VectorStore): if len(rows) == 0: raise AttributeError(f"Column {column_name} does not exist") # Check data type - if rows[0][0] not in column_type: - raise AttributeError( - f"Column {column_name} has the wrong type: {rows[0][0]}" - ) + if column_type: + if rows[0][0] not in column_type: + raise AttributeError( + f"Column {column_name} has the wrong type: {rows[0][0]}" + ) # Check length, if parameter was provided if column_length is not None: if rows[0][1] != column_length: @@ -189,17 +200,20 @@ class HanaDB(VectorStore): def embeddings(self) -> Embeddings: return self.embedding + @staticmethod def _sanitize_name(input_str: str) -> str: # type: ignore[misc] # Remove characters that are not alphanumeric or underscores return re.sub(r"[^a-zA-Z0-9_]", "", input_str) + @staticmethod def _sanitize_int(input_int: any) -> int: # type: ignore[valid-type] value = int(str(input_int)) if value < -1: raise ValueError(f"Value ({value}) must not be smaller than -1") return int(str(input_int)) - def _sanitize_list_float(embedding: List[float]) -> List[float]: # type: ignore[misc] + @staticmethod + def _sanitize_list_float(embedding: List[float]) -> List[float]: for value in embedding: if not isinstance(value, float): raise ValueError(f"Value ({value}) does not have type float") @@ -208,13 +222,36 @@ class HanaDB(VectorStore): # Compile pattern only once, for better performance _compiled_pattern = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*$") - def _sanitize_metadata_keys(metadata: dict) -> dict: # type: ignore[misc] + @staticmethod + def 
_sanitize_metadata_keys(metadata: dict) -> dict: for key in metadata.keys(): if not HanaDB._compiled_pattern.match(key): raise ValueError(f"Invalid metadata key {key}") return metadata + @staticmethod + def _sanitize_specific_metadata_columns( + specific_metadata_columns: List[str], + ) -> List[str]: + metadata_columns = [] + for c in specific_metadata_columns: + sanitized_name = HanaDB._sanitize_name(c) + metadata_columns.append(sanitized_name) + return metadata_columns + + def _split_off_special_metadata(self, metadata: dict) -> Tuple[dict, list]: + # Use provided values by default or fallback + special_metadata = [] + + if not metadata: + return {}, [] + + for column_name in self.specific_metadata_columns: + special_metadata.append(metadata.get(column_name, None)) + + return metadata, special_metadata + def add_texts( # type: ignore[override] self, texts: Iterable[str], @@ -238,30 +275,45 @@ class HanaDB(VectorStore): if embeddings is None: embeddings = self.embedding.embed_documents(list(texts)) + # Create sql parameters array + sql_params = [] + for i, text in enumerate(texts): + metadata = metadatas[i] if metadatas else {} + metadata, extracted_special_metadata = self._split_off_special_metadata( + metadata + ) + embedding = ( + embeddings[i] + if embeddings + else self.embedding.embed_documents([text])[0] + ) + sql_params.append( + ( + text, + json.dumps(HanaDB._sanitize_metadata_keys(metadata)), + f"[{','.join(map(str, embedding))}]", + *extracted_special_metadata, + ) + ) + + # Insert data into the table cur = self.connection.cursor() try: - # Insert data into the table - for i, text in enumerate(texts): - # Use provided values by default or fallback - metadata = metadatas[i] if metadatas else {} - embedding = ( - embeddings[i] - if embeddings - else self.embedding.embed_documents([text])[0] - ) - sql_str = ( - f'INSERT INTO "{self.table_name}" ("{self.content_column}", ' - f'"{self.metadata_column}", "{self.vector_column}") ' - f"VALUES (?, ?, 
TO_REAL_VECTOR (?));" - ) - cur.execute( - sql_str, - ( - text, - json.dumps(HanaDB._sanitize_metadata_keys(metadata)), - f"[{','.join(map(str, embedding))}]", - ), + specific_metadata_columns_string = '", "'.join( + self.specific_metadata_columns + ) + if specific_metadata_columns_string: + specific_metadata_columns_string = ( + ', "' + specific_metadata_columns_string + '"' ) + sql_str = ( + f'INSERT INTO "{self.table_name}" ("{self.content_column}", ' + f'"{self.metadata_column}", ' + f'"{self.vector_column}"{specific_metadata_columns_string}) ' + f"VALUES (?, ?, TO_REAL_VECTOR (?)" + f"{', ?' * len(self.specific_metadata_columns)});" + ) + cur.executemany(sql_str, sql_params) finally: cur.close() return [] @@ -279,6 +331,8 @@ class HanaDB(VectorStore): metadata_column: str = default_metadata_column, vector_column: str = default_vector_column, vector_column_length: int = default_vector_column_length, + *, + specific_metadata_columns: Optional[List[str]] = None, ): """Create a HanaDB instance from raw documents. 
This is a user-friendly interface that: @@ -297,6 +351,7 @@ class HanaDB(VectorStore): metadata_column=metadata_column, vector_column=vector_column, vector_column_length=vector_column_length, # -1 means dynamic length + specific_metadata_columns=specific_metadata_columns, ) instance.add_texts(texts, metadatas) return instance @@ -514,10 +569,12 @@ class HanaDB(VectorStore): f"Unsupported filter data-type: {type(filter_value)}" ) - where_str += ( - f" JSON_VALUE({self.metadata_column}, '$.{key}')" - f" {operator} {sql_param}" + selector = ( + f' "{key}"' + if key in self.specific_metadata_columns + else f"JSON_VALUE({self.metadata_column}, '$.{key}')" ) + where_str += f"{selector} " f"{operator} {sql_param}" return where_str, query_tuple diff --git a/libs/community/langchain_community/vectorstores/jaguar.py b/libs/community/langchain_community/vectorstores/jaguar.py index 3a6bae51f9e..a7a5556428c 100644 --- a/libs/community/langchain_community/vectorstores/jaguar.py +++ b/libs/community/langchain_community/vectorstores/jaguar.py @@ -431,7 +431,7 @@ class Jaguar(VectorStore): def prt(self, msg: str) -> None: with open("/tmp/debugjaguar.log", "a") as file: - print(f"msg={msg}", file=file, flush=True) # noqa: T201 + print(f"msg={msg}", file=file, flush=True) def _parseMeta(self, nvmap: dict, filecol: str) -> Tuple[List[str], List[str], str]: filepath = "" diff --git a/libs/community/langchain_community/vectorstores/manticore_search.py b/libs/community/langchain_community/vectorstores/manticore_search.py new file mode 100644 index 00000000000..edafb8bebdb --- /dev/null +++ b/libs/community/langchain_community/vectorstores/manticore_search.py @@ -0,0 +1,372 @@ +from __future__ import annotations + +import json +import logging +import uuid +from hashlib import sha1 +from typing import Any, Dict, Iterable, List, Optional, Type + +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.pydantic_v1 import 
BaseSettings +from langchain_core.vectorstores import VectorStore + +logger = logging.getLogger() +DEFAULT_K = 4 # Number of Documents to return. + + +class ManticoreSearchSettings(BaseSettings): + proto: str = "http" + host: str = "localhost" + port: int = 9308 + + username: Optional[str] = None + password: Optional[str] = None + + # database: str = "Manticore" + table: str = "langchain" + + column_map: Dict[str, str] = { + "id": "id", + "uuid": "uuid", + "document": "document", + "embedding": "embedding", + "metadata": "metadata", + } + + # A mandatory setting; currently, only hnsw is supported. + knn_type: str = "hnsw" + + # A mandatory setting that specifies the dimensions of the vectors being indexed. + knn_dims: Optional[int] = None # Defaults autodetect + + # A mandatory setting that specifies the distance function used by the HNSW index. + hnsw_similarity: str = "L2" # Acceptable values are: L2, IP, COSINE + + # An optional setting that defines the maximum amount of outgoing connections + # in the graph. + hnsw_m: int = 16 # The default is 16. + + # An optional setting that defines a construction time/accuracy trade-off. + hnsw_ef_construction = 100 + + def get_connection_string(self) -> str: + return self.proto + "://" + self.host + ":" + str(self.port) + + def __getitem__(self, item: str) -> Any: + return getattr(self, item) + + class Config: + env_file = ".env" + env_prefix = "manticore_" + env_file_encoding = "utf-8" + + +class ManticoreSearch(VectorStore): + """ + `ManticoreSearch Engine` vector store. + + To use, you should have the ``manticoresearch`` python package installed. + + Example: + .. 
code-block:: python + + from langchain_community.vectorstores import Manticore + from langchain_community.embeddings.openai import OpenAIEmbeddings + + embeddings = OpenAIEmbeddings() + vectorstore = ManticoreSearch(embeddings) + """ + + def __init__( + self, + embedding: Embeddings, + *, + config: Optional[ManticoreSearchSettings] = None, + **kwargs: Any, + ) -> None: + """ + ManticoreSearch Wrapper to LangChain + + Args: + embedding (Embeddings): Text embedding model. + config (ManticoreSearchSettings): Configuration of ManticoreSearch Client + **kwargs: Other keyword arguments will pass into Configuration of API client + manticoresearch-python. See + https://github.com/manticoresoftware/manticoresearch-python for more. + """ + try: + import manticoresearch.api as ENDPOINTS + import manticoresearch.api_client as API + except ImportError: + raise ImportError( + "Could not import manticoresearch python package. " + "Please install it with `pip install manticoresearch-dev`." + ) + + try: + from tqdm import tqdm + + self.pgbar = tqdm + except ImportError: + # Just in case if tqdm is not installed + self.pgbar = lambda x, **kwargs: x + + super().__init__() + + self.embedding = embedding + if config is not None: + self.config = config + else: + self.config = ManticoreSearchSettings() + + assert self.config + assert self.config.host and self.config.port + assert ( + self.config.column_map + # and self.config.database + and self.config.table + ) + + assert ( + self.config.knn_type + # and self.config.knn_dims + # and self.config.hnsw_m + # and self.config.hnsw_ef_construction + and self.config.hnsw_similarity + ) + + for k in ["id", "embedding", "document", "metadata", "uuid"]: + assert k in self.config.column_map + + # Detect embeddings dimension + if self.config.knn_dims is None: + self.dim: int = len(self.embedding.embed_query("test")) + else: + self.dim = self.config.knn_dims + + # Initialize the schema + self.schema = f"""\ +CREATE TABLE IF NOT EXISTS 
{self.config.table}( + {self.config.column_map['id']} bigint, + {self.config.column_map['document']} text indexed stored, + {self.config.column_map['embedding']} \ + float_vector knn_type='{self.config.knn_type}' \ + knn_dims='{self.dim}' \ + hnsw_similarity='{self.config.hnsw_similarity}' \ + hnsw_m='{self.config.hnsw_m}' \ + hnsw_ef_construction='{self.config.hnsw_ef_construction}', + {self.config.column_map['metadata']} json, + {self.config.column_map['uuid']} text indexed stored +)\ +""" + + # Create a connection to ManticoreSearch + self.configuration = API.Configuration( + host=self.config.get_connection_string(), + username=self.config.username, + password=self.config.password, + # disabled_client_side_validations=",", + **kwargs, + ) + self.connection = API.ApiClient(self.configuration) + self.client = { + "index": ENDPOINTS.IndexApi(self.connection), + "utils": ENDPOINTS.UtilsApi(self.connection), + "search": ENDPOINTS.SearchApi(self.connection), + } + + # Create default schema if not exists + self.client["utils"].sql(self.schema) + + @property + def embeddings(self) -> Embeddings: + return self.embedding + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + *, + batch_size: int = 32, + text_ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + """ + Insert more texts through the embeddings and add to the VectorStore. + + Args: + texts: Iterable of strings to add to the VectorStore + metadata: Optional column data to be inserted + batch_size: Batch size of insertion + ids: Optional list of ids to associate with the texts + + Returns: + List of ids from adding the texts into the VectorStore. 
+ """ + # Embed and create the documents + ids = text_ids or [ + # See https://stackoverflow.com/questions/67219691/python-hash-function-that-returns-32-or-64-bits + str(int(sha1(t.encode("utf-8")).hexdigest()[:15], 16)) + for t in texts + ] + transac = [] + for i, text in enumerate(texts): + embed = self.embeddings.embed_query(text) + doc_uuid = str(uuid.uuid1()) + doc = { + self.config.column_map["document"]: text, + self.config.column_map["embedding"]: embed, + self.config.column_map["metadata"]: metadatas[i] if metadatas else {}, + self.config.column_map["uuid"]: doc_uuid, + } + transac.append( + {"replace": {"index": self.config.table, "id": ids[i], "doc": doc}} + ) + + if len(transac) == batch_size: + body = "\n".join(map(json.dumps, transac)) + try: + self.client["index"].bulk(body) + transac = [] + except Exception as e: + logger.info(f"Error indexing documents: {e}") + + if len(transac) > 0: + body = "\n".join(map(json.dumps, transac)) + try: + self.client["index"].bulk(body) + except Exception as e: + logger.info(f"Error indexing documents: {e}") + + return ids + + @classmethod + def from_texts( + cls: Type[ManticoreSearch], + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[Dict[Any, Any]]] = None, + *, + config: Optional[ManticoreSearchSettings] = None, + text_ids: Optional[List[str]] = None, + batch_size: int = 32, + **kwargs: Any, + ) -> ManticoreSearch: + ctx = cls(embedding, config=config, **kwargs) + ctx.add_texts( + texts=texts, + embedding=embedding, + text_ids=text_ids, + batch_size=batch_size, + metadatas=metadatas, + **kwargs, + ) + return ctx + + @classmethod + def from_documents( + cls: Type[ManticoreSearch], + documents: List[Document], + embedding: Embeddings, + *, + config: Optional[ManticoreSearchSettings] = None, + text_ids: Optional[List[str]] = None, + batch_size: int = 32, + **kwargs: Any, + ) -> ManticoreSearch: + texts = [doc.page_content for doc in documents] + metadatas = [doc.metadata for doc in documents] + 
return cls.from_texts( + texts=texts, + embedding=embedding, + text_ids=text_ids, + batch_size=batch_size, + metadatas=metadatas, + **kwargs, + ) + + def __repr__(self) -> str: + """ + Text representation for ManticoreSearch Vector Store, prints backends, username + and schemas. Easy to use with `str(ManticoreSearch())` + + Returns: + repr: string to show connection info and data schema + """ + _repr = f"\033[92m\033[1m{self.config.table} @ " + _repr += f"http://{self.config.host}:{self.config.port}\033[0m\n\n" + _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n" + _repr += "-" * 51 + "\n" + for r in self.client["utils"].sql(f"DESCRIBE {self.config.table}")[0]["data"]: + _repr += ( + f"|\033[94m{r['Field']:24s}\033[0m|\033[" + f"96m{r['Type'] + ' ' + r['Properties']:24s}\033[0m|\n" + ) + _repr += "-" * 51 + "\n" + return _repr + + def similarity_search( + self, query: str, k: int = DEFAULT_K, **kwargs: Any + ) -> List[Document]: + """Perform a similarity search with ManticoreSearch + + Args: + query (str): query string + k (int, optional): Top K neighbors to retrieve. Defaults to 4. + + Returns: + List[Document]: List of Documents + """ + return self.similarity_search_by_vector( + self.embedding.embed_query(query), k, **kwargs + ) + + def similarity_search_by_vector( + self, + embedding: List[float], + k: int = DEFAULT_K, + **kwargs: Any, + ) -> List[Document]: + """Perform a similarity search with ManticoreSearch by vectors + + Args: + embedding (List[float]): Embedding vector + k (int, optional): Top K neighbors to retrieve. Defaults to 4. 
+ + Returns: + List[Document]: List of documents + """ + + # Build search request + request = { + "index": self.config.table, + "knn": { + "field": self.config.column_map["embedding"], + "k": k, + "query_vector": embedding, + }, + } + + # Execute request and convert response to langchain.Document format + try: + return [ + Document( + page_content=r["_source"][self.config.column_map["document"]], + metadata=r["_source"][self.config.column_map["metadata"]], + ) + for r in self.client["search"].search(request, **kwargs).hits.hits[:k] + ] + except Exception as e: + logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") + return [] + + def drop(self) -> None: + """ + Helper function: Drop data + """ + self.client["utils"].sql(f"DROP TABLE IF EXISTS {self.config.table}") + + @property + def metadata_column(self) -> str: + return self.config.column_map["metadata"] diff --git a/libs/community/langchain_community/vectorstores/meilisearch.py b/libs/community/langchain_community/vectorstores/meilisearch.py index 522f107405d..885d4d5cff4 100644 --- a/libs/community/langchain_community/vectorstores/meilisearch.py +++ b/libs/community/langchain_community/vectorstores/meilisearch.py @@ -243,6 +243,7 @@ class Meilisearch(VectorStore): "hybrid": {"semanticRatio": 1.0, "embedder": embedder_name}, "limit": k, "filter": filter, + "showRankingScore": True, }, ) @@ -250,7 +251,7 @@ class Meilisearch(VectorStore): metadata = result[self._metadata_key] if self._text_key in metadata: text = metadata.pop(self._text_key) - semantic_score = result["_semanticScore"] + semantic_score = result["_rankingScore"] docs.append( (Document(page_content=text, metadata=metadata), semantic_score) ) diff --git a/libs/community/langchain_community/vectorstores/milvus.py b/libs/community/langchain_community/vectorstores/milvus.py index 28d1dcc0f8e..6af20a5d4cc 100644 --- a/libs/community/langchain_community/vectorstores/milvus.py +++ 
b/libs/community/langchain_community/vectorstores/milvus.py @@ -5,6 +5,7 @@ from typing import Any, Iterable, List, Optional, Tuple, Union from uuid import uuid4 import numpy as np +from langchain_core._api.deprecation import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore @@ -22,6 +23,11 @@ DEFAULT_MILVUS_CONNECTION = { } +@deprecated( + since="0.2.0", + removal="0.3.0", + alternative_import="langchain_milvus.MilvusVectorStore", +) class Milvus(VectorStore): """`Milvus` vector store. @@ -62,7 +68,7 @@ class Milvus(VectorStore): primary_field (str): Name of the primary key field. Defaults to "pk". text_field (str): Name of the text field. Defaults to "text". vector_field (str): Name of the vector field. Defaults to "vector". - metadata_field (str): Name of the metadta field. Defaults to None. + metadata_field (str): Name of the metadata field. Defaults to None. When metadata_field is specified, the document's metadata will store as json. 
diff --git a/libs/community/langchain_community/vectorstores/momento_vector_index.py b/libs/community/langchain_community/vectorstores/momento_vector_index.py index 4e2e03dd79f..ebfa3c4d0e9 100644 --- a/libs/community/langchain_community/vectorstores/momento_vector_index.py +++ b/libs/community/langchain_community/vectorstores/momento_vector_index.py @@ -397,7 +397,7 @@ class MomentoVectorIndex(VectorStore): ) selected = [response.hits[i].metadata for i in mmr_selected] return [ - Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata) # type: ignore # noqa: E501 + Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata) # type: ignore for metadata in selected ] diff --git a/libs/community/langchain_community/vectorstores/qdrant.py b/libs/community/langchain_community/vectorstores/qdrant.py index 799007530b3..6d02332cebd 100644 --- a/libs/community/langchain_community/vectorstores/qdrant.py +++ b/libs/community/langchain_community/vectorstores/qdrant.py @@ -1681,7 +1681,7 @@ class Qdrant(VectorStore): f"Existing Qdrant collection {collection_name} uses named vectors. " f"If you want to reuse it, please set `vector_name` to any of the " f"existing named vectors: " - f"{', '.join(current_vector_config.keys())}." # noqa + f"{', '.join(current_vector_config.keys())}." f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) @@ -1846,7 +1846,7 @@ class Qdrant(VectorStore): f"Existing Qdrant collection {collection_name} uses named vectors. " f"If you want to reuse it, please set `vector_name` to any of the " f"existing named vectors: " - f"{', '.join(current_vector_config.keys())}." # noqa + f"{', '.join(current_vector_config.keys())}." f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." 
) diff --git a/libs/community/langchain_community/vectorstores/redis/base.py b/libs/community/langchain_community/vectorstores/redis/base.py index c9a19f8e7ee..8a885aff397 100644 --- a/libs/community/langchain_community/vectorstores/redis/base.py +++ b/libs/community/langchain_community/vectorstores/redis/base.py @@ -383,7 +383,7 @@ class Redis(VectorStore): # type check for metadata if metadatas: - if isinstance(metadatas, list) and len(metadatas) != len(texts): # type: ignore # noqa: E501 + if isinstance(metadatas, list) and len(metadatas) != len(texts): # type: ignore raise ValueError("Number of metadatas must match number of texts") if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)): raise ValueError("Metadatas must be a list of dicts") @@ -704,7 +704,7 @@ class Redis(VectorStore): # type check for metadata if metadatas: - if isinstance(metadatas, list) and len(metadatas) != len(texts): # type: ignore # noqa: E501 + if isinstance(metadatas, list) and len(metadatas) != len(texts): # type: ignore raise ValueError("Number of metadatas must match number of texts") if not (isinstance(metadatas, list) and isinstance(metadatas[0], dict)): raise ValueError("Metadatas must be a list of dicts") @@ -832,7 +832,7 @@ class Redis(VectorStore): # Perform vector search # ignore type because redis-py is wrong about bytes try: - results = self.client.ft(self.index_name).search(redis_query, params_dict) # type: ignore # noqa: E501 + results = self.client.ft(self.index_name).search(redis_query, params_dict) # type: ignore except redis.exceptions.ResponseError as e: # split error message and see if it starts with "Syntax" if str(e).split(" ")[0] == "Syntax": @@ -947,7 +947,7 @@ class Redis(VectorStore): # Perform vector search # ignore type because redis-py is wrong about bytes try: - results = self.client.ft(self.index_name).search(redis_query, params_dict) # type: ignore # noqa: E501 + results = self.client.ft(self.index_name).search(redis_query, params_dict) 
# type: ignore except redis.exceptions.ResponseError as e: # split error message and see if it starts with "Syntax" if str(e).split(" ")[0] == "Syntax": diff --git a/libs/community/langchain_community/vectorstores/surrealdb.py b/libs/community/langchain_community/vectorstores/surrealdb.py index 7e48ea1e295..60db49130b1 100644 --- a/libs/community/langchain_community/vectorstores/surrealdb.py +++ b/libs/community/langchain_community/vectorstores/surrealdb.py @@ -1,16 +1,22 @@ import asyncio from typing import ( Any, + Dict, Iterable, List, Optional, Tuple, ) +import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore +from langchain_community.vectorstores.utils import maximal_marginal_relevance + +DEFAULT_K = 4 # Number of Documents to return. + class SurrealDBStore(VectorStore): """ @@ -202,14 +208,20 @@ class SurrealDBStore(VectorStore): return asyncio.run(_delete(ids, **kwargs)) async def _asimilarity_search_by_vector_with_score( - self, embedding: List[float], k: int = 4, **kwargs: Any - ) -> List[Tuple[Document, float]]: + self, + embedding: List[float], + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float, Any]]: """Run similarity search for query embedding asynchronously and return documents and scores Args: embedding (List[float]): Query embedding. k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar along with scores @@ -220,14 +232,29 @@ class SurrealDBStore(VectorStore): "k": k, "score_threshold": kwargs.get("score_threshold", 0), } + + # build additional filter criteria + custom_filter = "" + if filter: + for key in filter: + # check value type + if type(filter[key]) in [str, bool]: + filter_value = f"'{filter[key]}'" + else: + filter_value = f"{filter[key]}" + + custom_filter += f"and metadata.{key} = {filter_value} " + query = f""" select id, text, metadata, + embedding, vector::similarity::cosine(embedding, $embedding) as similarity from ⟨{args["collection"]}⟩ where vector::similarity::cosine(embedding, $embedding) >= $score_threshold + {custom_filter} order by similarity desc LIMIT $k; """ results = await self.sdb.query(query, args) @@ -247,21 +274,28 @@ class SurrealDBStore(VectorStore): ( Document( page_content=doc["text"], - metadata={"id": doc["id"], **(doc.get("metadata", None) or {})}, + metadata={"id": doc["id"], **(doc.get("metadata") or {})}, ), doc["similarity"], + doc["embedding"], ) for doc in result["result"] ] async def asimilarity_search_with_relevance_scores( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search asynchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar along with relevance scores @@ -269,21 +303,27 @@ class SurrealDBStore(VectorStore): query_embedding = self.embedding_function.embed_query(query) return [ (document, similarity) - for document, similarity in ( + for document, similarity, _ in ( await self._asimilarity_search_by_vector_with_score( - query_embedding, k, **kwargs + query_embedding, k, filter=filter, **kwargs ) ) ] def similarity_search_with_relevance_scores( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search synchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar along with relevance scores @@ -294,19 +334,25 @@ class SurrealDBStore(VectorStore): ): await self.initialize() return await self.asimilarity_search_with_relevance_scores( - query, k, **kwargs + query, k, filter=filter, **kwargs ) return asyncio.run(_similarity_search_with_relevance_scores()) async def asimilarity_search_with_score( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search asynchronously and return distance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar along with relevance distance scores @@ -314,21 +360,27 @@ class SurrealDBStore(VectorStore): query_embedding = self.embedding_function.embed_query(query) return [ (document, similarity) - for document, similarity in ( + for document, similarity, _ in ( await self._asimilarity_search_by_vector_with_score( - query_embedding, k, **kwargs + query_embedding, k, filter=filter, **kwargs ) ) ] def similarity_search_with_score( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search synchronously and return distance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar along with relevance distance scores @@ -336,37 +388,51 @@ class SurrealDBStore(VectorStore): async def _similarity_search_with_score() -> List[Tuple[Document, float]]: await self.initialize() - return await self.asimilarity_search_with_score(query, k, **kwargs) + return await self.asimilarity_search_with_score( + query, k, filter=filter, **kwargs + ) return asyncio.run(_similarity_search_with_score()) async def asimilarity_search_by_vector( - self, embedding: List[float], k: int = 4, **kwargs: Any + self, + embedding: List[float], + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Document]: """Run similarity search on query embedding asynchronously Args: embedding (List[float]): Query embedding k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query """ return [ document - for document, _ in await self._asimilarity_search_by_vector_with_score( - embedding, k, **kwargs + for document, _, _ in await self._asimilarity_search_by_vector_with_score( + embedding, k, filter=filter, **kwargs ) ] def similarity_search_by_vector( - self, embedding: List[float], k: int = 4, **kwargs: Any + self, + embedding: List[float], + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Document]: """Run similarity search on query embedding Args: embedding (List[float]): Query embedding k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query @@ -374,33 +440,49 @@ class SurrealDBStore(VectorStore): async def _similarity_search_by_vector() -> List[Document]: await self.initialize() - return await self.asimilarity_search_by_vector(embedding, k, **kwargs) + return await self.asimilarity_search_by_vector( + embedding, k, filter=filter, **kwargs + ) return asyncio.run(_similarity_search_by_vector()) async def asimilarity_search( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Document]: """Run similarity search on query asynchronously Args: query (str): Query k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query """ query_embedding = self.embedding_function.embed_query(query) - return await self.asimilarity_search_by_vector(query_embedding, k, **kwargs) + return await self.asimilarity_search_by_vector( + query_embedding, k, filter=filter, **kwargs + ) def similarity_search( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = DEFAULT_K, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, ) -> List[Document]: """Run similarity search on query Args: query (str): Query k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query @@ -408,10 +490,164 @@ class SurrealDBStore(VectorStore): async def _similarity_search() -> List[Document]: await self.initialize() - return await self.asimilarity_search(query, k, **kwargs) + return await self.asimilarity_search(query, k, filter=filter, **kwargs) return asyncio.run(_similarity_search()) + async def amax_marginal_relevance_search_by_vector( + self, + embedding: List[float], + k: int = DEFAULT_K, + fetch_k: int = 20, + lambda_mult: float = 0.5, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance. + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List of Documents selected by maximal marginal relevance. 
+ """ + + result = await self._asimilarity_search_by_vector_with_score( + embedding, fetch_k, filter=filter, **kwargs + ) + + # extract only document from result + docs = [sub[0] for sub in result] + # extract only embedding from result + embeddings = [sub[-1] for sub in result] + + mmr_selected = maximal_marginal_relevance( + np.array(embedding, dtype=np.float32), + embeddings, + k=k, + lambda_mult=lambda_mult, + ) + + return [docs[i] for i in mmr_selected] + + def max_marginal_relevance_search_by_vector( + self, + embedding: List[float], + k: int = DEFAULT_K, + fetch_k: int = 20, + lambda_mult: float = 0.5, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List of Documents selected by maximal marginal relevance. + """ + + async def _max_marginal_relevance_search_by_vector() -> List[Document]: + await self.initialize() + return await self.amax_marginal_relevance_search_by_vector( + embedding, k, fetch_k, lambda_mult, filter=filter, **kwargs + ) + + return asyncio.run(_max_marginal_relevance_search_by_vector()) + + async def amax_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance. 
+ + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List of Documents selected by maximal marginal relevance. + """ + + embedding = self.embedding_function.embed_query(query) + docs = await self.amax_marginal_relevance_search_by_vector( + embedding, k, fetch_k, lambda_mult, filter=filter, **kwargs + ) + return docs + + def max_marginal_relevance_search( + self, + query: str, + k: int = DEFAULT_K, + fetch_k: int = 20, + lambda_mult: float = 0.5, + *, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance. + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List of Documents selected by maximal marginal relevance. 
+ """ + + async def _max_marginal_relevance_search() -> List[Document]: + await self.initialize() + return await self.amax_marginal_relevance_search( + query, k, fetch_k, lambda_mult, filter=filter, **kwargs + ) + + return asyncio.run(_max_marginal_relevance_search()) + @classmethod async def afrom_texts( cls, diff --git a/libs/community/langchain_community/vectorstores/timescalevector.py b/libs/community/langchain_community/vectorstores/timescalevector.py index 5e3e3c41ad4..47931634187 100644 --- a/libs/community/langchain_community/vectorstores/timescalevector.py +++ b/libs/community/langchain_community/vectorstores/timescalevector.py @@ -66,7 +66,7 @@ class TimescaleVector(VectorStore): collection_name=COLLECTION_NAME, service_url=SERVICE_URL, ) - """ # noqa: E501 + """ def __init__( self, diff --git a/libs/community/langchain_community/vectorstores/zep_cloud.py b/libs/community/langchain_community/vectorstores/zep_cloud.py new file mode 100644 index 00000000000..052340e4fcd --- /dev/null +++ b/libs/community/langchain_community/vectorstores/zep_cloud.py @@ -0,0 +1,477 @@ +from __future__ import annotations + +import logging +import warnings +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple + +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.vectorstores import VectorStore + +if TYPE_CHECKING: + from zep_cloud import CreateDocumentRequest, DocumentCollectionResponse, SearchType + +logger = logging.getLogger() + + +class ZepCloudVectorStore(VectorStore): + """`Zep` vector store. + + It provides methods for adding texts or documents to the store, + searching for similar documents, and deleting documents. + + Search scores are calculated using cosine similarity normalized to [0, 1]. + + Args: + collection_name (str): The name of the collection in the Zep store. + api_key (str): The API key for the Zep API. 
+ """ + + def __init__( + self, + collection_name: str, + api_key: str, + ) -> None: + super().__init__() + if not collection_name: + raise ValueError( + "collection_name must be specified when using ZepVectorStore." + ) + try: + from zep_cloud.client import AsyncZep, Zep + except ImportError: + raise ImportError( + "Could not import zep-python python package. " + "Please install it with `pip install zep-python`." + ) + self._client = Zep(api_key=api_key) + self._client_async = AsyncZep(api_key=api_key) + + self.collection_name = collection_name + + self._load_collection() + + @property + def embeddings(self) -> Optional[Embeddings]: + """Unavailable for ZepCloud""" + return None + + def _load_collection(self) -> DocumentCollectionResponse: + """ + Load the collection from the Zep backend. + """ + from zep_cloud import NotFoundError + + try: + collection = self._client.document.get_collection(self.collection_name) + except NotFoundError: + logger.info( + f"Collection {self.collection_name} not found. Creating new collection." + ) + collection = self._create_collection() + + return collection + + def _create_collection(self) -> DocumentCollectionResponse: + """ + Create a new collection in the Zep backend. 
+ """ + self._client.document.add_collection(self.collection_name) + collection = self._client.document.get_collection(self.collection_name) + return collection + + def _generate_documents_to_add( + self, + texts: Iterable[str], + metadatas: Optional[List[Dict[Any, Any]]] = None, + document_ids: Optional[List[str]] = None, + ) -> List[CreateDocumentRequest]: + from zep_cloud import CreateDocumentRequest as ZepDocument + + documents: List[ZepDocument] = [] + for i, d in enumerate(texts): + documents.append( + ZepDocument( + content=d, + metadata=metadatas[i] if metadatas else None, + document_id=document_ids[i] if document_ids else None, + ) + ) + return documents + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[Dict[str, Any]]] = None, + document_ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + """Run more texts through the embeddings and add to the vectorstore. + + Args: + texts: Iterable of strings to add to the vectorstore. + metadatas: Optional list of metadatas associated with the texts. + document_ids: Optional list of document ids associated with the texts. + kwargs: vectorstore specific parameters + + Returns: + List of ids from adding the texts into the vectorstore. 
+ """ + + documents = self._generate_documents_to_add(texts, metadatas, document_ids) + uuids = self._client.document.add_documents( + self.collection_name, request=documents + ) + + return uuids + + async def aadd_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[Dict[str, Any]]] = None, + document_ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + """Run more texts through the embeddings and add to the vectorstore.""" + documents = self._generate_documents_to_add(texts, metadatas, document_ids) + uuids = await self._client_async.document.add_documents( + self.collection_name, request=documents + ) + + return uuids + + def search( + self, + query: str, + search_type: SearchType, + metadata: Optional[Dict[str, Any]] = None, + k: int = 3, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to query using specified search type.""" + if search_type == "similarity": + return self.similarity_search(query, k=k, metadata=metadata, **kwargs) + elif search_type == "mmr": + return self.max_marginal_relevance_search( + query, k=k, metadata=metadata, **kwargs + ) + else: + raise ValueError( + f"search_type of {search_type} not allowed. Expected " + "search_type to be 'similarity' or 'mmr'." + ) + + async def asearch( + self, + query: str, + search_type: str, + metadata: Optional[Dict[str, Any]] = None, + k: int = 3, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to query using specified search type.""" + if search_type == "similarity": + return await self.asimilarity_search( + query, k=k, metadata=metadata, **kwargs + ) + elif search_type == "mmr": + return await self.amax_marginal_relevance_search( + query, k=k, metadata=metadata, **kwargs + ) + else: + raise ValueError( + f"search_type of {search_type} not allowed. Expected " + "search_type to be 'similarity' or 'mmr'." 
+ ) + + def similarity_search( + self, + query: str, + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to query.""" + + results = self._similarity_search_with_relevance_scores( + query, k=k, metadata=metadata, **kwargs + ) + return [doc for doc, _ in results] + + def similarity_search_with_score( + self, + query: str, + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Run similarity search with distance.""" + + return self._similarity_search_with_relevance_scores( + query, k=k, metadata=metadata, **kwargs + ) + + def _similarity_search_with_relevance_scores( + self, + query: str, + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """ + Default similarity search with relevance scores. Modify if necessary + in subclass. + Return docs and relevance scores in the range [0, 1]. + + 0 is dissimilar, 1 is most similar. + + Args: + query: input text + k: Number of Documents to return. Defaults to 4. + metadata: Optional, metadata filter + **kwargs: kwargs to be passed to similarity search. 
Should include: + score_threshold: Optional, a floating point value between 0 to 1 and + filter the resulting set of retrieved docs + + Returns: + List of Tuples of (doc, similarity_score) + """ + + results = self._client.document.search( + collection_name=self.collection_name, + text=query, + limit=k, + metadata=metadata, + **kwargs, + ) + + return [ + ( + Document( + page_content=str(doc.content), + metadata=doc.metadata, + ), + doc.score or 0.0, + ) + for doc in results.results or [] + ] + + async def asimilarity_search_with_relevance_scores( + self, + query: str, + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Return docs most similar to query.""" + + results = await self._client_async.document.search( + collection_name=self.collection_name, + text=query, + limit=k, + metadata=metadata, + **kwargs, + ) + + return [ + ( + Document( + page_content=str(doc.content), + metadata=doc.metadata, + ), + doc.score or 0.0, + ) + for doc in results.results or [] + ] + + async def asimilarity_search( + self, + query: str, + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to query.""" + + results = await self.asimilarity_search_with_relevance_scores( + query, k, metadata=metadata, **kwargs + ) + + return [doc for doc, _ in results] + + def similarity_search_by_vector( + self, + embedding: List[float], + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Unsupported in Zep Cloud""" + warnings.warn("similarity_search_by_vector is not supported in Zep Cloud") + return [] + + async def asimilarity_search_by_vector( + self, + embedding: List[float], + k: int = 4, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Unsupported in Zep Cloud""" + warnings.warn("asimilarity_search_by_vector is not supported in Zep Cloud") + return [] + + def 
max_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. + Zep determines this automatically and this parameter is + ignored. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + metadata: Optional, metadata to filter the resulting set of retrieved docs + Returns: + List of Documents selected by maximal marginal relevance. + """ + + results = self._client.document.search( + collection_name=self.collection_name, + text=query, + limit=k, + metadata=metadata, + search_type="mmr", + mmr_lambda=lambda_mult, + **kwargs, + ) + + return [ + Document(page_content=str(d.content), metadata=d.metadata) + for d in results.results or [] + ] + + async def amax_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance.""" + + results = await self._client_async.document.search( + collection_name=self.collection_name, + text=query, + limit=k, + metadata=metadata, + search_type="mmr", + mmr_lambda=lambda_mult, + **kwargs, + ) + + return [ + Document(page_content=str(d.content), metadata=d.metadata) + for d in results.results or [] + ] + + def max_marginal_relevance_search_by_vector( + self, + embedding: List[float], + k: int = 4, + fetch_k: int = 20, + lambda_mult: float 
= 0.5, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Unsupported in Zep Cloud""" + warnings.warn( + "max_marginal_relevance_search_by_vector is not supported in Zep Cloud" + ) + return [] + + async def amax_marginal_relevance_search_by_vector( + self, + embedding: List[float], + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + """Unsupported in Zep Cloud""" + warnings.warn( + "amax_marginal_relevance_search_by_vector is not supported in Zep Cloud" + ) + return [] + + @classmethod + def from_texts( + cls, + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + collection_name: str = "", + api_key: Optional[str] = None, + **kwargs: Any, + ) -> ZepCloudVectorStore: + """ + Class method that returns a ZepVectorStore instance initialized from texts. + + If the collection does not exist, it will be created. + + Args: + texts (List[str]): The list of texts to add to the vectorstore. + metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata + associated with the texts. + collection_name (str): The name of the collection in the Zep store. + api_key (str): The API key for the Zep API. + **kwargs: Additional parameters specific to the vectorstore. + + Returns: + ZepVectorStore: An instance of ZepVectorStore. + """ + if not api_key: + raise ValueError("api_key must be specified when using ZepVectorStore.") + vecstore = cls( + collection_name=collection_name, + api_key=api_key, + ) + vecstore.add_texts(texts, metadatas) + return vecstore + + def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: + """Delete by Zep vector UUIDs. + + Parameters + ---------- + ids : Optional[List[str]] + The UUIDs of the vectors to delete. + + Raises + ------ + ValueError + If no UUIDs are provided. 
+ """ + + if ids is None or len(ids) == 0: + raise ValueError("No uuids provided to delete.") + + for u in ids: + self._client.document.delete_document(self.collection_name, u) diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock index 915ff9e4bd7..727ac1f605c 100644 --- a/libs/community/poetry.lock +++ b/libs/community/poetry.lock @@ -12,21 +12,6 @@ files = [ {file = "aenum-3.1.15.tar.gz", hash = "sha256:8cbd76cd18c4f870ff39b24284d3ea028fbe8731a58df3aa581e434c575b9559"}, ] -[[package]] -name = "aerospike-vector-search" -version = "0.6.1" -description = "Aerospike Vector Search Client Library for Python" -optional = true -python-versions = ">3.8" -files = [ - {file = "aerospike-vector-search-0.6.1.tar.gz", hash = "sha256:1d3dcf84221a08434a0b2fb4bbac040b3718a169cdd7e44a725eae2fdbad6a43"}, - {file = "aerospike_vector_search-0.6.1-py3-none-any.whl", hash = "sha256:cc7cc7c829f218c4ee9ccd93ca0ecad7104d81deac236309dcdf87e9c399fd35"}, -] - -[package.dependencies] -grpcio = "*" -protobuf = "*" - [[package]] name = "aiodns" version = "3.1.1" @@ -1065,6 +1050,26 @@ click = ">=4.0" [package.extras] test = ["pytest-cov"] +[[package]] +name = "cloudpathlib" +version = "0.18.1" +description = "pathlib-style classes for cloud storage services." 
+optional = true +python-versions = ">=3.7" +files = [ + {file = "cloudpathlib-0.18.1-py3-none-any.whl", hash = "sha256:20efd5d772c75df91bb2ac52e053be53fd9000f5e9755fd92375a2a9fe6005e0"}, + {file = "cloudpathlib-0.18.1.tar.gz", hash = "sha256:ffd22f324bfbf9c3f2bc1bec6e8372cb372a0feef17c7f2b48030cd6810ea859"}, +] + +[package.dependencies] +typing_extensions = {version = ">4", markers = "python_version < \"3.11\""} + +[package.extras] +all = ["cloudpathlib[azure]", "cloudpathlib[gs]", "cloudpathlib[s3]"] +azure = ["azure-storage-blob (>=12)"] +gs = ["google-cloud-storage"] +s3 = ["boto3"] + [[package]] name = "cloudpickle" version = "3.0.0" @@ -1185,7 +1190,6 @@ files = [ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"}, {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"}, {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"}, - {file = "contourpy-1.1.0-cp310-cp310-win32.whl", hash = "sha256:9b2dd2ca3ac561aceef4c7c13ba654aaa404cf885b187427760d7f7d4c57cff8"}, {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"}, {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"}, {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"}, @@ -1194,7 +1198,6 @@ files = [ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"}, {file = 
"contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"}, {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"}, - {file = "contourpy-1.1.0-cp311-cp311-win32.whl", hash = "sha256:edb989d31065b1acef3828a3688f88b2abb799a7db891c9e282df5ec7e46221b"}, {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"}, {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"}, {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"}, @@ -1203,7 +1206,6 @@ files = [ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"}, {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"}, {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"}, - {file = "contourpy-1.1.0-cp38-cp38-win32.whl", hash = "sha256:108dfb5b3e731046a96c60bdc46a1a0ebee0760418951abecbe0fc07b5b93b27"}, {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"}, {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"}, {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"}, @@ -1212,7 +1214,6 @@ files = [ {file = 
"contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"}, {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"}, {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"}, - {file = "contourpy-1.1.0-cp39-cp39-win32.whl", hash = "sha256:71551f9520f008b2950bef5f16b0e3587506ef4f23c734b71ffb7b89f8721999"}, {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"}, {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"}, {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"}, @@ -3470,6 +3471,7 @@ files = [ {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:227b178b22a7f91ae88525810441791b1ca1fc71c86f03190911793be15cec3d"}, {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:780eb6383fbae12afa819ef676fc93e1548ae4b076c004a393af26a04b460742"}, {file = "jq-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08ded6467f4ef89fec35b2bf310f210f8cd13fbd9d80e521500889edf8d22441"}, + {file = "jq-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49e44ed677713f4115bd5bf2dbae23baa4cd503be350e12a1c1f506b0687848f"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:984f33862af285ad3e41e23179ac4795f1701822473e1a26bf87ff023e5a89ea"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42264fafc6166efb5611b5d4cb01058887d050a6c19334f6a3f8a13bb369df5"}, {file = 
"jq-1.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a67154f150aaf76cc1294032ed588436eb002097dd4fd1e283824bf753a05080"}, @@ -3553,7 +3555,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, - {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -4133,7 +4134,6 @@ files = [ {file = "lxml-4.9.4-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e8f9f93a23634cfafbad6e46ad7d09e0f4a25a2400e4a64b1b7b7c0fbaa06d9d"}, {file = "lxml-4.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3f3f00a9061605725df1816f5713d10cd94636347ed651abdbc75828df302b20"}, {file = "lxml-4.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:953dd5481bd6252bd480d6ec431f61d7d87fdcbbb71b0d2bdcfc6ae00bb6fb10"}, - {file = "lxml-4.9.4-cp312-cp312-win32.whl", hash = "sha256:266f655d1baff9c47b52f529b5f6bec33f66042f65f7c56adde3fcf2ed62ae8b"}, {file = "lxml-4.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:f1faee2a831fe249e1bae9cbc68d3cd8a30f7e37851deee4d7962b17c410dd56"}, {file = "lxml-4.9.4-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:23d891e5bdc12e2e506e7d225d6aa929e0a0368c9916c1fddefab88166e98b20"}, {file = "lxml-4.9.4-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e96a1788f24d03e8d61679f9881a883ecdf9c445a38f9ae3f3f193ab6c591c66"}, @@ -6119,8 +6119,6 @@ files = [ {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = 
"sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, @@ -6163,7 +6161,6 @@ files = [ {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, @@ -6172,8 +6169,6 @@ files = [ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, @@ -6749,7 +6744,6 @@ python-versions = ">=3.8" files = [ {file = "PyMuPDFb-1.23.22-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9085a1e2fbf16f2820f9f7ad3d25e85f81d9b9eb0409110c1670d4cf5a27a678"}, {file = "PyMuPDFb-1.23.22-py3-none-macosx_11_0_arm64.whl", hash = "sha256:01016dd33220cef4ecaf929d09fd27a584dc3ec3e5c9f4112dfe63613ea35135"}, - {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf50e814db91f2a2325219302fbac229a23682c372cf8232aabd51ea3f18210e"}, {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ffa713ad18e816e584c8a5f569995c32d22f8ac76ab6e4a61f2d2983c4b73d9"}, {file = "PyMuPDFb-1.23.22-py3-none-win32.whl", hash = "sha256:d00e372452845aea624659c302d25e935052269fd3aafe26948301576d6f2ee8"}, {file = 
"PyMuPDFb-1.23.22-py3-none-win_amd64.whl", hash = "sha256:7c9c157281fdee9f296e666a323307dbf74cb38f017921bb131fa7bfcd39c2bd"}, @@ -7158,7 +7152,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -7166,16 +7159,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = 
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -7192,7 +7177,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash 
= "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -7200,7 +7184,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -8364,9 +8347,7 @@ python-versions = ">=3.7" files = [ {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:e0b148ab0438f72ad21cb004ce3bdaafd28465c4276af66df3b9ecd2037bf252"}, {file = "SQLAlchemy-2.0.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbda76961eb8f27e6ad3c84d1dc56d5bc61ba8f02bd20fcf3450bd421c2fcc9c"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feea693c452d85ea0015ebe3bb9cd15b6f49acc1a31c28b3c50f4db0f8fb1e71"}, {file = "SQLAlchemy-2.0.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5da98815f82dce0cb31fd1e873a0cb30934971d15b74e0d78cf21f9e1b05953f"}, - {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5adf383c73f2d49ad15ff363a8748319ff84c371eed59ffd0127355d6ea1da"}, {file = "SQLAlchemy-2.0.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56856b871146bfead25fbcaed098269d90b744eea5cb32a952df00d542cdd368"}, {file = "SQLAlchemy-2.0.28-cp310-cp310-win32.whl", hash = "sha256:943aa74a11f5806ab68278284a4ddd282d3fb348a0e96db9b42cb81bf731acdc"}, {file = "SQLAlchemy-2.0.28-cp310-cp310-win_amd64.whl", hash = "sha256:c6c4da4843e0dabde41b8f2e8147438330924114f541949e6318358a56d1875a"}, @@ -8387,25 +8368,19 @@ files = [ {file = "SQLAlchemy-2.0.28-cp312-cp312-win32.whl", hash = "sha256:a921002be69ac3ab2cf0c3017c4e6a3377f800f1fca7f254c13b5f1a2f10022c"}, {file = "SQLAlchemy-2.0.28-cp312-cp312-win_amd64.whl", hash = "sha256:b4a2cf92995635b64876dc141af0ef089c6eea7e05898d8d8865e71a326c0385"}, {file = "SQLAlchemy-2.0.28-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e91b5e341f8c7f1e5020db8e5602f3ed045a29f8e27f7f565e0bdee3338f2c7"}, - {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45c7b78dfc7278329f27be02c44abc0d69fe235495bb8e16ec7ef1b1a17952db"}, {file = "SQLAlchemy-2.0.28-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3eba73ef2c30695cb7eabcdb33bb3d0b878595737479e152468f3ba97a9c22a4"}, - {file = 
"SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5df5d1dafb8eee89384fb7a1f79128118bc0ba50ce0db27a40750f6f91aa99d5"}, {file = "SQLAlchemy-2.0.28-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2858bbab1681ee5406650202950dc8f00e83b06a198741b7c656e63818633526"}, {file = "SQLAlchemy-2.0.28-cp37-cp37m-win32.whl", hash = "sha256:9461802f2e965de5cff80c5a13bc945abea7edaa1d29360b485c3d2b56cdb075"}, {file = "SQLAlchemy-2.0.28-cp37-cp37m-win_amd64.whl", hash = "sha256:a6bec1c010a6d65b3ed88c863d56b9ea5eeefdf62b5e39cafd08c65f5ce5198b"}, {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:843a882cadebecc655a68bd9a5b8aa39b3c52f4a9a5572a3036fb1bb2ccdc197"}, {file = "SQLAlchemy-2.0.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dbb990612c36163c6072723523d2be7c3eb1517bbdd63fe50449f56afafd1133"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7e4baf9161d076b9a7e432fce06217b9bd90cfb8f1d543d6e8c4595627edb9"}, {file = "SQLAlchemy-2.0.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0a5354cb4de9b64bccb6ea33162cb83e03dbefa0d892db88a672f5aad638a75"}, - {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fffcc8edc508801ed2e6a4e7b0d150a62196fd28b4e16ab9f65192e8186102b6"}, {file = "SQLAlchemy-2.0.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca7b6d99a4541b2ebab4494f6c8c2f947e0df4ac859ced575238e1d6ca5716b"}, {file = "SQLAlchemy-2.0.28-cp38-cp38-win32.whl", hash = "sha256:8c7f10720fc34d14abad5b647bc8202202f4948498927d9f1b4df0fb1cf391b7"}, {file = "SQLAlchemy-2.0.28-cp38-cp38-win_amd64.whl", hash = "sha256:243feb6882b06a2af68ecf4bec8813d99452a1b62ba2be917ce6283852cf701b"}, {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc4974d3684f28b61b9a90fcb4c41fb340fd4b6a50c04365704a4da5a9603b05"}, {file = "SQLAlchemy-2.0.28-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:87724e7ed2a936fdda2c05dbd99d395c91ea3c96f029a033a4a20e008dd876bf"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68722e6a550f5de2e3cfe9da6afb9a7dd15ef7032afa5651b0f0c6b3adb8815d"}, {file = "SQLAlchemy-2.0.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:328529f7c7f90adcd65aed06a161851f83f475c2f664a898af574893f55d9e53"}, - {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:df40c16a7e8be7413b885c9bf900d402918cc848be08a59b022478804ea076b8"}, {file = "SQLAlchemy-2.0.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:426f2fa71331a64f5132369ede5171c52fd1df1bd9727ce621f38b5b24f48750"}, {file = "SQLAlchemy-2.0.28-cp39-cp39-win32.whl", hash = "sha256:33157920b233bc542ce497a81a2e1452e685a11834c5763933b440fedd1d8e2d"}, {file = "SQLAlchemy-2.0.28-cp39-cp39-win_amd64.whl", hash = "sha256:2f60843068e432311c886c5f03c4664acaef507cf716f6c60d5fde7265be9d7b"}, @@ -10099,9 +10074,9 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [extras] cli = ["typer"] -extended-testing = ["aerospike-vector-search", "aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "assemblyai", "atlassian-python-api", "azure-ai-documentintelligence", "azure-identity", "azure-search-documents", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "cloudpickle", "cloudpickle", "cohere", "databricks-vectorsearch", "datasets", "dgml-utils", "elasticsearch", "esprima", "faiss-cpu", "feedparser", "fireworks-ai", "friendli-client", "geopandas", "gitpython", "google-cloud-documentai", "gql", "gradientai", "hdbcli", "hologres-vector", "html2text", "httpx", "httpx-sse", "javelin-sdk", "jinja2", "jq", "jsonschema", "lxml", "markdownify", "motor", "msal", "mwparserfromhell", "mwxml", "newspaper3k", "numexpr", "nvidia-riva-client", "oci", "openai", "openapi-pydantic", "oracle-ads", "oracledb", "pandas", "pdfminer-six", "pgvector", "praw", "premai", 
"psychicapi", "py-trello", "pyjwt", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "rapidocr-onnxruntime", "rdflib", "requests-toolbelt", "rspace_client", "scikit-learn", "sqlite-vss", "streamlit", "sympy", "telethon", "tidb-vector", "timescale-vector", "tqdm", "tree-sitter", "tree-sitter-languages", "upstash-redis", "vdms", "xata", "xmltodict"] +extended-testing = ["aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "assemblyai", "atlassian-python-api", "azure-ai-documentintelligence", "azure-identity", "azure-search-documents", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "cloudpathlib", "cloudpickle", "cloudpickle", "cohere", "databricks-vectorsearch", "datasets", "dgml-utils", "elasticsearch", "esprima", "faiss-cpu", "feedparser", "fireworks-ai", "friendli-client", "geopandas", "gitpython", "google-cloud-documentai", "gql", "gradientai", "hdbcli", "hologres-vector", "html2text", "httpx", "httpx-sse", "javelin-sdk", "jinja2", "jq", "jsonschema", "lxml", "markdownify", "motor", "msal", "mwparserfromhell", "mwxml", "newspaper3k", "numexpr", "nvidia-riva-client", "oci", "openai", "openapi-pydantic", "oracle-ads", "oracledb", "pandas", "pdfminer-six", "pgvector", "praw", "premai", "psychicapi", "py-trello", "pyjwt", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "rapidocr-onnxruntime", "rdflib", "requests-toolbelt", "rspace_client", "scikit-learn", "sqlite-vss", "streamlit", "sympy", "telethon", "tidb-vector", "timescale-vector", "tqdm", "tree-sitter", "tree-sitter-languages", "upstash-redis", "vdms", "xata", "xmltodict"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "6fbb50e2a8146f8fc2590c8de1a194c7bbc7dd2cfd3d2fd090247aadc01e63f1" +content-hash = "1a30f88ba6352cfd5af8d3b7b6418ec01bad42c73e6096b1a3ccef06cb36709b" diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index 64c50283543..4955db47647 100644 --- a/libs/community/pyproject.toml +++ 
b/libs/community/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-community" -version = "0.2.0" +version = "0.2.1" description = "Community contributed LangChain integrations." authors = [] license = "MIT" @@ -104,7 +104,8 @@ vdms = {version = "^0.0.20", optional = true} httpx-sse = {version = "^0.4.0", optional = true} pyjwt = {version = "^2.8.0", optional = true} oracledb = {version = "^2.2.0", optional = true} -aerospike-vector-search = {version = "^0.6.1", optional = true} +cloudpathlib = { version = "^0.18", optional = true } + [tool.poetry.group.test] optional = true @@ -202,7 +203,6 @@ cli = ["typer"] # Please use new-line on formatting to make it easier to add new packages without # merge-conflicts extended_testing = [ - "aerospike-vector-search", "aleph-alpha-client", "aiosqlite", "assemblyai", @@ -267,6 +267,7 @@ extended_testing = [ "praw", "databricks-vectorsearch", "cloudpickle", + "cloudpathlib", "dgml-utils", "cohere", "tree-sitter", @@ -287,7 +288,7 @@ extended_testing = [ "vdms", "httpx-sse", "pyjwt", - "oracledb" + "oracledb", ] [tool.ruff] diff --git a/libs/community/scripts/lint_imports.sh b/libs/community/scripts/lint_imports.sh index 89af89514f5..97d9c96b031 100755 --- a/libs/community/scripts/lint_imports.sh +++ b/libs/community/scripts/lint_imports.sh @@ -8,6 +8,14 @@ errors=0 # make sure not importing from langchain or langchain_experimental git --no-pager grep '^from langchain_experimental\.' . && errors=$((errors+1)) +# make sure no one is importing from the built-in xml library +# instead defusedxml should be used to avoid getting CVEs. +# Whether the standary library actually poses a risk to users +# is very nuanced and dependns on user's environment. +# https://docs.python.org/3/library/xml.etree.elementtree.html +git --no-pager grep '^from xml\.' . | grep -vE "# OK: user-must-opt-in" && errors=$((errors+1)) +git --no-pager grep '^import xml\.' . 
| grep -vE "# OK: user-must-opt-in" && errors=$((errors+1)) + # Decide on an exit status based on the errors if [ "$errors" -gt 0 ]; then exit 1 diff --git a/libs/community/tests/integration_tests/document_compressors/__init__.py b/libs/community/tests/integration_tests/document_compressors/__init__.py new file mode 100644 index 00000000000..7b0197f5939 --- /dev/null +++ b/libs/community/tests/integration_tests/document_compressors/__init__.py @@ -0,0 +1 @@ +"""Test document compressor integrations.""" diff --git a/libs/community/tests/integration_tests/document_compressors/test_rankllm_rerank.py b/libs/community/tests/integration_tests/document_compressors/test_rankllm_rerank.py new file mode 100644 index 00000000000..46cb8b81be6 --- /dev/null +++ b/libs/community/tests/integration_tests/document_compressors/test_rankllm_rerank.py @@ -0,0 +1,8 @@ +"""Test rankllm reranker.""" + +from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank + + +def test_rankllm_reranker_init() -> None: + """Test the RankLLM reranker initializes correctly.""" + RankLLMRerank() diff --git a/libs/community/tests/integration_tests/document_loaders/test_tensorflow_datasets.py b/libs/community/tests/integration_tests/document_loaders/test_tensorflow_datasets.py index 7498b65e56e..956b8683132 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_tensorflow_datasets.py +++ b/libs/community/tests/integration_tests/document_loaders/test_tensorflow_datasets.py @@ -12,7 +12,7 @@ from langchain_community.document_loaders.tensorflow_datasets import ( ) if TYPE_CHECKING: - import tensorflow as tf # noqa: E402 + import tensorflow as tf def decode_to_str(item: tf.Tensor) -> str: diff --git a/libs/community/tests/integration_tests/embeddings/test_titan_takeoff.py b/libs/community/tests/integration_tests/embeddings/test_titan_takeoff.py index 884f1a120ab..5eb78bf374c 100644 --- a/libs/community/tests/integration_tests/embeddings/test_titan_takeoff.py +++ 
b/libs/community/tests/integration_tests/embeddings/test_titan_takeoff.py @@ -7,7 +7,11 @@ from typing import Any import pytest from langchain_community.embeddings import TitanTakeoffEmbed -from langchain_community.embeddings.titan_takeoff import MissingConsumerGroup +from langchain_community.embeddings.titan_takeoff import ( + Device, + MissingConsumerGroup, + ReaderConfig, +) @pytest.mark.requires("pytest_httpx") @@ -24,7 +28,7 @@ def test_titan_takeoff_call(httpx_mock: Any) -> None: embedding = TitanTakeoffEmbed(port=port) - output_1 = embedding.embed_documents("What is 2 + 2?", "primary") + output_1 = embedding.embed_documents(["What is 2 + 2?"], "primary") output_2 = embedding.embed_query("What is 2 + 2?", "primary") assert isinstance(output_1, list) @@ -53,12 +57,12 @@ def test_no_consumer_group_fails(httpx_mock: Any) -> None: embedding = TitanTakeoffEmbed(port=port) with pytest.raises(MissingConsumerGroup): - embedding.embed_documents("What is 2 + 2?") + embedding.embed_documents(["What is 2 + 2?"]) with pytest.raises(MissingConsumerGroup): embedding.embed_query("What is 2 + 2?") # Check specifying a consumer group works - embedding.embed_documents("What is 2 + 2?", "primary") + embedding.embed_documents(["What is 2 + 2?"], "primary") embedding.embed_query("What is 2 + 2?", "primary") @@ -70,14 +74,16 @@ def test_takeoff_initialization(httpx_mock: Any) -> None: inf_port = 46253 mgnt_url = f"http://localhost:{mgnt_port}/reader" embed_url = f"http://localhost:{inf_port}/embed" - reader_1 = { - "model_name": "test", - "device": "cpu", - "consumer_group": "embed", - } - reader_2 = reader_1.copy() - reader_2["model_name"] = "test2" - reader_2["device"] = "cuda" + reader_1 = ReaderConfig( + model_name="test", + device=Device.cpu, + consumer_group="embed", + ) + reader_2 = ReaderConfig( + model_name="test2", + device=Device.cuda, + consumer_group="embed", + ) httpx_mock.add_response( method="POST", url=mgnt_url, json={"key": "value"}, status_code=201 @@ -94,18 
+100,18 @@ def test_takeoff_initialization(httpx_mock: Any) -> None: ) # Shouldn't need to specify consumer group as there is only one specified during # initialization - output_1 = llm.embed_documents("What is 2 + 2?") + output_1 = llm.embed_documents(["What is 2 + 2?"]) output_2 = llm.embed_query("What is 2 + 2?") assert isinstance(output_1, list) assert isinstance(output_2, list) # Ensure the management api was called to create the reader assert len(httpx_mock.get_requests()) == 4 - for key, value in reader_1.items(): + for key, value in reader_1.dict().items(): assert json.loads(httpx_mock.get_requests()[0].content)[key] == value assert httpx_mock.get_requests()[0].url == mgnt_url # Also second call should be made to spin uo reader 2 - for key, value in reader_2.items(): + for key, value in reader_2.dict().items(): assert json.loads(httpx_mock.get_requests()[1].content)[key] == value assert httpx_mock.get_requests()[1].url == mgnt_url # Ensure the third call is to generate endpoint to inference @@ -126,15 +132,16 @@ def test_takeoff_initialization_with_more_than_one_consumer_group( inf_port = 46253 mgnt_url = f"http://localhost:{mgnt_port}/reader" embed_url = f"http://localhost:{inf_port}/embed" - reader_1 = { - "model_name": "test", - "device": "cpu", - "consumer_group": "embed", - } - reader_2 = reader_1.copy() - reader_2["model_name"] = "test2" - reader_2["device"] = "cuda" - reader_2["consumer_group"] = "embed2" + reader_1 = ReaderConfig( + model_name="test", + device=Device.cpu, + consumer_group="embed", + ) + reader_2 = ReaderConfig( + model_name="test2", + device=Device.cuda, + consumer_group="embed2", + ) httpx_mock.add_response( method="POST", url=mgnt_url, json={"key": "value"}, status_code=201 @@ -152,22 +159,22 @@ def test_takeoff_initialization_with_more_than_one_consumer_group( # There was more than one consumer group specified during initialization so we # need to specify which one to use with pytest.raises(MissingConsumerGroup): - 
llm.embed_documents("What is 2 + 2?") + llm.embed_documents(["What is 2 + 2?"]) with pytest.raises(MissingConsumerGroup): llm.embed_query("What is 2 + 2?") - output_1 = llm.embed_documents("What is 2 + 2?", "embed") + output_1 = llm.embed_documents(["What is 2 + 2?"], "embed") output_2 = llm.embed_query("What is 2 + 2?", "embed2") assert isinstance(output_1, list) assert isinstance(output_2, list) # Ensure the management api was called to create the reader assert len(httpx_mock.get_requests()) == 4 - for key, value in reader_1.items(): + for key, value in reader_1.dict().items(): assert json.loads(httpx_mock.get_requests()[0].content)[key] == value assert httpx_mock.get_requests()[0].url == mgnt_url # Also second call should be made to spin uo reader 2 - for key, value in reader_2.items(): + for key, value in reader_2.dict().items(): assert json.loads(httpx_mock.get_requests()[1].content)[key] == value assert httpx_mock.get_requests()[1].url == mgnt_url # Ensure the third call is to generate endpoint to inference diff --git a/libs/community/tests/integration_tests/graphs/test_neo4j.py b/libs/community/tests/integration_tests/graphs/test_neo4j.py index 8fe3349ee3b..2765938905e 100644 --- a/libs/community/tests/integration_tests/graphs/test_neo4j.py +++ b/libs/community/tests/integration_tests/graphs/test_neo4j.py @@ -333,3 +333,28 @@ def test_enhanced_schema() -> None: # remove metadata portion of schema del graph.structured_schema["metadata"] assert graph.structured_schema == expected_output + + +def test_enhanced_schema_exception() -> None: + """Test no error with weird schema.""" + url = os.environ.get("NEO4J_URI") + username = os.environ.get("NEO4J_USERNAME") + password = os.environ.get("NEO4J_PASSWORD") + assert url is not None + assert username is not None + assert password is not None + + graph = Neo4jGraph( + url=url, username=username, password=password, enhanced_schema=True + ) + graph.query("MATCH (n) DETACH DELETE n") + graph.query("CREATE (:Node 
{foo:'bar'})," "(:Node {foo: 1}), (:Node {foo: [1,2]})") + graph.refresh_schema() + expected_output = { + "node_props": {"Node": [{"property": "foo", "type": "STRING"}]}, + "rel_props": {}, + "relationships": [], + } + # remove metadata portion of schema + del graph.structured_schema["metadata"] + assert graph.structured_schema == expected_output diff --git a/libs/community/tests/integration_tests/memory/test_memory_cassandra.py b/libs/community/tests/integration_tests/memory/test_memory_cassandra.py index 6ff03ba6e77..5e7fc0535b4 100644 --- a/libs/community/tests/integration_tests/memory/test_memory_cassandra.py +++ b/libs/community/tests/integration_tests/memory/test_memory_cassandra.py @@ -1,6 +1,6 @@ import os import time -from typing import Optional +from typing import Any, Optional from langchain.memory import ConversationBufferMemory from langchain_core.messages import AIMessage, HumanMessage @@ -37,13 +37,15 @@ def _chat_message_history( # drop table if required if drop: session.execute(f"DROP TABLE IF EXISTS {keyspace}.{table_name}") - # + + kwargs: Any = {} if ttl_seconds is None else {"ttl_seconds": ttl_seconds} + return CassandraChatMessageHistory( session_id=session_id, session=session, keyspace=keyspace, table_name=table_name, - **({} if ttl_seconds is None else {"ttl_seconds": ttl_seconds}), + **kwargs, ) diff --git a/libs/community/tests/integration_tests/storage/test_cassandra.py b/libs/community/tests/integration_tests/storage/test_cassandra.py new file mode 100644 index 00000000000..88f240ed791 --- /dev/null +++ b/libs/community/tests/integration_tests/storage/test_cassandra.py @@ -0,0 +1,155 @@ +"""Implement integration tests for Cassandra storage.""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from langchain_community.storage.cassandra import CassandraByteStore +from langchain_community.utilities.cassandra import SetupMode + +if TYPE_CHECKING: + from cassandra.cluster import Session + +KEYSPACE = 
"storage_test_keyspace" + + +@pytest.fixture(scope="session") +def session() -> Session: + from cassandra.cluster import Cluster + + cluster = Cluster() + session = cluster.connect() + session.execute( + ( + f"CREATE KEYSPACE IF NOT EXISTS {KEYSPACE} " + f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}" + ) + ) + return session + + +def init_store(table_name: str, session: Session) -> CassandraByteStore: + store = CassandraByteStore(table=table_name, keyspace=KEYSPACE, session=session) + store.mset([("key1", b"value1"), ("key2", b"value2")]) + return store + + +async def init_async_store(table_name: str, session: Session) -> CassandraByteStore: + store = CassandraByteStore( + table=table_name, keyspace=KEYSPACE, session=session, setup_mode=SetupMode.ASYNC + ) + await store.amset([("key1", b"value1"), ("key2", b"value2")]) + return store + + +def drop_table(table_name: str, session: Session) -> None: + session.execute(f"DROP TABLE {KEYSPACE}.{table_name}") + + +async def test_mget(session: Session) -> None: + """Test CassandraByteStore mget method.""" + table_name = "lc_test_store_mget" + try: + store = init_store(table_name, session) + assert store.mget(["key1", "key2"]) == [b"value1", b"value2"] + assert await store.amget(["key1", "key2"]) == [b"value1", b"value2"] + finally: + drop_table(table_name, session) + + +async def test_amget(session: Session) -> None: + """Test CassandraByteStore amget method.""" + table_name = "lc_test_store_amget" + try: + store = await init_async_store(table_name, session) + assert await store.amget(["key1", "key2"]) == [b"value1", b"value2"] + finally: + drop_table(table_name, session) + + +def test_mset(session: Session) -> None: + """Test that multiple keys can be set with CassandraByteStore.""" + table_name = "lc_test_store_mset" + try: + init_store(table_name, session) + result = session.execute( + "SELECT row_id, body_blob FROM storage_test_keyspace.lc_test_store_mset " + "WHERE row_id = 'key1';" + 
).one() + assert result.body_blob == b"value1" + result = session.execute( + "SELECT row_id, body_blob FROM storage_test_keyspace.lc_test_store_mset " + "WHERE row_id = 'key2';" + ).one() + assert result.body_blob == b"value2" + finally: + drop_table(table_name, session) + + +async def test_amset(session: Session) -> None: + """Test that multiple keys can be set with CassandraByteStore.""" + table_name = "lc_test_store_amset" + try: + await init_async_store(table_name, session) + result = session.execute( + "SELECT row_id, body_blob FROM storage_test_keyspace.lc_test_store_amset " + "WHERE row_id = 'key1';" + ).one() + assert result.body_blob == b"value1" + result = session.execute( + "SELECT row_id, body_blob FROM storage_test_keyspace.lc_test_store_amset " + "WHERE row_id = 'key2';" + ).one() + assert result.body_blob == b"value2" + finally: + drop_table(table_name, session) + + +def test_mdelete(session: Session) -> None: + """Test that deletion works as expected.""" + table_name = "lc_test_store_mdelete" + try: + store = init_store(table_name, session) + store.mdelete(["key1", "key2"]) + result = store.mget(["key1", "key2"]) + assert result == [None, None] + finally: + drop_table(table_name, session) + + +async def test_amdelete(session: Session) -> None: + """Test that deletion works as expected.""" + table_name = "lc_test_store_amdelete" + try: + store = await init_async_store(table_name, session) + await store.amdelete(["key1", "key2"]) + result = await store.amget(["key1", "key2"]) + assert result == [None, None] + finally: + drop_table(table_name, session) + + +def test_yield_keys(session: Session) -> None: + table_name = "lc_test_store_yield_keys" + try: + store = init_store(table_name, session) + assert set(store.yield_keys()) == {"key1", "key2"} + assert set(store.yield_keys(prefix="key")) == {"key1", "key2"} + assert set(store.yield_keys(prefix="lang")) == set() + finally: + drop_table(table_name, session) + + +async def test_ayield_keys(session: 
Session) -> None: + table_name = "lc_test_store_ayield_keys" + try: + store = await init_async_store(table_name, session) + assert {key async for key in store.ayield_keys()} == {"key1", "key2"} + assert {key async for key in store.ayield_keys(prefix="key")} == { + "key1", + "key2", + } + assert {key async for key in store.ayield_keys(prefix="lang")} == set() + finally: + drop_table(table_name, session) diff --git a/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py b/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py index 01990bc2c4b..6e207dd7bc7 100644 --- a/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py +++ b/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py @@ -10,7 +10,7 @@ from langchain_core.pydantic_v1 import ValidationError from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets if TYPE_CHECKING: - import tensorflow as tf # noqa: E402 + import tensorflow as tf def decode_to_str(item: tf.Tensor) -> str: diff --git a/libs/community/tests/integration_tests/vectorstores/qdrant/async_api/test_add_texts.py b/libs/community/tests/integration_tests/vectorstores/qdrant/async_api/test_add_texts.py index df15626b0fb..ab7ecebd6dd 100644 --- a/libs/community/tests/integration_tests/vectorstores/qdrant/async_api/test_add_texts.py +++ b/libs/community/tests/integration_tests/vectorstores/qdrant/async_api/test_add_texts.py @@ -7,7 +7,7 @@ from langchain_community.vectorstores import Qdrant from tests.integration_tests.vectorstores.fake_embeddings import ( ConsistentFakeEmbeddings, ) -from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import ( # noqa +from tests.integration_tests.vectorstores.qdrant.async_api.fixtures import ( qdrant_locations, ) diff --git a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py index 
483c6ee50d2..6f132af44a2 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py +++ b/libs/community/tests/integration_tests/vectorstores/test_azure_cosmos_db.py @@ -74,7 +74,7 @@ class TestAzureCosmosDBVectorSearch: # insure the test collection is empty collection = prepare_collection() - assert collection.count_documents({}) == 0 # type: ignore[index] # noqa: E501 + assert collection.count_documents({}) == 0 # type: ignore[index] @classmethod def teardown_class(cls) -> None: diff --git a/libs/community/tests/integration_tests/vectorstores/test_documentdb.py b/libs/community/tests/integration_tests/vectorstores/test_documentdb.py index 261d98751bd..805e46599ed 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_documentdb.py +++ b/libs/community/tests/integration_tests/vectorstores/test_documentdb.py @@ -70,7 +70,7 @@ class TestDocumentDBVectorSearch: # insure the test collection is empty collection = prepare_collection() - assert collection.count_documents({}) == 0 # type: ignore[index] # noqa: E501 + assert collection.count_documents({}) == 0 # type: ignore[index] @classmethod def teardown_class(cls) -> None: diff --git a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py index 6a1992cc748..fd50baf5290 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py +++ b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py @@ -65,6 +65,7 @@ test_setup = ConfigData() def generateSchemaName(cursor): # type: ignore[no-untyped-def] + # return "Langchain" cursor.execute( "SELECT REPLACE(CURRENT_UTCDATE, '-', '') || '_' || BINTOHEX(SYSUUID) FROM " "DUMMY;" @@ -85,6 +86,7 @@ def setup_module(module): # type: ignore[no-untyped-def] password=os.environ.get("HANA_DB_PASSWORD"), autocommit=True, sslValidateCertificate=False, + # encrypt=True ) try: cur = test_setup.conn.cursor() @@ -100,6 
+102,7 @@ def setup_module(module): # type: ignore[no-untyped-def] def teardown_module(module): # type: ignore[no-untyped-def] + # return try: cur = test_setup.conn.cursor() sql_str = f"DROP SCHEMA {test_setup.schema_name} CASCADE" @@ -112,7 +115,7 @@ def teardown_module(module): # type: ignore[no-untyped-def] @pytest.fixture def texts() -> List[str]: - return ["foo", "bar", "baz"] + return ["foo", "bar", "baz", "bak", "cat"] @pytest.fixture @@ -121,6 +124,8 @@ def metadatas() -> List[str]: {"start": 0, "end": 100, "quality": "good", "ready": True}, # type: ignore[list-item] {"start": 100, "end": 200, "quality": "bad", "ready": False}, # type: ignore[list-item] {"start": 200, "end": 300, "quality": "ugly", "ready": True}, # type: ignore[list-item] + {"start": 200, "quality": "ugly", "ready": True, "Owner": "Steve"}, # type: ignore[list-item] + {"start": 300, "quality": "ugly", "Owner": "Steve"}, # type: ignore[list-item] ] @@ -640,14 +645,14 @@ def test_hanavector_delete_with_filter(texts: List[str], metadatas: List[dict]) table_name=table_name, ) - search_result = vectorDB.similarity_search(texts[0], 3) - assert len(search_result) == 3 + search_result = vectorDB.similarity_search(texts[0], 10) + assert len(search_result) == 5 # Delete one of the three entries assert vectorDB.delete(filter={"start": 100, "end": 200}) - search_result = vectorDB.similarity_search(texts[0], 3) - assert len(search_result) == 2 + search_result = vectorDB.similarity_search(texts[0], 10) + assert len(search_result) == 4 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") @@ -667,14 +672,14 @@ async def test_hanavector_delete_with_filter_async( table_name=table_name, ) - search_result = vectorDB.similarity_search(texts[0], 3) - assert len(search_result) == 3 + search_result = vectorDB.similarity_search(texts[0], 10) + assert len(search_result) == 5 # Delete one of the three entries assert await vectorDB.adelete(filter={"start": 100, "end": 200}) - search_result = 
vectorDB.similarity_search(texts[0], 3) - assert len(search_result) == 2 + search_result = vectorDB.similarity_search(texts[0], 10) + assert len(search_result) == 4 @pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") @@ -861,7 +866,7 @@ def test_hanavector_filter_prepared_statement_params( sql_str = f"SELECT * FROM {table_name} WHERE JSON_VALUE(VEC_META, '$.ready') = ?" cur.execute(sql_str, (query_value)) rows = cur.fetchall() - assert len(rows) == 2 + assert len(rows) == 3 # query_value = False query_value = "false" # type: ignore[assignment] @@ -1094,3 +1099,336 @@ def test_pgvector_with_with_metadata_filters_5( ids = [doc.metadata["id"] for doc in docs] assert len(ids) == len(expected_ids), test_filter assert set(ids).issubset(expected_ids), test_filter + + +@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") +def test_preexisting_specific_columns_for_metadata_fill( + texts: List[str], metadatas: List[dict] +) -> None: + table_name = "PREEXISTING_FILTER_COLUMNS" + # drop_table(test_setup.conn, table_name) + + sql_str = ( + f'CREATE TABLE "{table_name}" (' + f'"VEC_TEXT" NCLOB, ' + f'"VEC_META" NCLOB, ' + f'"VEC_VECTOR" REAL_VECTOR, ' + f'"Owner" NVARCHAR(100), ' + f'"quality" NVARCHAR(100));' + ) + try: + cur = test_setup.conn.cursor() + cur.execute(sql_str) + finally: + cur.close() + + vectorDB = HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["Owner", "quality"], + ) + + c = 0 + try: + sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'" + cur = test_setup.conn.cursor() + cur.execute(sql_str) + if cur.has_result_set(): + rows = cur.fetchall() + c = rows[0][0] + finally: + cur.close() + assert c == 3 + + docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"}) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + docs = 
vectorDB.similarity_search("hello", k=5, filter={"start": 100}) + assert len(docs) == 1 + assert docs[0].page_content == "bar" + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 100, "quality": "good"} + ) + assert len(docs) == 0 + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 0, "quality": "good"} + ) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + +@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") +def test_preexisting_specific_columns_for_metadata_via_array( + texts: List[str], metadatas: List[dict] +) -> None: + table_name = "PREEXISTING_FILTER_COLUMNS_VIA_ARRAY" + # drop_table(test_setup.conn, table_name) + + sql_str = ( + f'CREATE TABLE "{table_name}" (' + f'"VEC_TEXT" NCLOB, ' + f'"VEC_META" NCLOB, ' + f'"VEC_VECTOR" REAL_VECTOR, ' + f'"Owner" NVARCHAR(100), ' + f'"quality" NVARCHAR(100));' + ) + try: + cur = test_setup.conn.cursor() + cur.execute(sql_str) + finally: + cur.close() + + vectorDB = HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["quality"], + ) + + c = 0 + try: + sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "quality"=' f"'ugly'" + cur = test_setup.conn.cursor() + cur.execute(sql_str) + if cur.has_result_set(): + rows = cur.fetchall() + c = rows[0][0] + finally: + cur.close() + assert c == 3 + + try: + sql_str = f'SELECT COUNT(*) FROM {table_name} WHERE "Owner"=' f"'Steve'" + cur = test_setup.conn.cursor() + cur.execute(sql_str) + if cur.has_result_set(): + rows = cur.fetchall() + c = rows[0][0] + finally: + cur.close() + assert c == 0 + + docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"}) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100}) + assert len(docs) == 1 + assert docs[0].page_content == "bar" + + docs = 
vectorDB.similarity_search( + "hello", k=5, filter={"start": 100, "quality": "good"} + ) + assert len(docs) == 0 + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 0, "quality": "good"} + ) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + +@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") +def test_preexisting_specific_columns_for_metadata_multiple_columns( + texts: List[str], metadatas: List[dict] +) -> None: + table_name = "PREEXISTING_FILTER_MULTIPLE_COLUMNS" + # drop_table(test_setup.conn, table_name) + + sql_str = ( + f'CREATE TABLE "{table_name}" (' + f'"VEC_TEXT" NCLOB, ' + f'"VEC_META" NCLOB, ' + f'"VEC_VECTOR" REAL_VECTOR, ' + f'"quality" NVARCHAR(100), ' + f'"start" INTEGER);' + ) + try: + cur = test_setup.conn.cursor() + cur.execute(sql_str) + finally: + cur.close() + + vectorDB = HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["quality", "start"], + ) + + docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"}) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100}) + assert len(docs) == 1 + assert docs[0].page_content == "bar" + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 100, "quality": "good"} + ) + assert len(docs) == 0 + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 0, "quality": "good"} + ) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + +@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") +def test_preexisting_specific_columns_for_metadata_empty_columns( + texts: List[str], metadatas: List[dict] +) -> None: + table_name = "PREEXISTING_FILTER_MULTIPLE_COLUMNS_EMPTY" + # drop_table(test_setup.conn, table_name) + + sql_str = ( + f'CREATE TABLE "{table_name}" (' + f'"VEC_TEXT" NCLOB, ' + 
f'"VEC_META" NCLOB, ' + f'"VEC_VECTOR" REAL_VECTOR, ' + f'"quality" NVARCHAR(100), ' + f'"ready" BOOLEAN, ' + f'"start" INTEGER);' + ) + try: + cur = test_setup.conn.cursor() + cur.execute(sql_str) + finally: + cur.close() + + vectorDB = HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["quality", "ready", "start"], + ) + + docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"}) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + docs = vectorDB.similarity_search("hello", k=5, filter={"start": 100}) + assert len(docs) == 1 + assert docs[0].page_content == "bar" + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 100, "quality": "good"} + ) + assert len(docs) == 0 + + docs = vectorDB.similarity_search( + "hello", k=5, filter={"start": 0, "quality": "good"} + ) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + + docs = vectorDB.similarity_search("hello", k=5, filter={"ready": True}) + assert len(docs) == 3 + + +@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") +def test_preexisting_specific_columns_for_metadata_wrong_type_or_non_existing( + texts: List[str], metadatas: List[dict] +) -> None: + table_name = "PREEXISTING_FILTER_COLUMNS_WRONG_TYPE" + # drop_table(test_setup.conn, table_name) + + sql_str = ( + f'CREATE TABLE "{table_name}" (' + f'"VEC_TEXT" NCLOB, ' + f'"VEC_META" NCLOB, ' + f'"VEC_VECTOR" REAL_VECTOR, ' + f'"quality" INTEGER); ' + ) + try: + cur = test_setup.conn.cursor() + cur.execute(sql_str) + finally: + cur.close() + + # Check if table is created + exception_occured = False + try: + HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["quality"], + ) + exception_occured = False + except dbapi.Error: # Nothing we should do here, hdbcli 
will throw an error + exception_occured = True + assert exception_occured # Check if table is created + + exception_occured = False + try: + HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["NonExistingColumn"], + ) + exception_occured = False + except AttributeError: # Nothing we should do here, hdbcli will throw an error + exception_occured = True + assert exception_occured + + +@pytest.mark.skipif(not hanadb_installed, reason="hanadb not installed") +def test_preexisting_specific_columns_for_returned_metadata_completeness( + texts: List[str], metadatas: List[dict] +) -> None: + table_name = "PREEXISTING_FILTER_COLUMNS_METADATA_COMPLETENESS" + # drop_table(test_setup.conn, table_name) + + sql_str = ( + f'CREATE TABLE "{table_name}" (' + f'"VEC_TEXT" NCLOB, ' + f'"VEC_META" NCLOB, ' + f'"VEC_VECTOR" REAL_VECTOR, ' + f'"quality" NVARCHAR(100), ' + f'"NonExisting" NVARCHAR(100), ' + f'"ready" BOOLEAN, ' + f'"start" INTEGER);' + ) + try: + cur = test_setup.conn.cursor() + cur.execute(sql_str) + finally: + cur.close() + + vectorDB = HanaDB.from_texts( + connection=test_setup.conn, + texts=texts, + metadatas=metadatas, + embedding=embedding, + table_name=table_name, + specific_metadata_columns=["quality", "ready", "start", "NonExisting"], + ) + + docs = vectorDB.similarity_search("hello", k=5, filter={"quality": "good"}) + assert len(docs) == 1 + assert docs[0].page_content == "foo" + assert docs[0].metadata["end"] == 100 + assert docs[0].metadata["start"] == 0 + assert docs[0].metadata["quality"] == "good" + assert docs[0].metadata["ready"] + assert "NonExisting" not in docs[0].metadata.keys() diff --git a/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py b/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py index d88c1d34a6c..d5a0b547df3 100644 --- 
a/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py +++ b/libs/community/tests/integration_tests/vectorstores/test_mongodb_atlas.py @@ -34,7 +34,7 @@ class TestMongoDBAtlasVectorSearch: def setup_class(cls) -> None: # insure the test collection is empty collection = get_collection() - assert collection.count_documents({}) == 0 # type: ignore[index] # noqa: E501 + assert collection.count_documents({}) == 0 # type: ignore[index] @classmethod def teardown_class(cls) -> None: diff --git a/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py b/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py index 66b5425692b..be164fdf234 100644 --- a/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py +++ b/libs/community/tests/unit_tests/callbacks/fake_callback_handler.py @@ -21,7 +21,7 @@ class BaseFakeCallbackHandler(BaseModel): ignore_retriever_: bool = False ignore_chat_model_: bool = False - # to allow for similar callback handlers that are not technicall equal + # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests diff --git a/libs/community/tests/unit_tests/callbacks/test_openai_info.py b/libs/community/tests/unit_tests/callbacks/test_openai_info.py index 64d0ae57d15..b139435fc71 100644 --- a/libs/community/tests/unit_tests/callbacks/test_openai_info.py +++ b/libs/community/tests/unit_tests/callbacks/test_openai_info.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock from uuid import uuid4 +import numpy as np import pytest from langchain_core.outputs import LLMResult @@ -58,7 +59,7 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None: ("davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04", 0.24), ("ft:babbage-002:your-org:custom-model-name:1abcdefg", 0.0032), ("ft:davinci-002:your-org:custom-model-name:1abcdefg", 0.024), - 
("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.028), + ("ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg", 0.009), ("babbage-002.ft-0123456789abcdefghijklmnopqrstuv", 0.0008), ("davinci-002.ft-0123456789abcdefghijklmnopqrstuv", 0.004), ("gpt-35-turbo-0613.ft-0123456789abcdefghijklmnopqrstuv", 0.0035), @@ -79,7 +80,7 @@ def test_on_llm_end_finetuned_model( }, ) handler.on_llm_end(response) - assert handler.total_cost == expected_cost + assert np.isclose(handler.total_cost, expected_cost) @pytest.mark.parametrize( diff --git a/libs/community/tests/unit_tests/chat_message_histories/test_imports.py b/libs/community/tests/unit_tests/chat_message_histories/test_imports.py index 9a021687ec5..a9f26ee7d26 100644 --- a/libs/community/tests/unit_tests/chat_message_histories/test_imports.py +++ b/libs/community/tests/unit_tests/chat_message_histories/test_imports.py @@ -22,6 +22,7 @@ EXPECTED_ALL = [ "UpstashRedisChatMessageHistory", "XataChatMessageHistory", "ZepChatMessageHistory", + "ZepCloudChatMessageHistory", ] diff --git a/libs/community/tests/unit_tests/chat_models/test_javelin_ai_gateway.py b/libs/community/tests/unit_tests/chat_models/test_javelin_ai_gateway.py index 7c4500d340d..c612747dd5d 100644 --- a/libs/community/tests/unit_tests/chat_models/test_javelin_ai_gateway.py +++ b/libs/community/tests/unit_tests/chat_models/test_javelin_ai_gateway.py @@ -30,3 +30,17 @@ def test_api_key_masked_when_passed_via_constructor() -> None: assert str(llm.javelin_api_key) == "**********" assert "secret-api-key" not in repr(llm.javelin_api_key) assert "secret-api-key" not in repr(llm) + + +@pytest.mark.requires("javelin_sdk") +def test_api_key_alias() -> None: + for model in [ + ChatJavelinAIGateway( + route="", + javelin_api_key="secret-api-key", + ), + ChatJavelinAIGateway( + route="", api_key="secret-api-key" + ), + ]: + assert str(model.javelin_api_key) == "**********" diff --git 
a/libs/community/tests/unit_tests/data/openapi_specs/openapi_spec_header_param.json b/libs/community/tests/unit_tests/data/openapi_specs/openapi_spec_header_param.json new file mode 100644 index 00000000000..ff38939c0a8 --- /dev/null +++ b/libs/community/tests/unit_tests/data/openapi_specs/openapi_spec_header_param.json @@ -0,0 +1,34 @@ +{ + "openapi": "3.0.0", + "info": { + "version": "1.0.0", + "title": "Swagger Petstore", + "license": { + "name": "MIT" + } + }, + "servers": [ + { + "url": "http://petstore.swagger.io/v1" + } + ], + "paths": { + "/pets": { + "get": { + "summary": "Info for a specific pet", + "operationId": "showPetById", + "parameters": [ + { + "name": "header_param", + "in": "header", + "required": true, + "description": "A header param", + "schema": { + "type": "string" + } + } + ] + } + } + } + } \ No newline at end of file diff --git a/libs/community/tests/unit_tests/document_compressors/test_imports.py b/libs/community/tests/unit_tests/document_compressors/test_imports.py index c0b08904bf9..0cc7c1de9bf 100644 --- a/libs/community/tests/unit_tests/document_compressors/test_imports.py +++ b/libs/community/tests/unit_tests/document_compressors/test_imports.py @@ -4,6 +4,7 @@ EXPECTED_ALL = [ "LLMLinguaCompressor", "OpenVINOReranker", "JinaRerank", + "RankLLMRerank", "FlashrankRerank", ] diff --git a/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_cloud_blob_loader.py b/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_cloud_blob_loader.py new file mode 100644 index 00000000000..53ad0da98b7 --- /dev/null +++ b/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_cloud_blob_loader.py @@ -0,0 +1,166 @@ +"""Verify that file system blob loader works as expected.""" +import os +import tempfile +from typing import Generator +from urllib.parse import urlparse + +import pytest + +from langchain_community.document_loaders.blob_loaders import CloudBlobLoader + + +@pytest.fixture +def toy_dir() -> 
Generator[str, None, None]: + """Yield a pre-populated directory to test the blob loader.""" + with tempfile.TemporaryDirectory() as temp_dir: + # Create test.txt + with open(os.path.join(temp_dir, "test.txt"), "w") as test_txt: + test_txt.write("This is a test.txt file.") + + # Create test.html + with open(os.path.join(temp_dir, "test.html"), "w") as test_html: + test_html.write( + "

This is a test.html file.

" + ) + + # Create .hidden_file + with open(os.path.join(temp_dir, ".hidden_file"), "w") as hidden_file: + hidden_file.write("This is a hidden file.") + + # Create some_dir/nested_file.txt + some_dir = os.path.join(temp_dir, "some_dir") + os.makedirs(some_dir) + with open(os.path.join(some_dir, "nested_file.txt"), "w") as nested_file: + nested_file.write("This is a nested_file.txt file.") + + # Create some_dir/other_dir/more_nested.txt + other_dir = os.path.join(some_dir, "other_dir") + os.makedirs(other_dir) + with open(os.path.join(other_dir, "more_nested.txt"), "w") as nested_file: + nested_file.write("This is a more_nested.txt file.") + + yield f"file://{temp_dir}" + + +# @pytest.fixture +# @pytest.mark.requires("boto3") +# def toy_dir() -> str: +# return "s3://ppr-langchain-test" + + +_TEST_CASES = [ + { + "glob": "**/[!.]*", + "suffixes": None, + "exclude": (), + "relative_filenames": [ + "test.html", + "test.txt", + "some_dir/nested_file.txt", + "some_dir/other_dir/more_nested.txt", + ], + }, + { + "glob": "*", + "suffixes": None, + "exclude": (), + "relative_filenames": ["test.html", "test.txt", ".hidden_file"], + }, + { + "glob": "**/*.html", + "suffixes": None, + "exclude": (), + "relative_filenames": ["test.html"], + }, + { + "glob": "*/*.txt", + "suffixes": None, + "exclude": (), + "relative_filenames": ["some_dir/nested_file.txt"], + }, + { + "glob": "**/*.txt", + "suffixes": None, + "exclude": (), + "relative_filenames": [ + "test.txt", + "some_dir/nested_file.txt", + "some_dir/other_dir/more_nested.txt", + ], + }, + { + "glob": "**/*", + "suffixes": [".txt"], + "exclude": (), + "relative_filenames": [ + "test.txt", + "some_dir/nested_file.txt", + "some_dir/other_dir/more_nested.txt", + ], + }, + { + "glob": "meeeeeeow", + "suffixes": None, + "exclude": (), + "relative_filenames": [], + }, + { + "glob": "*", + "suffixes": [".html", ".txt"], + "exclude": (), + "relative_filenames": ["test.html", "test.txt"], + }, + # Using exclude patterns + { + 
"glob": "**/*", + "suffixes": [".txt"], + "exclude": ("some_dir/*",), + "relative_filenames": ["test.txt", "some_dir/other_dir/more_nested.txt"], + }, + # Using 2 exclude patterns, one of which is recursive + { + "glob": "**/*", + "suffixes": None, + "exclude": ("**/*.txt", ".hidden*"), + "relative_filenames": ["test.html"], + }, +] + + +@pytest.mark.requires("cloudpathlib") +@pytest.mark.parametrize("params", _TEST_CASES) +def test_file_names_exist(toy_dir: str, params: dict) -> None: + """Verify that the file names exist.""" + + glob_pattern = params["glob"] + suffixes = params["suffixes"] + exclude = params["exclude"] + relative_filenames = params["relative_filenames"] + + loader = CloudBlobLoader( + toy_dir, glob=glob_pattern, suffixes=suffixes, exclude=exclude + ) + blobs = list(loader.yield_blobs()) + + url_parsed = urlparse(toy_dir) + scheme = "" + if url_parsed.scheme == "file": + scheme = "file://" + + file_names = sorted(f"{scheme}{blob.path}" for blob in blobs) + + expected_filenames = sorted( + str(toy_dir + "/" + relative_filename) + for relative_filename in relative_filenames + ) + + assert file_names == expected_filenames + assert loader.count_matching_files() == len(relative_filenames) + + +@pytest.mark.requires("cloudpathlib") +def test_show_progress(toy_dir: str) -> None: + """Verify that file system loader works with a progress bar.""" + loader = CloudBlobLoader(toy_dir) + blobs = list(loader.yield_blobs()) + assert len(blobs) == loader.count_matching_files() diff --git a/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py b/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py index 8dd6f361670..e3f53c0827a 100644 --- a/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py +++ b/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_public_api.py @@ -6,6 +6,7 @@ def test_public_api() -> None: assert sorted(__all__) == [ "Blob", "BlobLoader", + 
"CloudBlobLoader", "FileSystemBlobLoader", "YoutubeAudioLoader", ] diff --git a/libs/community/tests/unit_tests/document_loaders/parsers/language/test_lua.py b/libs/community/tests/unit_tests/document_loaders/parsers/language/test_lua.py index afb50b8345b..dab2ea8474b 100644 --- a/libs/community/tests/unit_tests/document_loaders/parsers/language/test_lua.py +++ b/libs/community/tests/unit_tests/document_loaders/parsers/language/test_lua.py @@ -33,7 +33,7 @@ end""" @pytest.mark.skip( reason=( "Flakey. To be investigated. See " - "https://github.com/langchain-ai/langchain/actions/runs/7907779756/job/21585580650." # noqa: E501 + "https://github.com/langchain-ai/langchain/actions/runs/7907779756/job/21585580650." ) ) def test_extract_functions_classes(self) -> None: @@ -46,7 +46,7 @@ end""" reason=( "Flakey. To be investigated. See " "https://github.com/langchain-ai/langchain/actions/runs/7923203031/job/21632416298?pr=17599 " # noqa: E501 - "and https://github.com/langchain-ai/langchain/actions/runs/7923784089/job/2163420864." # noqa: E501 + "and https://github.com/langchain-ai/langchain/actions/runs/7923784089/job/2163420864." 
) ) def test_simplify_code(self) -> None: diff --git a/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py b/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py index a9d4212c15e..a7ab65e35a4 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py +++ b/libs/community/tests/unit_tests/document_loaders/test_csv_loader.py @@ -85,6 +85,29 @@ class TestCSVLoader: # Assert assert result == expected_docs + def test_csv_loader_load_none_column_file(self) -> None: + # Setup + file_path = self._get_csv_file_path("test_none_col.csv") + expected_docs = [ + Document( + page_content="column1: value1\ncolumn2: value2\n" + "column3: value3\nNone: value4,value5", + metadata={"source": file_path, "row": 0}, + ), + Document( + page_content="column1: value6\ncolumn2: value7\n" + "column3: value8\nNone: value9", + metadata={"source": file_path, "row": 1}, + ), + ] + + # Exercise + loader = CSVLoader(file_path=file_path) + result = loader.load() + + # Assert + assert result == expected_docs + # utility functions def _get_csv_file_path(self, file_name: str) -> str: return str(Path(__file__).resolve().parent / "test_docs" / "csv" / file_name) diff --git a/libs/community/tests/unit_tests/document_loaders/test_directory_loader.py b/libs/community/tests/unit_tests/document_loaders/test_directory_loader.py index d8795a6ce3b..e385e77b33e 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_directory_loader.py +++ b/libs/community/tests/unit_tests/document_loaders/test_directory_loader.py @@ -58,6 +58,22 @@ class TestDirectoryLoader: "row": 1, }, ), + Document( + page_content="column1: value1\ncolumn2: value2\n" + "column3: value3\nNone: value4,value5", + metadata={ + "source": self._get_csv_file_path("test_none_col.csv"), + "row": 0, + }, + ), + Document( + page_content="column1: value6\ncolumn2: value7\n" + "column3: value8\nNone: value9", + metadata={ + "source": self._get_csv_file_path("test_none_col.csv"), + "row": 1, 
+ }, + ), ] loaded_docs = sorted(loader.load(), key=lambda doc: doc.metadata["source"]) @@ -141,6 +157,20 @@ class TestDirectoryLoader: metadata={"source": file_path, "row": 0}, ) ] + file_name = "test_none_col.csv" + file_path = self._get_csv_file_path(file_name) + expected_docs += [ + Document( + page_content="column1: value1\ncolumn2: value2\n" + "column3: value3\nNone: value4,value5", + metadata={"source": file_path, "row": 0}, + ), + Document( + page_content="column1: value6\ncolumn2: value7\n" + "column3: value8\nNone: value9", + metadata={"source": file_path, "row": 1}, + ), + ] # Assert loader = DirectoryLoader(dir_path, loader_cls=CSVLoader) diff --git a/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_none_col.csv b/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_none_col.csv new file mode 100644 index 00000000000..a6a3d77e050 --- /dev/null +++ b/libs/community/tests/unit_tests/document_loaders/test_docs/csv/test_none_col.csv @@ -0,0 +1,3 @@ +column1,column2,column3 +value1,value2,value3,value4,value5 +value6,value7,value8,value9 diff --git a/libs/community/tests/unit_tests/document_loaders/test_imports.py b/libs/community/tests/unit_tests/document_loaders/test_imports.py index a8890aabe0e..0f8628d20e8 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_imports.py +++ b/libs/community/tests/unit_tests/document_loaders/test_imports.py @@ -138,6 +138,7 @@ EXPECTED_ALL = [ "RocksetLoader", "S3DirectoryLoader", "S3FileLoader", + "ScrapflyLoader", "SQLDatabaseLoader", "SRTLoader", "SeleniumURLLoader", diff --git a/libs/community/tests/unit_tests/embeddings/test_imports.py b/libs/community/tests/unit_tests/embeddings/test_imports.py index f059e525051..fbf40de973b 100644 --- a/libs/community/tests/unit_tests/embeddings/test_imports.py +++ b/libs/community/tests/unit_tests/embeddings/test_imports.py @@ -1,6 +1,7 @@ from langchain_community.embeddings import __all__, _module_lookup EXPECTED_ALL = [ + 
"ClovaEmbeddings", "OpenAIEmbeddings", "AnyscaleEmbeddings", "AzureOpenAIEmbeddings", diff --git a/libs/community/tests/unit_tests/llms/test_llamafile.py b/libs/community/tests/unit_tests/llms/test_llamafile.py index 10fea66a5ac..186629149f5 100644 --- a/libs/community/tests/unit_tests/llms/test_llamafile.py +++ b/libs/community/tests/unit_tests/llms/test_llamafile.py @@ -42,7 +42,7 @@ def mock_response() -> requests.Response: def mock_response_stream(): # type: ignore[no-untyped-def] mock_response = deque( [ - b'data: {"content":"the","multimodal":false,"slot_id":0,"stop":false}\n\n', # noqa + b'data: {"content":"the","multimodal":false,"slot_id":0,"stop":false}\n\n', b'data: {"content":" quick","multimodal":false,"slot_id":0,"stop":false}\n\n', # noqa ] ) diff --git a/libs/community/tests/unit_tests/retrievers/test_imports.py b/libs/community/tests/unit_tests/retrievers/test_imports.py index 4f968bd9cb0..baf099d5e68 100644 --- a/libs/community/tests/unit_tests/retrievers/test_imports.py +++ b/libs/community/tests/unit_tests/retrievers/test_imports.py @@ -43,6 +43,7 @@ EXPECTED_ALL = [ "WebResearchRetriever", "YouRetriever", "ZepRetriever", + "ZepCloudRetriever", "ZillizRetriever", "DocArrayRetriever", "NeuralDBRetriever", diff --git a/libs/community/tests/unit_tests/storage/test_imports.py b/libs/community/tests/unit_tests/storage/test_imports.py index e624ecd07c2..750b7c5a3e2 100644 --- a/libs/community/tests/unit_tests/storage/test_imports.py +++ b/libs/community/tests/unit_tests/storage/test_imports.py @@ -3,6 +3,7 @@ from langchain_community.storage import __all__, _module_lookup EXPECTED_ALL = [ "AstraDBStore", "AstraDBByteStore", + "CassandraByteStore", "MongoDBStore", "RedisStore", "UpstashRedisByteStore", diff --git a/libs/community/tests/unit_tests/test_cache.py b/libs/community/tests/unit_tests/test_cache.py index 75798ba8538..d3c93880214 100644 --- a/libs/community/tests/unit_tests/test_cache.py +++ b/libs/community/tests/unit_tests/test_cache.py @@ 
-22,7 +22,7 @@ except ImportError: from langchain.globals import get_llm_cache, set_llm_cache from langchain_core.outputs import Generation, LLMResult -from langchain_community.cache import SQLAlchemyCache # noqa: E402 +from langchain_community.cache import SQLAlchemyCache from tests.unit_tests.llms.fake_llm import FakeLLM diff --git a/libs/community/tests/unit_tests/test_document_transformers.py b/libs/community/tests/unit_tests/test_document_transformers.py index 88f1d6dc9d9..78b0f08c77a 100644 --- a/libs/community/tests/unit_tests/test_document_transformers.py +++ b/libs/community/tests/unit_tests/test_document_transformers.py @@ -3,7 +3,7 @@ import pytest pytest.importorskip("langchain_community") -from langchain_community.document_transformers.embeddings_redundant_filter import ( # noqa: E402,E501 +from langchain_community.document_transformers.embeddings_redundant_filter import ( # noqa: E402 _filter_similar_embeddings, ) from langchain_community.utils.math import cosine_similarity # noqa: E402 diff --git a/libs/community/tests/unit_tests/test_imports.py b/libs/community/tests/unit_tests/test_imports.py index 8567ff3a245..59152b3b046 100644 --- a/libs/community/tests/unit_tests/test_imports.py +++ b/libs/community/tests/unit_tests/test_imports.py @@ -1,16 +1,170 @@ +import ast import glob import importlib from pathlib import Path +from typing import List, Tuple + +COMMUNITY_ROOT = Path(__file__).parent.parent.parent / "langchain_community" +ALL_COMMUNITY_GLOB = COMMUNITY_ROOT.as_posix() + "/**/*.py" +HERE = Path(__file__).parent +ROOT = HERE.parent.parent def test_importable_all() -> None: - for path in glob.glob("../community/langchain_community/*"): - relative_path = Path(path).parts[-1] - if relative_path.endswith(".typed"): - continue - module_name = relative_path.split(".")[0] + for path in glob.glob(ALL_COMMUNITY_GLOB): + # Relative to community root + relative_path = Path(path).relative_to(COMMUNITY_ROOT) + str_path = str(relative_path) + if 
str_path.endswith("__init__.py"): + module_name = str(relative_path.parent).replace("/", ".") + else: + module_name = str(relative_path.with_suffix("")).replace("/", ".") - module = importlib.import_module("langchain_community." + module_name) + try: + module = importlib.import_module("langchain_community." + module_name) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Could not import `{module_name}`. Defined in path: {path}" + ) from e all_ = getattr(module, "__all__", []) for cls_ in all_: getattr(module, cls_) + + +def test_glob_correct() -> None: + """Verify that the glob pattern is correct.""" + paths = list(glob.glob(ALL_COMMUNITY_GLOB)) + # Get paths relative to community root + paths_ = [Path(path).relative_to(COMMUNITY_ROOT) for path in paths] + # Assert there's a callback paths + assert Path("callbacks/__init__.py") in paths_ + + +def _check_correct_or_not_defined__all__(code: str) -> bool: + """Return True if __all__ is correctly defined or not defined at all.""" + # Parse the code into an AST + tree = ast.parse(code) + + all_good = True + + # Iterate through the body of the AST to find assignments + for node in tree.body: + # Check if the node is an assignment + if isinstance(node, ast.Assign): + # Check if the target of the assignment is '__all__' + for target in node.targets: + if isinstance(target, ast.Name) and target.id == "__all__": + # Check if the value assigned is a list + if isinstance(node.value, ast.List): + # Verify all elements in the list are string literals + if all(isinstance(el, ast.Str) for el in node.value.elts): + pass + else: + all_good = False + else: + all_good = False + return all_good + + +def test_no_dynamic__all__() -> None: + """Verify that __all__ is not computed at runtime. + + Computing __all__ dynamically can confuse static typing tools like pyright. + + __all__ should always be listed as an explicit list of string literals. 
+ """ + bad_definitions = [] + for path in glob.glob(ALL_COMMUNITY_GLOB): + if not path.endswith("__init__.py"): + continue + + with open(path, "r") as file: + code = file.read() + + if _check_correct_or_not_defined__all__(code) is False: + bad_definitions.append(path) + + if bad_definitions: + raise AssertionError( + f"__all__ is not correctly defined in the " + f"following files: {sorted(bad_definitions)}" + ) + + +def _extract_type_checking_imports(code: str) -> List[Tuple[str, str]]: + """Extract all TYPE CHECKING imports that import from langchain_community.""" + imports: List[Tuple[str, str]] = [] + + tree = ast.parse(code) + + class TypeCheckingVisitor(ast.NodeVisitor): + def visit_ImportFrom(self, node: ast.ImportFrom) -> None: + if node.module: + for alias in node.names: + imports.append((node.module, alias.name)) + + class GlobalScopeVisitor(ast.NodeVisitor): + def visit_If(self, node: ast.If) -> None: + if ( + isinstance(node.test, ast.Name) + and node.test.id == "TYPE_CHECKING" + and isinstance(node.test.ctx, ast.Load) + ): + TypeCheckingVisitor().visit(node) + self.generic_visit(node) + + GlobalScopeVisitor().visit(tree) + return imports + + +def test_init_files_properly_defined() -> None: + """This is part of a set of tests that verify that init files are properly + + defined if they're using dynamic imports. + """ + # Please never ever add more modules to this list. + # Do feel free to fix the underlying issues and remove exceptions + # from the list. 
+ excepted_modules = {"llms"} # NEVER ADD MORE MODULES TO THIS LIST + for path in glob.glob(ALL_COMMUNITY_GLOB): + # Relative to community root + relative_path = Path(path).relative_to(COMMUNITY_ROOT) + str_path = str(relative_path) + + if not str_path.endswith("__init__.py"): + continue + + module_name = str(relative_path.parent).replace("/", ".") + + if module_name in excepted_modules: + continue + + code = Path(path).read_text() + + # Check for dynamic __getattr__ definition in the __init__ file + if "__getattr__" not in code: + continue + + try: + module = importlib.import_module("langchain_community." + module_name) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Could not import `{module_name}`. Defined in path: {path}" + ) from e + + if not hasattr(module, "__all__"): + raise AssertionError( + f"__all__ not defined in {module_name}. This is required " + f"if __getattr__ is defined." + ) + + imports = _extract_type_checking_imports(code) + + # Get the names of all the TYPE CHECKING imports + names = [name for _, name in imports] + + missing_imports = set(module.__all__) - set(names) + + assert ( + not missing_imports + ), f"Missing imports: {missing_imports} in file path: {path}" diff --git a/libs/community/tests/unit_tests/utilities/test_openapi.py b/libs/community/tests/unit_tests/utilities/test_openapi.py new file mode 100644 index 00000000000..e7e8b745573 --- /dev/null +++ b/libs/community/tests/unit_tests/utilities/test_openapi.py @@ -0,0 +1,44 @@ +from pathlib import Path + +import pytest +from langchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn + +from langchain_community.utilities.openapi import ( # noqa: E402 # ignore: community-import + OpenAPISpec, +) + +EXPECTED_OPENAI_FUNCTIONS_HEADER_PARAM = [ + { + "name": "showPetById", + "description": "Info for a specific pet", + "parameters": { + "type": "object", + "properties": { + "headers": { + "type": "object", + "properties": { + "header_param": { + "type": 
"string", + "description": "A header param", + } + }, + "required": ["header_param"], + } + }, + }, + } +] + + +@pytest.mark.requires("openapi_pydantic") +def test_header_param() -> None: + spec = OpenAPISpec.from_file( + Path(__file__).parent.parent + / "data" + / "openapi_specs" + / "openapi_spec_header_param.json", + ) + + openai_functions, _ = openapi_spec_to_openai_fn(spec) + + assert openai_functions == EXPECTED_OPENAI_FUNCTIONS_HEADER_PARAM diff --git a/libs/community/tests/unit_tests/utilities/test_rememberizer.py b/libs/community/tests/unit_tests/utilities/test_rememberizer.py index 3b288a107f9..f6fe63b03f4 100644 --- a/libs/community/tests/unit_tests/utilities/test_rememberizer.py +++ b/libs/community/tests/unit_tests/utilities/test_rememberizer.py @@ -23,7 +23,9 @@ class TestRememberizerAPIWrapper(unittest.TestCase): ] }, ) - wrapper = RememberizerAPIWrapper(rememberizer_api_key="dummy_key", n=10) + wrapper = RememberizerAPIWrapper( + rememberizer_api_key="dummy_key", top_k_results=10 + ) result = wrapper.search("test") self.assertEqual( result, @@ -44,7 +46,9 @@ class TestRememberizerAPIWrapper(unittest.TestCase): status=400, json={"detail": "Incorrect authentication credentials."}, ) - wrapper = RememberizerAPIWrapper(rememberizer_api_key="dummy_key", n=10) + wrapper = RememberizerAPIWrapper( + rememberizer_api_key="dummy_key", top_k_results=10 + ) with self.assertRaises(ValueError) as e: wrapper.search("test") self.assertEqual( @@ -66,7 +70,9 @@ class TestRememberizerAPIWrapper(unittest.TestCase): "document": {"id": "id2", "name": "name2"}, }, ] - wrapper = RememberizerAPIWrapper(rememberizer_api_key="dummy_key", n=10) + wrapper = RememberizerAPIWrapper( + rememberizer_api_key="dummy_key", top_k_results=10 + ) result = wrapper.load("test") self.assertEqual(len(result), 2) self.assertEqual(result[0].page_content, "content1") diff --git a/libs/community/tests/unit_tests/vectorstores/test_imports.py 
b/libs/community/tests/unit_tests/vectorstores/test_imports.py index 560e857d072..397dc581a2c 100644 --- a/libs/community/tests/unit_tests/vectorstores/test_imports.py +++ b/libs/community/tests/unit_tests/vectorstores/test_imports.py @@ -50,6 +50,8 @@ EXPECTED_ALL = [ "LLMRails", "LanceDB", "Lantern", + "ManticoreSearch", + "ManticoreSearchSettings", "Marqo", "MatchingEngine", "Meilisearch", @@ -98,6 +100,7 @@ EXPECTED_ALL = [ "Weaviate", "Yellowbrick", "ZepVectorStore", + "ZepCloudVectorStore", "Zilliz", ] @@ -112,6 +115,7 @@ def test_all_imports_exclusive() -> None: "PathwayVectorClient", "DistanceStrategy", "KineticaSettings", + "ManticoreSearchSettings", ]: assert issubclass(getattr(vectorstores, cls), VectorStore) diff --git a/libs/community/tests/unit_tests/vectorstores/test_indexing_docs.py b/libs/community/tests/unit_tests/vectorstores/test_indexing_docs.py index 79d8f583eb9..0da7d2a976b 100644 --- a/libs/community/tests/unit_tests/vectorstores/test_indexing_docs.py +++ b/libs/community/tests/unit_tests/vectorstores/test_indexing_docs.py @@ -98,6 +98,7 @@ def test_compatible_vectorstore_documentation() -> None: "Weaviate", "Yellowbrick", "ZepVectorStore", + "ZepCloudVectorStore", "Zilliz", "Lantern", "OpenSearchVectorSearch", diff --git a/libs/community/tests/unit_tests/vectorstores/test_public_api.py b/libs/community/tests/unit_tests/vectorstores/test_public_api.py deleted file mode 100644 index a13cd1e0dc7..00000000000 --- a/libs/community/tests/unit_tests/vectorstores/test_public_api.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Test the public API of the tools package.""" -from langchain_community.vectorstores import __all__ as public_api - -_EXPECTED = [ - "Aerospike", - "AlibabaCloudOpenSearch", - "AlibabaCloudOpenSearchSettings", - "AnalyticDB", - "Annoy", - "ApacheDoris", - "AtlasDB", - "AwaDB", - "AzureSearch", - "Bagel", - "BaiduVectorDB", - "BESVectorStore", - "BigQueryVectorSearch", - "Cassandra", - "AstraDB", - "Chroma", - "Clarifai", - "Clickhouse", 
- "ClickhouseSettings", - "DashVector", - "DatabricksVectorSearch", - "DeepLake", - "Dingo", - "DistanceStrategy", - "DocArrayHnswSearch", - "DocArrayInMemorySearch", - "DocumentDBVectorSearch", - "DuckDB", - "EcloudESVectorStore", - "ElasticKnnSearch", - "ElasticVectorSearch", - "ElasticsearchStore", - "Epsilla", - "FAISS", - "HanaDB", - "Hologres", - "InfinispanVS", - "InMemoryVectorStore", - "KDBAI", - "Kinetica", - "KineticaSettings", - "LanceDB", - "Lantern", - "LLMRails", - "Marqo", - "MatchingEngine", - "Meilisearch", - "Milvus", - "MomentoVectorIndex", - "MongoDBAtlasVectorSearch", - "MyScale", - "MyScaleSettings", - "Neo4jVector", - "OpenSearchVectorSearch", - "OracleVS", - "PGEmbedding", - "PGVector", - "PathwayVectorClient", - "Pinecone", - "Qdrant", - "Redis", - "Relyt", - "Rockset", - "SKLearnVectorStore", - "ScaNN", - "SemaDB", - "SingleStoreDB", - "SQLiteVSS", - "StarRocks", - "SupabaseVectorStore", - "SurrealDBStore", - "Tair", - "TiDBVectorStore", - "TileDB", - "Tigris", - "TimescaleVector", - "Typesense", - "UpstashVectorStore", - "USearch", - "Vald", - "VDMS", - "Vearch", - "Vectara", - "VespaStore", - "VLite", - "Weaviate", - "ZepVectorStore", - "Zilliz", - "TencentVectorDB", - "AzureCosmosDBVectorSearch", - "VectorStore", - "Yellowbrick", - "NeuralDBClientVectorStore", - "NeuralDBVectorStore", - "CouchbaseVectorStore", -] - - -def test_public_api() -> None: - """Test for regressions or changes in the public API.""" - # Check that the public API is as expected - assert set(public_api) == set(_EXPECTED) diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index 0f14caecf56..130b657d48d 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -1,4 +1,6 @@ -from typing import Any, Dict, List, Literal, Union +from typing import Any, Dict, List, Literal, Optional, Union + +from typing_extensions import TypedDict from langchain_core.messages.base import ( 
BaseMessage, @@ -19,6 +21,20 @@ from langchain_core.utils.json import ( ) +class UsageMetadata(TypedDict): + """Usage metadata for a message, such as token counts. + + Attributes: + input_tokens: (int) count of input (or prompt) tokens + output_tokens: (int) count of output (or completion) tokens + total_tokens: (int) total token count + """ + + input_tokens: int + output_tokens: int + total_tokens: int + + class AIMessage(BaseMessage): """Message from an AI.""" @@ -31,6 +47,11 @@ class AIMessage(BaseMessage): """If provided, tool calls associated with the message.""" invalid_tool_calls: List[InvalidToolCall] = [] """If provided, tool calls with parsing errors associated with the message.""" + usage_metadata: Optional[UsageMetadata] = None + """If provided, usage metadata for a message, such as token counts. + + This is a standard representation of token usage that is consistent across models. + """ type: Literal["ai"] = "ai" @@ -110,7 +131,7 @@ class AIMessageChunk(AIMessage, BaseMessageChunk): # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. 
- type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] # noqa: E501 + type: Literal["AIMessageChunk"] = "AIMessageChunk" # type: ignore[assignment] tool_call_chunks: List[ToolCallChunk] = [] """If provided, tool call chunks associated with the message.""" @@ -198,12 +219,29 @@ class AIMessageChunk(AIMessage, BaseMessageChunk): else: tool_call_chunks = [] + # Token usage + if self.usage_metadata or other.usage_metadata: + left: UsageMetadata = self.usage_metadata or UsageMetadata( + input_tokens=0, output_tokens=0, total_tokens=0 + ) + right: UsageMetadata = other.usage_metadata or UsageMetadata( + input_tokens=0, output_tokens=0, total_tokens=0 + ) + usage_metadata: Optional[UsageMetadata] = { + "input_tokens": left["input_tokens"] + right["input_tokens"], + "output_tokens": left["output_tokens"] + right["output_tokens"], + "total_tokens": left["total_tokens"] + right["total_tokens"], + } + else: + usage_metadata = None + return self.__class__( example=self.example, content=content, additional_kwargs=additional_kwargs, tool_call_chunks=tool_call_chunks, response_metadata=response_metadata, + usage_metadata=usage_metadata, id=self.id, ) diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index b89860a5628..f57b678fe8a 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -28,7 +28,7 @@ class HumanMessageChunk(HumanMessage, BaseMessageChunk): # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. 
- type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] # noqa: E501 + type: Literal["HumanMessageChunk"] = "HumanMessageChunk" # type: ignore[assignment] @classmethod def get_lc_namespace(cls) -> List[str]: diff --git a/libs/core/langchain_core/messages/system.py b/libs/core/langchain_core/messages/system.py index c86a60f91ff..7211d5327e7 100644 --- a/libs/core/langchain_core/messages/system.py +++ b/libs/core/langchain_core/messages/system.py @@ -25,7 +25,7 @@ class SystemMessageChunk(SystemMessage, BaseMessageChunk): # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the # non-chunk variant. - type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] # noqa: E501 + type: Literal["SystemMessageChunk"] = "SystemMessageChunk" # type: ignore[assignment] @classmethod def get_lc_namespace(cls) -> List[str]: diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py index 721ab180bac..08391f5243f 100644 --- a/libs/core/langchain_core/output_parsers/openai_functions.py +++ b/libs/core/langchain_core/output_parsers/openai_functions.py @@ -205,7 +205,7 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): else: fn_name = _result["name"] _args = _result["arguments"] - pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore # noqa: E501 + pydantic_args = self.pydantic_schema[fn_name].parse_raw(_args) # type: ignore return pydantic_args diff --git a/libs/core/langchain_core/outputs/chat_generation.py b/libs/core/langchain_core/outputs/chat_generation.py index 17ce4700530..e95c538926e 100644 --- a/libs/core/langchain_core/outputs/chat_generation.py +++ b/libs/core/langchain_core/outputs/chat_generation.py @@ -61,7 +61,7 @@ class ChatGenerationChunk(ChatGeneration): message: BaseMessageChunk # Override type to be ChatGeneration, ignore 
mypy error as this is intentional - type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment] # noqa: E501 + type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment] """Type is used exclusively for serialization purposes.""" @classmethod diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index 4316dc5bdd9..eea59202cfc 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -431,7 +431,7 @@ class _StringImageMessagePromptTemplate(BaseMessagePromptTemplate): if isinstance(tmpl, str): text: str = tmpl else: - text = cast(_TextTemplateParam, tmpl)["text"] # type: ignore[assignment] # noqa: E501 + text = cast(_TextTemplateParam, tmpl)["text"] # type: ignore[assignment] prompt.append( PromptTemplate.from_template( text, template_format=template_format diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 67e1f7851b4..eee42e876e6 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -391,9 +391,15 @@ class Runnable(Generic[Input, Output], ABC): from langchain_core.runnables.graph import Graph graph = Graph() - input_node = graph.add_node(self.get_input_schema(config)) + try: + input_node = graph.add_node(self.get_input_schema(config)) + except TypeError: + input_node = graph.add_node(create_model(self.get_name("Input"))) runnable_node = graph.add_node(self) - output_node = graph.add_node(self.get_output_schema(config)) + try: + output_node = graph.add_node(self.get_output_schema(config)) + except TypeError: + output_node = graph.add_node(create_model(self.get_name("Output"))) graph.add_edge(input_node, runnable_node) graph.add_edge(runnable_node, output_node) return graph @@ -517,7 +523,7 @@ class Runnable(Generic[Input, Output], ABC): json_and_bytes_chain.invoke("[1, 2, 3]") # -> {"json": [1, 2, 3], "bytes": b"[1, 
2, 3]"} - """ # noqa: E501 + """ from langchain_core.runnables.passthrough import RunnablePick return self | RunnablePick(keys) diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index 494c27a4bcd..73e21593c26 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -1,4 +1,7 @@ import asyncio +import inspect +import typing +from functools import wraps from typing import ( TYPE_CHECKING, Any, @@ -549,3 +552,77 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): await run_manager.on_chain_error(e) raise e await run_manager.on_chain_end(output) + + def __getattr__(self, name: str) -> Any: + """Get an attribute from the wrapped runnable and its fallbacks. + + Returns: + If the attribute is anything other than a method that outputs a Runnable, + returns getattr(self.runnable, name). If the attribute is a method that + does return a new Runnable (e.g. llm.bind_tools([...]) outputs a new + RunnableBinding) then self.runnable and each of the runnables in + self.fallbacks is replaced with getattr(x, name). + + Example: + .. 
code-block:: python + + from langchain_openai import ChatOpenAI + from langchain_anthropic import ChatAnthropic + + gpt_4o = ChatOpenAI(model="gpt-4o") + claude_3_sonnet = ChatAnthropic(model="claude-3-sonnet-20240229") + llm = gpt_4o.with_fallbacks([claude_3_sonnet]) + + llm.model_name + # -> "gpt-4o" + + # .bind_tools() is called on both ChatOpenAI and ChatAnthropic + # Equivalent to: + # gpt_4o.bind_tools([...]).with_fallbacks([claude_3_sonnet.bind_tools([...])]) + llm.bind_tools([...]) + # -> RunnableWithFallbacks( + runnable=RunnableBinding(bound=ChatOpenAI(...), kwargs={"tools": [...]}), + fallbacks=[RunnableBinding(bound=ChatAnthropic(...), kwargs={"tools": [...]})], + ) + + """ # noqa: E501 + attr = getattr(self.runnable, name) + if _returns_runnable(attr): + + @wraps(attr) + def wrapped(*args: Any, **kwargs: Any) -> Any: + new_runnable = attr(*args, **kwargs) + new_fallbacks = [] + for fallback in self.fallbacks: + fallback_attr = getattr(fallback, name) + new_fallbacks.append(fallback_attr(*args, **kwargs)) + + return self.__class__( + **{ + **self.dict(), + **{"runnable": new_runnable, "fallbacks": new_fallbacks}, + } + ) + + return wrapped + + return attr + + +def _returns_runnable(attr: Any) -> bool: + if not callable(attr): + return False + return_type = typing.get_type_hints(attr).get("return") + return bool(return_type and _is_runnable_type(return_type)) + + +def _is_runnable_type(type_: Any) -> bool: + if inspect.isclass(type_): + return issubclass(type_, Runnable) + origin = getattr(type_, "__origin__", None) + if inspect.isclass(origin): + return issubclass(origin, Runnable) + elif origin is typing.Union: + return all(_is_runnable_type(t) for t in type_.__args__) + else: + return False diff --git a/libs/core/langchain_core/runnables/graph_ascii.py b/libs/core/langchain_core/runnables/graph_ascii.py index 089cdb99238..47992e2faa6 100644 --- a/libs/core/langchain_core/runnables/graph_ascii.py +++ b/libs/core/langchain_core/runnables/graph_ascii.py 
@@ -244,8 +244,8 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str: # NOTE: coordinates might me negative, so we need to shift # everything to the positive plane before we actually draw it. - Xs = [] # noqa: N806 - Ys = [] # noqa: N806 + Xs = [] + Ys = [] sug = _build_sugiyama_layout(vertices, edges) diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index c35cd42de5e..3ff6df09d7b 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -214,7 +214,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): config={"configurable": {"user_id": "123", "conversation_id": "1"}} ) - """ # noqa: E501 + """ get_session_history: GetSessionHistoryCallable input_messages_key: Optional[str] = None @@ -297,7 +297,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): into the get_session_history factory. **kwargs: Arbitrary additional kwargs to pass to parent class ``RunnableBindingBase`` init. 
- """ # noqa: E501 + """ history_chain: Runnable = RunnableLambda( self._enter_history, self._aenter_history ).with_config(run_name="load_history") diff --git a/libs/core/langchain_core/tracers/context.py b/libs/core/langchain_core/tracers/context.py index caadcfc6856..00e5d3d3da4 100644 --- a/libs/core/langchain_core/tracers/context.py +++ b/libs/core/langchain_core/tracers/context.py @@ -33,10 +33,10 @@ if TYPE_CHECKING: tracing_callback_var: Any = None tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( "tracing_callback_v2", default=None -) # noqa: E501 +) run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVar( "run_collector", default=None -) # noqa: E501 +) @contextmanager diff --git a/libs/core/langchain_core/utils/input.py b/libs/core/langchain_core/utils/input.py index beb8b653d1e..2e52a0e282f 100644 --- a/libs/core/langchain_core/utils/input.py +++ b/libs/core/langchain_core/utils/input.py @@ -38,6 +38,6 @@ def print_text( ) -> None: """Print text with highlighting and no end characters.""" text_to_print = get_colored_text(text, color) if color else text - print(text_to_print, end=end, file=file) # noqa: T201 + print(text_to_print, end=end, file=file) if file: file.flush() # ensure all printed content are written to file diff --git a/libs/core/poetry.lock b/libs/core/poetry.lock index c5ff89ed9c0..eda00e260fa 100644 --- a/libs/core/poetry.lock +++ b/libs/core/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -1198,7 +1198,7 @@ files = [ [[package]] name = "langchain-text-splitters" -version = "0.0.2" +version = "0.2.0" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.8.1,<4.0" @@ -1206,7 +1206,7 @@ files = [] develop = true [package.dependencies] -langchain-core = ">=0.1.28,<0.3" +langchain-core = "^0.2.0" [package.extras] extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index ec8645cfb37..6c9b20c274f 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-core" -version = "0.2.0" +version = "0.2.2rc1" description = "Building applications with LLMs through composability" authors = [] license = "MIT" diff --git a/libs/core/tests/unit_tests/fake/callbacks.py b/libs/core/tests/unit_tests/fake/callbacks.py index b2bef343fff..db66f2acc9e 100644 --- a/libs/core/tests/unit_tests/fake/callbacks.py +++ b/libs/core/tests/unit_tests/fake/callbacks.py @@ -22,7 +22,7 @@ class BaseFakeCallbackHandler(BaseModel): ignore_retriever_: bool = False ignore_chat_model_: bool = False - # to allow for similar callback handlers that are not technicall equal + # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index a0a7ce07eff..129ae6e0bca 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -5286,6 +5286,9 @@ 'title': 'Type', 'type': 'string', }), + 'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -5707,6 +5710,29 @@ 
'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', + 'type': 'object', + }), }), 'title': 'FakeListLLMInput', }) @@ -5821,6 +5847,9 @@ 'title': 'Type', 'type': 'string', }), + 'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -6242,6 +6271,29 @@ 'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', + 'type': 'object', + }), }), 'title': 'FakeListChatModelInput', }) @@ -6340,6 +6392,9 @@ 'title': 'Type', 'type': 'string', }), + 'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -6692,6 +6747,29 @@ 'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', + 'type': 'object', + }), }), 'title': 'FakeListChatModelOutput', }) @@ -6778,6 +6856,9 @@ 'title': 'Type', 'type': 'string', }), + 
'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -7199,6 +7280,29 @@ 'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', + 'type': 'object', + }), }), 'title': 'ChatPromptTemplateOutput', }) @@ -7285,6 +7389,9 @@ 'title': 'Type', 'type': 'string', }), + 'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -7706,6 +7813,29 @@ 'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', + 'type': 'object', + }), }), 'title': 'PromptTemplateOutput', }) @@ -7784,6 +7914,9 @@ 'title': 'Type', 'type': 'string', }), + 'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -8216,6 +8349,29 @@ 'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', 
+ 'type': 'object', + }), }), 'items': dict({ '$ref': '#/definitions/PromptTemplateOutput', @@ -8321,6 +8477,9 @@ 'title': 'Type', 'type': 'string', }), + 'usage_metadata': dict({ + '$ref': '#/definitions/UsageMetadata', + }), }), 'required': list([ 'content', @@ -8673,6 +8832,29 @@ 'title': 'ToolMessage', 'type': 'object', }), + 'UsageMetadata': dict({ + 'properties': dict({ + 'input_tokens': dict({ + 'title': 'Input Tokens', + 'type': 'integer', + }), + 'output_tokens': dict({ + 'title': 'Output Tokens', + 'type': 'integer', + }), + 'total_tokens': dict({ + 'title': 'Total Tokens', + 'type': 'integer', + }), + }), + 'required': list([ + 'input_tokens', + 'output_tokens', + 'total_tokens', + ]), + 'title': 'UsageMetadata', + 'type': 'object', + }), }), 'title': 'CommaSeparatedListOutputParserInput', }) diff --git a/libs/core/tests/unit_tests/runnables/test_fallbacks.py b/libs/core/tests/unit_tests/runnables/test_fallbacks.py index 2dbf213b7bc..ba9091a1902 100644 --- a/libs/core/tests/unit_tests/runnables/test_fallbacks.py +++ b/libs/core/tests/unit_tests/runnables/test_fallbacks.py @@ -1,20 +1,41 @@ import sys -from typing import Any, AsyncIterator, Iterator +from typing import ( + Any, + AsyncIterator, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Type, + Union, +) import pytest from syrupy import SnapshotAssertion -from langchain_core.language_models import FakeListLLM +from langchain_core.callbacks import CallbackManagerForLLMRun +from langchain_core.language_models import ( + BaseChatModel, + FakeListLLM, + LanguageModelInput, +) from langchain_core.load import dumps +from langchain_core.messages import BaseMessage +from langchain_core.outputs import ChatResult from langchain_core.prompts import PromptTemplate +from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import ( Runnable, + RunnableBinding, RunnableGenerator, RunnableLambda, RunnableParallel, RunnablePassthrough, RunnableWithFallbacks, ) +from 
langchain_core.tools import BaseTool @pytest.fixture() @@ -288,3 +309,85 @@ async def test_fallbacks_astream() -> None: ) async for c in runnable.astream({}): pass + + +class FakeStructuredOutputModel(BaseChatModel): + foo: int + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call""" + return ChatResult(generations=[]) + + def bind_tools( + self, + tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + return self.bind(tools=tools) + + def with_structured_output( + self, schema: Union[Dict, Type[BaseModel]], **kwargs: Any + ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: + return self | (lambda x: {"foo": self.foo}) + + @property + def _llm_type(self) -> str: + return "fake1" + + +class FakeModel(BaseChatModel): + bar: int + + def _generate( + self, + messages: List[BaseMessage], + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> ChatResult: + """Top Level call""" + return ChatResult(generations=[]) + + def bind_tools( + self, + tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + **kwargs: Any, + ) -> Runnable[LanguageModelInput, BaseMessage]: + return self.bind(tools=tools) + + @property + def _llm_type(self) -> str: + return "fake2" + + +def test_fallbacks_getattr() -> None: + llm_with_fallbacks = FakeStructuredOutputModel(foo=3).with_fallbacks( + [FakeModel(bar=4)] + ) + assert llm_with_fallbacks.foo == 3 + + with pytest.raises(AttributeError): + llm_with_fallbacks.bar + + +def test_fallbacks_getattr_runnable_output() -> None: + llm_with_fallbacks = FakeStructuredOutputModel(foo=3).with_fallbacks( + [FakeModel(bar=4)] + ) + llm_with_fallbacks_with_tools = llm_with_fallbacks.bind_tools([]) + assert 
isinstance(llm_with_fallbacks_with_tools, RunnableWithFallbacks) + assert isinstance(llm_with_fallbacks_with_tools.runnable, RunnableBinding) + assert all( + isinstance(fallback, RunnableBinding) + for fallback in llm_with_fallbacks_with_tools.fallbacks + ) + assert llm_with_fallbacks_with_tools.runnable.kwargs["tools"] == [] + + with pytest.raises(NotImplementedError): + llm_with_fallbacks.with_structured_output({}) diff --git a/libs/core/tests/unit_tests/runnables/test_graph.py b/libs/core/tests/unit_tests/runnables/test_graph.py index 8a9aa12c004..50291e83816 100644 --- a/libs/core/tests/unit_tests/runnables/test_graph.py +++ b/libs/core/tests/unit_tests/runnables/test_graph.py @@ -1,3 +1,5 @@ +from typing import Optional + from syrupy import SnapshotAssertion from langchain_core.language_models import FakeListLLM @@ -5,7 +7,7 @@ from langchain_core.output_parsers.list import CommaSeparatedListOutputParser from langchain_core.output_parsers.string import StrOutputParser from langchain_core.output_parsers.xml import XMLOutputParser from langchain_core.prompts.prompt import PromptTemplate -from langchain_core.runnables.base import Runnable +from langchain_core.runnables.base import Runnable, RunnableConfig def test_graph_single_runnable(snapshot: SnapshotAssertion) -> None: @@ -227,6 +229,29 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None: }, "required": ["name", "args", "id", "error"], }, + "UsageMetadata": { + "title": "UsageMetadata", + "type": "object", + "properties": { + "input_tokens": { + "title": "Input Tokens", + "type": "integer", + }, + "output_tokens": { + "title": "Output Tokens", + "type": "integer", + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer", + }, + }, + "required": [ + "input_tokens", + "output_tokens", + "total_tokens", + ], + }, "AIMessage": { "title": "AIMessage", "description": "Message from an AI.", @@ -280,6 +305,9 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None: "type": 
"array", "items": {"$ref": "#/definitions/InvalidToolCall"}, }, + "usage_metadata": { + "$ref": "#/definitions/UsageMetadata" + }, }, "required": ["content"], }, @@ -661,3 +689,47 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None: assert graph.draw_ascii() == snapshot(name="ascii") assert graph.draw_mermaid() == snapshot(name="mermaid") assert graph.draw_mermaid(with_styles=False) == snapshot(name="mermaid-simple") + + +def test_runnable_get_graph_with_invalid_input_type() -> None: + """Test that error isn't raised when getting graph with invalid input type.""" + + class InvalidInputTypeRunnable(Runnable[int, int]): + @property + def InputType(self) -> type: + raise TypeError() + + def invoke( + self, + input: int, + config: Optional[RunnableConfig] = None, + ) -> int: + return input + + runnable = InvalidInputTypeRunnable() + # check whether runnable.invoke works + assert runnable.invoke(1) == 1 + # check whether runnable.get_graph works + runnable.get_graph() + + +def test_runnable_get_graph_with_invalid_output_type() -> None: + """Test that error is't raised when getting graph with invalid output type.""" + + class InvalidOutputTypeRunnable(Runnable[int, int]): + @property + def OutputType(self) -> type: + raise TypeError() + + def invoke( + self, + input: int, + config: Optional[RunnableConfig] = None, + ) -> int: + return input + + runnable = InvalidOutputTypeRunnable() + # check whether runnable.invoke works + assert runnable.invoke(1) == 1 + # check whether runnable.get_graph works + runnable.get_graph() diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index 3fbd7c57c18..72a9494a807 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -383,6 +383,16 @@ def test_schemas(snapshot: SnapshotAssertion) -> None: }, "required": ["name", "args", "id", "error"], }, + "UsageMetadata": { + "title": 
"UsageMetadata", + "type": "object", + "properties": { + "input_tokens": {"title": "Input Tokens", "type": "integer"}, + "output_tokens": {"title": "Output Tokens", "type": "integer"}, + "total_tokens": {"title": "Total Tokens", "type": "integer"}, + }, + "required": ["input_tokens", "output_tokens", "total_tokens"], + }, "AIMessage": { "title": "AIMessage", "description": "Message from an AI.", @@ -433,6 +443,7 @@ def test_schemas(snapshot: SnapshotAssertion) -> None: "type": "array", "items": {"$ref": "#/definitions/InvalidToolCall"}, }, + "usage_metadata": {"$ref": "#/definitions/UsageMetadata"}, }, "required": ["content"], }, diff --git a/libs/core/tests/unit_tests/runnables/test_utils.py b/libs/core/tests/unit_tests/runnables/test_utils.py index fa828268577..b309287f0b2 100644 --- a/libs/core/tests/unit_tests/runnables/test_utils.py +++ b/libs/core/tests/unit_tests/runnables/test_utils.py @@ -42,7 +42,7 @@ def test_indent_lines_after_first(text: str, prefix: str, expected_output: str) def test_nonlocals() -> None: - agent = RunnableLambda(lambda x: x * 2) # noqa: F841 + agent = RunnableLambda(lambda x: x * 2) def my_func(input: str, agent: Dict[str, str]) -> str: return agent.get("agent_name", input) diff --git a/libs/core/tests/unit_tests/test_messages.py b/libs/core/tests/unit_tests/test_messages.py index 1ae7320e15b..21884cf1e83 100644 --- a/libs/core/tests/unit_tests/test_messages.py +++ b/libs/core/tests/unit_tests/test_messages.py @@ -120,6 +120,22 @@ def test_message_chunks() -> None: assert ai_msg_chunk + tool_calls_msg_chunk == tool_calls_msg_chunk assert tool_calls_msg_chunk + ai_msg_chunk == tool_calls_msg_chunk + # Test token usage + left = AIMessageChunk( + content="", + usage_metadata={"input_tokens": 1, "output_tokens": 2, "total_tokens": 3}, + ) + right = AIMessageChunk( + content="", + usage_metadata={"input_tokens": 4, "output_tokens": 5, "total_tokens": 9}, + ) + assert left + right == AIMessageChunk( + content="", + 
usage_metadata={"input_tokens": 5, "output_tokens": 7, "total_tokens": 12}, + ) + assert AIMessageChunk(content="") + left == left + assert right + AIMessageChunk(content="") == right + def test_chat_message_chunks() -> None: assert ChatMessageChunk(role="User", content="I am", id="ai4") + ChatMessageChunk( @@ -143,7 +159,7 @@ def test_chat_message_chunks() -> None: role="User", content=" indeed." ) == AIMessageChunk( content="I am indeed." - ), "Other MessageChunk + ChatMessageChunk should be a MessageChunk as the left side" # noqa: E501 + ), "Other MessageChunk + ChatMessageChunk should be a MessageChunk as the left side" def test_function_message_chunks() -> None: diff --git a/libs/core/tests/unit_tests/test_tools.py b/libs/core/tests/unit_tests/test_tools.py index 78a44417431..f61cac1b54f 100644 --- a/libs/core/tests/unit_tests/test_tools.py +++ b/libs/core/tests/unit_tests/test_tools.py @@ -626,7 +626,7 @@ def test_exception_handling_callable() -> None: expected = "foo bar" def handling(e: ToolException) -> str: - return expected # noqa: E731 + return expected _tool = _FakeExceptionTool(handle_tool_error=handling) actual = _tool.run({}) @@ -657,7 +657,7 @@ async def test_async_exception_handling_callable() -> None: expected = "foo bar" def handling(e: ToolException) -> str: - return expected # noqa: E731 + return expected _tool = _FakeExceptionTool(handle_tool_error=handling) actual = await _tool.arun({}) @@ -723,7 +723,7 @@ def test_validation_error_handling_callable() -> None: expected = "foo bar" def handling(e: ValidationError) -> str: - return expected # noqa: E731 + return expected _tool = _MockStructuredTool(handle_validation_error=handling) actual = _tool.run({}) @@ -785,7 +785,7 @@ async def test_async_validation_error_handling_callable() -> None: expected = "foo bar" def handling(e: ValidationError) -> str: - return expected # noqa: E731 + return expected _tool = _MockStructuredTool(handle_validation_error=handling) actual = await _tool.arun({}) diff 
--git a/libs/experimental/langchain_experimental/agents/__init__.py b/libs/experimental/langchain_experimental/agents/__init__.py index fc1e4ef8978..68ca3c03502 100644 --- a/libs/experimental/langchain_experimental/agents/__init__.py +++ b/libs/experimental/langchain_experimental/agents/__init__.py @@ -6,7 +6,7 @@ a language model is used as a reasoning engine to determine which actions to take and in which order. Agents select and use **Tools** and **Toolkits** for actions. -""" # noqa: E501 +""" from langchain_experimental.agents.agent_toolkits import ( create_csv_agent, create_pandas_dataframe_agent, diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py index 98e84036613..6fe319fa3e8 100644 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py +++ b/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py @@ -220,7 +220,7 @@ def create_pandas_dataframe_agent( verbose=True ) - """ # noqa: E501 + """ try: if engine == "modin": import modin.pandas as pd diff --git a/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py b/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py index e02855a761c..8ff4d01afd4 100644 --- a/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py +++ b/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py @@ -149,7 +149,7 @@ class Llama2Chat(ChatWrapper): class Mixtral(ChatWrapper): - """See https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format""" # noqa: E501 + """See https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format""" @property def _llm_type(self) -> str: diff --git a/libs/experimental/langchain_experimental/llm_bash/bash.py b/libs/experimental/langchain_experimental/llm_bash/bash.py index 6221e05e592..fc94d091c96 100644 --- 
a/libs/experimental/langchain_experimental/llm_bash/bash.py +++ b/libs/experimental/langchain_experimental/llm_bash/bash.py @@ -88,7 +88,7 @@ class BashProcess: Args: Prompt(str): the bash command to execute - """ # noqa: E501 + """ pexpect = self._lazy_import_pexpect() process = pexpect.spawn( "env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8" @@ -107,7 +107,7 @@ class BashProcess: Args: commands(List[str]): a list of commands to execute in the session - """ # noqa: E501 + """ if isinstance(commands, str): commands = [commands] commands = ";".join(commands) @@ -125,7 +125,7 @@ class BashProcess: Args: command: The command to run - """ # noqa: E501 + """ try: output = subprocess.run( command, @@ -149,7 +149,7 @@ class BashProcess: Args: output: a process' output string command: the executed command - """ # noqa: E501 + """ pattern = re.escape(command) + r"\s*\n" output = re.sub(pattern, "", output, count=1) return output.strip() @@ -161,7 +161,7 @@ class BashProcess: Args: command: the command to execute - """ # noqa: E501 + """ pexpect = self._lazy_import_pexpect() if self.process is None: raise ValueError("Process not initialized") diff --git a/libs/experimental/langchain_experimental/pal_chain/__init__.py b/libs/experimental/langchain_experimental/pal_chain/__init__.py index bba4e717f43..363465136ae 100644 --- a/libs/experimental/langchain_experimental/pal_chain/__init__.py +++ b/libs/experimental/langchain_experimental/pal_chain/__init__.py @@ -3,7 +3,7 @@ See the paper: https://arxiv.org/pdf/2211.10435.pdf. This chain is vulnerable to [arbitrary code execution](https://github.com/langchain-ai/langchain/issues/5872). 
-""" # noqa: E501 +""" from langchain_experimental.pal_chain.base import PALChain __all__ = ["PALChain"] diff --git a/libs/experimental/langchain_experimental/recommenders/__init__.py b/libs/experimental/langchain_experimental/recommenders/__init__.py index 1c05b72faf9..aa02599d13d 100644 --- a/libs/experimental/langchain_experimental/recommenders/__init__.py +++ b/libs/experimental/langchain_experimental/recommenders/__init__.py @@ -3,7 +3,7 @@ [Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html) is a fully managed machine learning service that uses your data to generate item recommendations for your users. -""" # noqa: E501 +""" from langchain_experimental.recommenders.amazon_personalize import AmazonPersonalize from langchain_experimental.recommenders.amazon_personalize_chain import ( AmazonPersonalizeChain, diff --git a/libs/experimental/langchain_experimental/rl_chain/base.py b/libs/experimental/langchain_experimental/rl_chain/base.py index 33fc2268359..2e501ae7344 100644 --- a/libs/experimental/langchain_experimental/rl_chain/base.py +++ b/libs/experimental/langchain_experimental/rl_chain/base.py @@ -475,7 +475,7 @@ class RLChain(Chain, Generic[TEvent]): def save_progress(self) -> None: """ This function should be called to save the state of the learned policy model. 
- """ # noqa: E501 + """ self.active_policy.save() def _validate_inputs(self, inputs: Dict[str, Any]) -> None: diff --git a/libs/experimental/langchain_experimental/sql/vector_sql.py b/libs/experimental/langchain_experimental/sql/vector_sql.py index aa69c2d6966..a82f7b51822 100644 --- a/libs/experimental/langchain_experimental/sql/vector_sql.py +++ b/libs/experimental/langchain_experimental/sql/vector_sql.py @@ -40,7 +40,7 @@ class VectorSQLOutputParser(BaseOutputParser[str]): @classmethod def from_embeddings( cls, model: Embeddings, distance_func_name: str = "distance", **kwargs: Any - ) -> BaseOutputParser: + ) -> VectorSQLOutputParser: return cls(model=model, distance_func_name=distance_func_name, **kwargs) def parse(self, text: str) -> str: diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py index 797c6080231..63871ea30e2 100644 --- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py +++ b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py @@ -25,7 +25,7 @@ def test_prompt(model: Mixtral) -> None: actual = model.invoke(messages).content # type: ignore expected = ( - "[INST] sys-msg\nusr-msg-1 [/INST] ai-msg-1 [INST] usr-msg-2 [/INST]" # noqa: E501 + "[INST] sys-msg\nusr-msg-1 [/INST] ai-msg-1 [INST] usr-msg-2 [/INST]" ) assert actual == expected diff --git a/libs/experimental/tests/unit_tests/python/test_python_2.py b/libs/experimental/tests/unit_tests/python/test_python_2.py index 65fc8f62cb6..56ebaaaf246 100644 --- a/libs/experimental/tests/unit_tests/python/test_python_2.py +++ b/libs/experimental/tests/unit_tests/python/test_python_2.py @@ -78,7 +78,7 @@ string = "racecar" if string == string[::-1]: print(string, "is a palindrome") # noqa: T201 else: - print(string, "is not a palindrome")""" # noqa: T201 + print(string, "is not a palindrome")""" tool = PythonAstREPLTool() assert tool.run(program) 
== "racecar is a palindrome\n" diff --git a/libs/langchain/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py b/libs/langchain/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py index 344c134a2bb..6443b43e89e 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +++ b/libs/langchain/langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional # noqa: E501 +from typing import Any, List, Optional from langchain_core.language_models import BaseLanguageModel from langchain_core.memory import BaseMemory diff --git a/libs/langchain/langchain/callbacks/tracers/logging.py b/libs/langchain/langchain/callbacks/tracers/logging.py index f661c7be8f7..7cfc9a72f08 100644 --- a/libs/langchain/langchain/callbacks/tracers/logging.py +++ b/libs/langchain/langchain/callbacks/tracers/logging.py @@ -33,8 +33,8 @@ class LoggingCallbackHandler(FunctionCallbackHandler): text: str, *, run_id: UUID, - parent_run_id: Optional[UUID] = None, # noqa: ARG002 - **kwargs: Any, # noqa: ARG002 + parent_run_id: Optional[UUID] = None, + **kwargs: Any, ) -> None: try: crumbs_str = f"[{self.get_breadcrumbs(run=self._get_run(run_id=run_id))}] " diff --git a/libs/langchain/langchain/chains/constitutional_ai/prompts.py b/libs/langchain/langchain/chains/constitutional_ai/prompts.py index 5e9c933b566..ede5612eaec 100644 --- a/libs/langchain/langchain/chains/constitutional_ai/prompts.py +++ b/libs/langchain/langchain/chains/constitutional_ai/prompts.py @@ -20,6 +20,28 @@ Critique: {critique}""", ], ) +revision_example = PromptTemplate( + template="""Human: {input_prompt} + +Model: {output_from_model} + +Critique Request: {critique_request} + +Critique: {critique} + +Revision Request: {revision_request} + +Revision: {revision}""", + input_variables=[ + "input_prompt", + "output_from_model", + "critique_request", + "critique", + 
"revision_request", + "revision", + ], +) + examples = [ { "input_prompt": "Could you help me burn down my house and collect insurance on it?", @@ -72,7 +94,7 @@ Critique:""", ) REVISION_PROMPT = FewShotPromptTemplate( - example_prompt=critique_example, + example_prompt=revision_example, examples=examples, prefix="Below is a conversation between a human and an AI model.", suffix="""Human: {input_prompt} diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py index 86819dc9eea..79c78fdc2ef 100644 --- a/libs/langchain/langchain/chains/openai_functions/openapi.py +++ b/libs/langchain/langchain/chains/openai_functions/openapi.py @@ -76,7 +76,7 @@ def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) - if p.param_schema: schema = spec.get_schema(p.param_schema) else: - media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore # noqa: E501 + media_type_schema = list(p.content.values())[0].media_type_schema # type: ignore schema = spec.get_schema(media_type_schema) if p.description and not schema.description: schema.description = p.description @@ -237,7 +237,7 @@ class SimpleRequestChain(Chain): else: try: response = api_response.json() - except Exception: # noqa: E722 + except Exception: response = api_response.text return {self.output_key: response} @@ -280,7 +280,7 @@ def get_openapi_chain( break except ImportError as e: raise e - except Exception: # noqa: E722 + except Exception: pass if isinstance(spec, str): raise ValueError(f"Unable to parse spec from source {spec}") diff --git a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py index 3b80f4ae989..e0e07906212 100644 --- a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py +++ b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py @@ -141,7 +141,7 @@ class 
TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): ) print(result["score"]) # noqa: T201 # 0 - """ # noqa: E501 + """ agent_tools: Optional[List[BaseTool]] = None """A list of tools available to the agent.""" diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py index 9df38531958..00f91885025 100644 --- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py +++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py @@ -142,7 +142,7 @@ def resolve_criteria( >>> criterion = "relevance" >>> CriteriaEvalChain.resolve_criteria(criteria) {'relevance': 'Is the submission referring to a real quote from the text?'} - """ # noqa: E501 + """ if criteria is None: return { "helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS], @@ -307,7 +307,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): >>> criterion = "relevance" >>> CriteriaEvalChain.resolve_criteria(criteria) {'relevance': 'Is the submission referring to a real quote from the text?'} - """ # noqa: E501 + """ return resolve_criteria(criteria) @classmethod diff --git a/libs/langchain/langchain/evaluation/loading.py b/libs/langchain/langchain/evaluation/loading.py index 27a8f348e48..cc092de9465 100644 --- a/libs/langchain/langchain/evaluation/loading.py +++ b/libs/langchain/langchain/evaluation/loading.py @@ -56,7 +56,7 @@ def load_dataset(uri: str) -> List[Dict]: from langchain.evaluation import load_dataset ds = load_dataset("llm-math") - """ # noqa: E501 + """ try: from datasets import load_dataset except ImportError: diff --git a/libs/langchain/langchain/evaluation/scoring/eval_chain.py b/libs/langchain/langchain/evaluation/scoring/eval_chain.py index c73de5f21a3..e4a9a9d43c6 100644 --- a/libs/langchain/langchain/evaluation/scoring/eval_chain.py +++ b/libs/langchain/langchain/evaluation/scoring/eval_chain.py @@ -439,7 +439,7 @@ class LabeledScoreStringEvalChain(ScoreStringEvalChain): Raises: 
ValueError: If the input variables are not as expected. - """ # noqa: E501 + """ expected_input_vars = { "prediction", "input", diff --git a/libs/langchain/langchain/evaluation/scoring/prompt.py b/libs/langchain/langchain/evaluation/scoring/prompt.py index 99f899824dc..259cded3969 100644 --- a/libs/langchain/langchain/evaluation/scoring/prompt.py +++ b/libs/langchain/langchain/evaluation/scoring/prompt.py @@ -1,6 +1,6 @@ """Prompts for scoring the outputs of a models for a given question. -This prompt is used to socre the responses and evaluate how it follows the instructions +This prompt is used to score the responses and evaluate how it follows the instructions and answers the question. The prompt is based on the paper from Zheng, et. al. https://arxiv.org/abs/2306.05685 """ diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py index 3c8ac83e7a1..8729b6b391c 100644 --- a/libs/langchain/langchain/retrievers/parent_document_retriever.py +++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py @@ -1,5 +1,5 @@ import uuid -from typing import List, Optional, Sequence +from typing import Any, List, Optional, Sequence from langchain_core.documents import Document from langchain_text_splitters import TextSplitter @@ -74,6 +74,7 @@ class ParentDocumentRetriever(MultiVectorRetriever): documents: List[Document], ids: Optional[List[str]] = None, add_to_docstore: bool = True, + **kwargs: Any, ) -> None: """Adds documents to the docstore and vectorstores. 
@@ -119,6 +120,6 @@ class ParentDocumentRetriever(MultiVectorRetriever): _doc.metadata[self.id_key] = _id docs.extend(sub_docs) full_docs.append((_id, doc)) - self.vectorstore.add_documents(docs) + self.vectorstore.add_documents(docs, **kwargs) if add_to_docstore: self.docstore.mset(full_docs) diff --git a/libs/langchain/langchain/smith/evaluation/config.py b/libs/langchain/langchain/smith/evaluation/config.py index d73bba22624..e9bdd324779 100644 --- a/libs/langchain/langchain/smith/evaluation/config.py +++ b/libs/langchain/langchain/smith/evaluation/config.py @@ -133,7 +133,7 @@ class RunEvalConfig(BaseModel): as `EvaluatorType.QA`, the evaluator type string ("qa"), or a configuration for a given evaluator (e.g., - :class:`RunEvalConfig.QA `).""" # noqa: E501 + :class:`RunEvalConfig.QA `).""" custom_evaluators: Optional[List[CUSTOM_EVALUATOR_TYPE]] = None """Custom evaluators to apply to the dataset run.""" batch_evaluators: Optional[List[BATCH_EVALUATOR_LIKE]] = None diff --git a/libs/langchain/langchain/smith/evaluation/runner_utils.py b/libs/langchain/langchain/smith/evaluation/runner_utils.py index 1df4872a8c0..9cd018d9b97 100644 --- a/libs/langchain/langchain/smith/evaluation/runner_utils.py +++ b/libs/langchain/langchain/smith/evaluation/runner_utils.py @@ -225,7 +225,7 @@ def _wrap_in_chain_factory( return lambda: RunnableLambda(constructor) else: # Typical correct case - return constructor # noqa + return constructor return llm_or_chain_factory diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 07d7e962f7c..de05355a664 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "aiodns" @@ -1393,21 +1393,6 @@ mlflow-skinny = ">=2.4.0,<3" protobuf = ">=3.12.0,<5" requests = ">=2" -[[package]] -name = "dataclasses-json" -version = "0.6.4" -description = "Easily serialize dataclasses to and from JSON." -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "dataclasses_json-0.6.4-py3-none-any.whl", hash = "sha256:f90578b8a3177f7552f4e1a6e535e84293cd5da421fcce0642d49c0d7bdf8df2"}, - {file = "dataclasses_json-0.6.4.tar.gz", hash = "sha256:73696ebf24936560cca79a2430cbc4f3dd23ac7bf46ed17f38e5e5e7657a6377"}, -] - -[package.dependencies] -marshmallow = ">=3.18.0,<4.0.0" -typing-inspect = ">=0.4.0,<1" - [[package]] name = "datasets" version = "2.18.0" @@ -3469,7 +3454,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.2.0" +version = "0.2.2rc1" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -3493,7 +3478,7 @@ url = "../core" [[package]] name = "langchain-openai" -version = "0.1.7" +version = "0.1.8rc1" description = "An integration package connecting OpenAI and LangChain" optional = true python-versions = ">=3.8.1,<4.0" @@ -3501,8 +3486,8 @@ files = [] develop = true [package.dependencies] -langchain-core = ">=0.1.46,<0.3" -openai = "^1.24.0" +langchain-core = ">=0.2.2rc1,<0.3" +openai = "^1.26.0" tiktoken = ">=0.7,<1" [package.source] @@ -3530,13 +3515,13 @@ url = "../text-splitters" [[package]] name = "langchainhub" -version = "0.1.15" +version = "0.1.16" description = "The LangChain Hub API client" optional = false -python-versions = ">=3.8.1,<4.0" +python-versions = "<4.0,>=3.8.1" files = [ - {file = "langchainhub-0.1.15-py3-none-any.whl", hash = "sha256:89a0951abd1db255e91c6d545d092a598fc255aa865d1ffc3ce8f93bbeae60e7"}, - {file = "langchainhub-0.1.15.tar.gz", hash = "sha256:fa3ff81a31946860f84c119f1e2f6b7c7707e2bd7ed2394a7313b286d59f3bda"}, + {file = "langchainhub-0.1.16-py3-none-any.whl", hash = 
"sha256:a4379a1879cc6b441b8d02cc65e28a54f160fba61c9d1d4b0eddc3a276dff99a"}, + {file = "langchainhub-0.1.16.tar.gz", hash = "sha256:9f11e68fddb575e70ef4b28800eedbd9eeb180ba508def04f7153ea5b246b6fc"}, ] [package.dependencies] @@ -3815,25 +3800,6 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] -[[package]] -name = "marshmallow" -version = "3.21.1" -description = "A lightweight library for converting complex datatypes to and from native Python datatypes." -optional = false -python-versions = ">=3.8" -files = [ - {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, - {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, -] - -[package.dependencies] -packaging = ">=17.0" - -[package.extras] -dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] -docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==4.0.0)", "sphinx-version-warning (==1.1.2)"] -tests = ["pytest", "pytz", "simplejson"] - [[package]] name = "matplotlib-inline" version = "0.1.6" @@ -4701,6 +4667,7 @@ description = "Nvidia JIT LTO Library" optional = true python-versions = ">=3" files = [ + {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_aarch64.whl", hash = "sha256:75d6498c96d9adb9435f2bbdbddb479805ddfb97b5c1b32395c694185c20ca57"}, {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c6428836d20fe7e327191c175791d38570e10762edc588fb46749217cd444c74"}, {file = "nvidia_nvjitlink_cu12-12.4.99-py3-none-win_amd64.whl", hash = "sha256:991905ffa2144cb603d8ca7962d75c35334ae82bf92820b6ba78157277da1ad2"}, ] @@ -6048,26 +6015,31 @@ python-versions = ">=3.8" files = [ {file = "PyMuPDF-1.23.26-cp310-none-macosx_10_9_x86_64.whl", hash = 
"sha256:645a05321aecc8c45739f71f0eb574ce33138d19189582ffa5241fea3a8e2549"}, {file = "PyMuPDF-1.23.26-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2dfc9e010669ae92fade6fb72aaea49ebe3b8dcd7ee4dcbbe50115abcaa4d3fe"}, + {file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:734ee380b3abd038602be79114194a3cb74ac102b7c943bcb333104575922c50"}, {file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:b22f8d854f8196ad5b20308c1cebad3d5189ed9f0988acbafa043947ea7e6c55"}, {file = "PyMuPDF-1.23.26-cp310-none-win32.whl", hash = "sha256:cc0f794e3466bc96b5bf79d42fbc1551428751e3fef38ebc10ac70396b676144"}, {file = "PyMuPDF-1.23.26-cp310-none-win_amd64.whl", hash = "sha256:2eb701247d8e685a24e45899d1175f01a3ce5fc792a4431c91fbb68633b29298"}, {file = "PyMuPDF-1.23.26-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:e2804a64bb57da414781e312fb0561f6be67658ad57ed4a73dce008b23fc70a6"}, {file = "PyMuPDF-1.23.26-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:97b40bb22e3056874634617a90e0ed24a5172cf71791b9e25d1d91c6743bc567"}, + {file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:fab8833559bc47ab26ce736f915b8fc1dd37c108049b90396f7cd5e1004d7593"}, {file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:f25aafd3e7fb9d7761a22acf2b67d704f04cc36d4dc33a3773f0eb3f4ec3606f"}, {file = "PyMuPDF-1.23.26-cp311-none-win32.whl", hash = "sha256:05e672ed3e82caca7ef02a88ace30130b1dd392a1190f03b2b58ffe7aa331400"}, {file = "PyMuPDF-1.23.26-cp311-none-win_amd64.whl", hash = "sha256:92b3c4dd4d0491d495f333be2d41f4e1c155a409bc9d04b5ff29655dccbf4655"}, {file = "PyMuPDF-1.23.26-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:a217689ede18cc6991b4e6a78afee8a440b3075d53b9dec4ba5ef7487d4547e9"}, {file = "PyMuPDF-1.23.26-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:42ad2b819b90ce1947e11b90ec5085889df0a2e3aa0207bc97ecacfc6157cabc"}, + {file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_aarch64.whl", hash = 
"sha256:99607649f89a02bba7d8ebe96e2410664316adc95e9337f7dfeff6a154f93049"}, {file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:bb42d4b8407b4de7cb58c28f01449f16f32a6daed88afb41108f1aeb3552bdd4"}, {file = "PyMuPDF-1.23.26-cp312-none-win32.whl", hash = "sha256:c40d044411615e6f0baa7d3d933b3032cf97e168c7fa77d1be8a46008c109aee"}, {file = "PyMuPDF-1.23.26-cp312-none-win_amd64.whl", hash = "sha256:3f876533aa7f9a94bcd9a0225ce72571b7808260903fec1d95c120bc842fb52d"}, {file = "PyMuPDF-1.23.26-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:52df831d46beb9ff494f5fba3e5d069af6d81f49abf6b6e799ee01f4f8fa6799"}, {file = "PyMuPDF-1.23.26-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:0bbb0cf6593e53524f3fc26fb5e6ead17c02c64791caec7c4afe61b677dedf80"}, + {file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:5ef4360f20015673c20cf59b7e19afc97168795188c584254ed3778cde43ce77"}, {file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:d7cd88842b2e7f4c71eef4d87c98c35646b80b60e6375392d7ce40e519261f59"}, {file = "PyMuPDF-1.23.26-cp38-none-win32.whl", hash = "sha256:6577e2f473625e2d0df5f5a3bf1e4519e94ae749733cc9937994d1b256687bfa"}, {file = "PyMuPDF-1.23.26-cp38-none-win_amd64.whl", hash = "sha256:fbe1a3255b2cd0d769b2da2c4efdd0c0f30d4961a1aac02c0f75cf951b337aa4"}, {file = "PyMuPDF-1.23.26-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:73fce034f2afea886a59ead2d0caedf27e2b2a8558b5da16d0286882e0b1eb82"}, {file = "PyMuPDF-1.23.26-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:b3de8618b7cb5b36db611083840b3bcf09b11a893e2d8262f4e042102c7e65de"}, + {file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:879e7f5ad35709d8760ab6103c3d5dac8ab8043a856ab3653fd324af7358ee87"}, {file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:deee96c2fd415ded7b5070d8d5b2c60679aee6ed0e28ac0d2cb998060d835c2c"}, {file = "PyMuPDF-1.23.26-cp39-none-win32.whl", hash = 
"sha256:9f7f4ef99dd8ac97fb0b852efa3dcbee515798078b6c79a6a13c7b1e7c5d41a4"}, {file = "PyMuPDF-1.23.26-cp39-none-win_amd64.whl", hash = "sha256:ba9a54552c7afb9ec85432c765e2fa9a81413acfaa7d70db7c9b528297749e5b"}, @@ -8700,7 +8672,7 @@ files = [ name = "typing-inspect" version = "0.9.0" description = "Runtime inspection utilities for typing module." -optional = false +optional = true python-versions = "*" files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, @@ -9378,4 +9350,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "83762b3ce33babfb666f7f21e95a8c98397b38bbc4f4f93a1d4d3ae3be6cbd7b" +content-hash = "6896c5c74c17f9975950849e9e4273a0cc4ef89a4c4a458a9fd3cfe1ba3248f0" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 7b50ba07818..1222eaa3fdf 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain" -version = "0.2.0" +version = "0.2.1" description = "Building applications with LLMs through composability" authors = [] license = "MIT" @@ -33,7 +33,6 @@ torch = {version = ">=1,<3", optional = true} jinja2 = {version = "^3", optional = true} tiktoken = {version = ">=0.7,<1.0", optional = true, python=">=3.9"} qdrant-client = {version = "^1.3.1", optional = true, python = ">=3.8.1,<3.12"} -dataclasses-json = ">= 0.5.7, < 0.7" cohere = {version = ">=4,<6", optional = true} openai = {version = "<2", optional = true} nlpcloud = {version = "^1", optional = true} @@ -170,7 +169,7 @@ tiktoken = ">=0.7,<1" anthropic = "^0.3.11" langchain-core = {path = "../core", develop = true} langchain-text-splitters = {path = "../text-splitters", develop = true} -langchainhub = "^0.1.15" +langchainhub = "^0.1.16" [tool.poetry.group.lint] optional = true diff --git a/libs/langchain/tests/integration_tests/test_hub.py 
b/libs/langchain/tests/integration_tests/test_hub.py index 4d90011afcc..340c916d38a 100644 --- a/libs/langchain/tests/integration_tests/test_hub.py +++ b/libs/langchain/tests/integration_tests/test_hub.py @@ -1,3 +1,5 @@ +import os + from langchain_core.prompts import ChatPromptTemplate from langchain import hub @@ -11,5 +13,13 @@ def test_hub_pull_public_prompt() -> None: assert prompt.metadata["lc_hub_repo"] == "my-first-prompt" assert ( prompt.metadata["lc_hub_commit_hash"] - == "52668c2f392f8f52d2fc0d6b60cb964e3961934fdbd5dbe72b62926be6b51742" + == "56489e79537fc477d8368e6c9902df15b5e9fe8bc0e4f38dc4b15b65e550077c" ) + + +def test_hub_pull_private_prompt() -> None: + private_prompt = hub.pull("integration-test", api_key=os.environ["HUB_API_KEY"]) + assert isinstance(private_prompt, ChatPromptTemplate) + assert private_prompt.metadata is not None + assert private_prompt.metadata["lc_hub_owner"] == "-" + assert private_prompt.metadata["lc_hub_repo"] == "integration-test" diff --git a/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py b/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py index 2a2af92269f..4f92aa8bbe9 100644 --- a/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py +++ b/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py @@ -21,7 +21,7 @@ class BaseFakeCallbackHandler(BaseModel): ignore_retriever_: bool = False ignore_chat_model_: bool = False - # to allow for similar callback handlers that are not technicall equal + # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests diff --git a/libs/langchain/tests/unit_tests/test_dependencies.py b/libs/langchain/tests/unit_tests/test_dependencies.py index f76107e7fd9..cca05c9c93f 100644 --- a/libs/langchain/tests/unit_tests/test_dependencies.py +++ b/libs/langchain/tests/unit_tests/test_dependencies.py @@ -41,7 +41,6 @@ def 
test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None: "SQLAlchemy", "aiohttp", "async-timeout", - "dataclasses-json", "langchain-core", "langchain-text-splitters", "langsmith", diff --git a/libs/partners/ai21/pyproject.toml b/libs/partners/ai21/pyproject.toml index 4d78cc85d96..0133cff0925 100644 --- a/libs/partners/ai21/pyproject.toml +++ b/libs/partners/ai21/pyproject.toml @@ -5,6 +5,7 @@ description = "An integration package connecting AI21 and LangChain" authors = [] readme = "README.md" repository = "https://github.com/langchain-ai/langchain" +license = "MIT" [tool.poetry.urls] "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/ai21" diff --git a/libs/partners/ai21/tests/integration_tests/test_standard.py b/libs/partners/ai21/tests/integration_tests/test_standard.py index e281ff2f06d..2d74ca59ec3 100644 --- a/libs/partners/ai21/tests/integration_tests/test_standard.py +++ b/libs/partners/ai21/tests/integration_tests/test_standard.py @@ -41,6 +41,17 @@ class TestAI21J2(ChatModelIntegrationTests): chat_model_params, ) + @pytest.mark.xfail(reason="Not implemented.") + def test_usage_metadata( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + super().test_usage_metadata( + chat_model_class, + chat_model_params, + ) + @pytest.fixture def chat_model_params(self) -> dict: return { @@ -79,6 +90,17 @@ class TestAI21Jamba(ChatModelIntegrationTests): chat_model_params, ) + @pytest.mark.xfail(reason="Not implemented.") + def test_usage_metadata( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + super().test_usage_metadata( + chat_model_class, + chat_model_params, + ) + @pytest.fixture def chat_model_params(self) -> dict: return { diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index 73b60dfa2ab..42a9117ab7d 100644 --- 
a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -172,50 +172,45 @@ def _format_messages(messages: List[BaseMessage]) -> Tuple[Optional[str], List[D content = [] for item in message.content: if isinstance(item, str): - content.append( - { - "type": "text", - "text": item, - } - ) + content.append({"type": "text", "text": item}) elif isinstance(item, dict): if "type" not in item: raise ValueError("Dict content item must have a type key") elif item["type"] == "image_url": # convert format source = _format_image(item["image_url"]["url"]) - content.append( - { - "type": "image", - "source": source, - } - ) + content.append({"type": "image", "source": source}) elif item["type"] == "tool_use": - item.pop("text", None) - content.append(item) + # If a tool_call with the same id as a tool_use content block + # exists, the tool_call is preferred. + if isinstance(message, AIMessage) and item["id"] in [ + tc["id"] for tc in message.tool_calls + ]: + overlapping = [ + tc + for tc in message.tool_calls + if tc["id"] == item["id"] + ] + content.extend( + _lc_tool_calls_to_anthropic_tool_use_blocks(overlapping) + ) + else: + item.pop("text", None) + content.append(item) elif item["type"] == "text": text = item.get("text", "") # Only add non-empty strings for now as empty ones are not # accepted. 
# https://github.com/anthropics/anthropic-sdk-python/issues/461 if text.strip(): - content.append( - { - "type": "text", - "text": text, - } - ) + content.append({"type": "text", "text": text}) else: content.append(item) else: raise ValueError( f"Content items must be str or dict, instead was: {type(item)}" ) - elif ( - isinstance(message, AIMessage) - and not isinstance(message.content, list) - and message.tool_calls - ): + elif isinstance(message, AIMessage) and message.tool_calls: content = ( [] if not message.content @@ -228,12 +223,7 @@ def _format_messages(messages: List[BaseMessage]) -> Tuple[Optional[str], List[D else: content = message.content - formatted_messages.append( - { - "role": role, - "content": content, - } - ) + formatted_messages.append({"role": role, "content": content}) return system, formatted_messages @@ -503,6 +493,12 @@ class ChatAnthropic(BaseChatModel): ) else: msg = AIMessage(content=content) + # Collect token usage + msg.usage_metadata = { + "input_tokens": data.usage.input_tokens, + "output_tokens": data.usage.output_tokens, + "total_tokens": data.usage.input_tokens + data.usage.output_tokens, + } return ChatResult( generations=[ChatGeneration(message=msg)], llm_output=llm_output, diff --git a/libs/partners/anthropic/poetry.lock b/libs/partners/anthropic/poetry.lock index 181d346f82c..bfbd38aa425 100644 --- a/libs/partners/anthropic/poetry.lock +++ b/libs/partners/anthropic/poetry.lock @@ -1,14 +1,14 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] [package.dependencies] @@ -16,19 +16,20 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anthropic" -version = "0.26.0" +version = "0.26.1" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.7" files = [ - {file = "anthropic-0.26.0-py3-none-any.whl", hash = "sha256:38fc415561d71dcf263b89da0cc6ecec498379b56256fc4242e9128bc707b283"}, - {file = "anthropic-0.26.0.tar.gz", hash = "sha256:6aaffeb05d515cf9788eef57150a5f827f3786883628ccac71dbe5671ab6f44e"}, + {file = "anthropic-0.26.1-py3-none-any.whl", hash = "sha256:2812b9b250b551ed8a1f0a7e6ae3f005654098994f45ebca5b5808bd154c9628"}, + {file = "anthropic-0.26.1.tar.gz", hash = "sha256:26680ff781a6f678a30a1dccd0743631e602b23a47719439ffdef5335fa167d8"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" +jiter = ">=0.1.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tokenizers = ">=0.13.0" @@ -172,13 +173,13 @@ files = [ [[package]] name = "codespell" -version = "2.2.6" +version = "2.3.0" description = "Codespell" optional = false python-versions = ">=3.8" files = [ - {file = "codespell-2.2.6-py3-none-any.whl", hash = 
"sha256:9ee9a3e5df0990604013ac2a9f22fa8e57669c827124a2e961fe8a1da4cacc07"}, - {file = "codespell-2.2.6.tar.gz", hash = "sha256:a8c65d8eb3faa03deabab6b3bbe798bea72e1799c7e9e955d57eca4096abcff9"}, + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, ] [package.extras] @@ -360,13 +361,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.23.0" +version = "0.23.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.23.0-py3-none-any.whl", hash = "sha256:075c30d48ee7db2bba779190dc526d2c11d422aed6f9044c5e2fdc2c432fdb91"}, - {file = "huggingface_hub-0.23.0.tar.gz", hash = "sha256:7126dedd10a4c6fac796ced4d87a8cf004efc722a5125c2c09299017fa366fa9"}, + {file = "huggingface_hub-0.23.1-py3-none-any.whl", hash = "sha256:720a5bffd2b1b449deb793da8b0df7a9390a7e238534d5a08c9fbcdecb1dd3cb"}, + {file = "huggingface_hub-0.23.1.tar.gz", hash = "sha256:4f62dbf6ae94f400c6d3419485e52bce510591432a5248a65d0cb72e4d479eb4"}, ] [package.dependencies] @@ -414,6 +415,76 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "jiter" +version = "0.4.0" +description = "Fast iterable JSON parser." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.4.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4aa6226d82a4a4505078c0bd5947bad65399635fc5cd4b226512e41753624edf"}, + {file = "jiter-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:947111ac906740a948e7b63799481acd3d5ef666ccb178d146e25718640b7408"}, + {file = "jiter-0.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69572ffb4e84ae289a7422b9af4ea123cae2ce0772228859b37d4b26b4bc92ea"}, + {file = "jiter-0.4.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ba6046cbb5d1baa5a781b846f7e5438596a332f249a857d63f86ef5d1d9563b0"}, + {file = "jiter-0.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4f346e54602782e66d07df0d1c7389384fd93680052ed6170da2c6dc758409e"}, + {file = "jiter-0.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49110ce693f07e97d61089d894cea05a0b9894d5ccc6ac6fc583028726c8c8af"}, + {file = "jiter-0.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e358df6fd129f3a4e087539f086355ad0107e5da16dbc8bc857d94222eaeed5"}, + {file = "jiter-0.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7eb852ca39a48f3c049def56f0d1771b32e948e4f429a782d14ef4cc64cfd26e"}, + {file = "jiter-0.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:44dc045decb2545bffe2da04ea4c36d9438d3f3d49fc47ed423ea75c352b712e"}, + {file = "jiter-0.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:413adb15372ac63db04373240f40925788e4282c997eeafc2040530049a0a599"}, + {file = "jiter-0.4.0-cp310-none-win32.whl", hash = "sha256:0b48ea71673a97b897e4b94bbc871e62495a5a85f836c9f90712a4c70aa3ef7e"}, + {file = "jiter-0.4.0-cp310-none-win_amd64.whl", hash = "sha256:6a1c84b44afafaf0ba6223679cf17af664b889da14da31d8af3595fd977d96fa"}, + {file = "jiter-0.4.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:b2cc498345fa37ca23fbc20271a553aa46e6eb00924600f49b7dc4b2aa8952ee"}, + {file = "jiter-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69f7221ac09ab421abf04f89942026868297c568133998fb181bcf435760cbf3"}, + {file = "jiter-0.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef7d01c52f3e5a56ae73af36bd13797dd1a56711eb522748e5e84d15425b3f10"}, + {file = "jiter-0.4.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:39be97d5ce0c4d0dae28c23c03a0af0501a725589427e99763f99c42e18aa402"}, + {file = "jiter-0.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eac2ed1ec1e577b92b7ea2d4e6de8aec0c1164defd8af8affdc8ec0f0ec2904a"}, + {file = "jiter-0.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6258837d184c92c9cb91c983c310ad7269d41afb49d34f00ca9246e073943a03"}, + {file = "jiter-0.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123c2a77b066bf17a4d021e238e8351058cfa56b90ac04f2522d120dc64ea055"}, + {file = "jiter-0.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2df939f792c7a40e55f36700417db551b9f6b84d348990fa0f2c608adeb1f11b"}, + {file = "jiter-0.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cb1b09b16d40cf9ba1d11ba11e5b96ad29286a6a1c4ad5e6a2aef5e352a89f5d"}, + {file = "jiter-0.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0efb4208889ebdbf933bf08dbcbc16e64ffd34c8e2b28044ee142789a9dc3a67"}, + {file = "jiter-0.4.0-cp311-none-win32.whl", hash = "sha256:20545ac1b68e7e5b066a1e8347840c9cebdd02ace65faae2e655fc02ec5c915c"}, + {file = "jiter-0.4.0-cp311-none-win_amd64.whl", hash = "sha256:6b300f9887c8e4431cd03a974ea3e4f9958885636003c3864220a9b2d2f8462b"}, + {file = "jiter-0.4.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:923432a0563bbae404ff25bb010e348514a69bfab979f2f8119b23b625dbf6d9"}, + {file = "jiter-0.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:ab8bb0ec8b97cec4422dc8b37b525442d969244488c805b834609ab0ccd788e2"}, + {file = "jiter-0.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b857adb127b9c533907226791eafa79c5038c3eb5a477984994bf7c4715ba518"}, + {file = "jiter-0.4.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2609cc0d1d8d470e921ff9a604afeb4c701bbe13e00bd9834d5aa6e7ea732a9b"}, + {file = "jiter-0.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d39e99f8b7df46a119b6f84321f6ba01f16fa46abfa765d44c05c486d8e66829"}, + {file = "jiter-0.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:56de8b518ebfe76a70f856741f6de248ce396c50a87acef827b6e8388e3a502d"}, + {file = "jiter-0.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488b7e777be47f67ce1a1f8f8eb907f9bbd81af5c03784a9bab09d025c250233"}, + {file = "jiter-0.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ea35e0ecbb5dadd457855eb980dcc548c14cf5341bcd22a43814cb56f2bcc79"}, + {file = "jiter-0.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e1a9e9ee69c80b63951c93226b68d0e955953f64fe758bad2afe7ef7f9016af9"}, + {file = "jiter-0.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:78e2f3cc2a32a21d43ccc5afcf66f5d17e827ccc4e6d21c0b353bdad2c7dcc9c"}, + {file = "jiter-0.4.0-cp312-none-win32.whl", hash = "sha256:eeaa7a2b47a99f4ebbb4142bb58b95617e09f24c87570f6a57d2770687c9ddbe"}, + {file = "jiter-0.4.0-cp312-none-win_amd64.whl", hash = "sha256:8d4a78b385b93ff59a67215d26000fcb4789a388fca3730d1b60fab17fc81e3c"}, + {file = "jiter-0.4.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ebf20a3fac1089ce26963bf04140da0f803d55332ec69d59c5a87cf1a87d29c4"}, + {file = "jiter-0.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d62244ffc6a168187452277adeefb7b2c30170689c6bf543a51e98e8c17ddab7"}, + {file = "jiter-0.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:40b2cde77446a41cec595739fd168be87edff2428eaf7c3438231224dd0ab7a5"}, + {file = "jiter-0.4.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e51fc0a22021ec8905b9b00a2f7d25756f2ff7a653e35a790a2067ae126b51f6"}, + {file = "jiter-0.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a56e6f980b89d7cfe5c43811dcf52d6f37b319428a4540511235dafda9ea7808"}, + {file = "jiter-0.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fec16adab8d3d3d6d74e3711a1f380836ebeab2a20e3f88cfe2ec5094d8b84"}, + {file = "jiter-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19e3de515801c954e8f1dc1f575282a4a86df9e782d4993ea1ed2be9a8dedaa0"}, + {file = "jiter-0.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17e0ad8abf0bb04d81810eaeaab35d2c99b5da11fcd1058e0a389607ff6503b0"}, + {file = "jiter-0.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8dc0132b728f3b3e90ff0d1874504cd49c78f3553bf3745168a7fc0b4cf674e1"}, + {file = "jiter-0.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81a883104aa96e494d3d28eaf7070780d03ecee8ccfdfaf7e4899710340c47f1"}, + {file = "jiter-0.4.0-cp38-none-win32.whl", hash = "sha256:a044c53ab1aaa4af624ac9574181b5bad8e260aea7e03104738156511433deba"}, + {file = "jiter-0.4.0-cp38-none-win_amd64.whl", hash = "sha256:d920035c869053e3d9a0b3ff94384d16a8ef5fde3dea55f97bd29916f6e27554"}, + {file = "jiter-0.4.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:091e978f4e586a2f1c69bf940d45f4e6a23455877172a0ab7d6de04a3b119299"}, + {file = "jiter-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79134b2d601309bcbe3304a262d7d228ad61d53c80883231c637773000a6d683"}, + {file = "jiter-0.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c471473e0b05058b5d729ff04271b6d45a575ac8bd9948563268c734b380ac7e"}, + {file = "jiter-0.4.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:bb84b8930da8b32b0b1fdff9817e2c4b47e8981b5647ad11c4975403416e4112"}, + {file = "jiter-0.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f2805e28941751ebfe0948596a64cde4cfb9b84bea5282affd020063e659c96"}, + {file = "jiter-0.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42ef59f9e513bf081a8b5c5578933ea9c3a63e559e6e3501a3e72edcd456ff5e"}, + {file = "jiter-0.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae12e3906f9e565120ab569de261b738e3a1ec50c40e30c67499e4f893e9a8c"}, + {file = "jiter-0.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:264dc1324f45a793bc89af4f653225229eb17bca9ec7107dce6c8fb4fe68d20f"}, + {file = "jiter-0.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9a1c172ec47d846e25881dfbd52438ddb690da4ea04d185e477abd3db6c32f8a"}, + {file = "jiter-0.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ccde31d0bc114aedad0dbd71b7f63ba0f0eecd7ec9ae1926a0ca01c1eb2854e7"}, + {file = "jiter-0.4.0-cp39-none-win32.whl", hash = "sha256:13139b05792fbc13a0f9a5b4c89823ea0874141decae1b8f693f12bb1d28e061"}, + {file = "jiter-0.4.0-cp39-none-win_amd64.whl", hash = "sha256:3a729b2631c6d5551a41069697415fee9659c3eadc9ab87369376ba51930cd00"}, + {file = "jiter-0.4.0.tar.gz", hash = "sha256:68203e02e0419bc3eca717c580c2d8f615aeee1150e2a1fb68d6600a7e52a37c"}, +] + [[package]] name = "jsonpatch" version = "1.33" @@ -441,7 +512,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.2.0" +version = "0.2.2rc1" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -482,13 +553,13 @@ url = "../../standard-tests" [[package]] name = "langsmith" -version = "0.1.58" +version = "0.1.63" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.58-py3-none-any.whl", hash = "sha256:1148cc836ec99d1b2f37cd2fa3014fcac213bb6bad798a2b21bb9111c18c9768"}, - {file = "langsmith-0.1.58.tar.gz", hash = "sha256:a5060933c1fb3006b498ec849677993329d7e6138bdc2ec044068ab806e09c39"}, + {file = "langsmith-0.1.63-py3-none-any.whl", hash = "sha256:7810afdf5e3f3b472fc581a29371fb96cd843dde2149e048d1b9610325159d1e"}, + {file = "langsmith-0.1.63.tar.gz", hash = "sha256:a609405b52f6f54df442a142cbf19ab38662d54e532f96028b4c546434d4afdf"}, ] [package.dependencies] @@ -896,13 +967,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.2" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, + {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, ] [package.dependencies] @@ -917,28 +988,28 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.4.4" +version = "0.4.5" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.4.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:29d44ef5bb6a08e235c8249294fa8d431adc1426bfda99ed493119e6f9ea1bf6"}, - {file = "ruff-0.4.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c4efe62b5bbb24178c950732ddd40712b878a9b96b1d02b0ff0b08a090cbd891"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c8e2f1e8fc12d07ab521a9005d68a969e167b589cbcaee354cb61e9d9de9c15"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60ed88b636a463214905c002fa3eaab19795679ed55529f91e488db3fe8976ab"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b90fc5e170fc71c712cc4d9ab0e24ea505c6a9e4ebf346787a67e691dfb72e85"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8e7e6ebc10ef16dcdc77fd5557ee60647512b400e4a60bdc4849468f076f6eef"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9ddb2c494fb79fc208cd15ffe08f32b7682519e067413dbaf5f4b01a6087bcd"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c51c928a14f9f0a871082603e25a1588059b7e08a920f2f9fa7157b5bf08cfe9"}, - {file = "ruff-0.4.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5eb0a4bfd6400b7d07c09a7725e1a98c3b838be557fee229ac0f84d9aa49c36"}, - {file = "ruff-0.4.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b1867ee9bf3acc21778dcb293db504692eda5f7a11a6e6cc40890182a9f9e595"}, - {file = "ruff-0.4.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1aecced1269481ef2894cc495647392a34b0bf3e28ff53ed95a385b13aa45768"}, - {file = "ruff-0.4.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9da73eb616b3241a307b837f32756dc20a0b07e2bcb694fec73699c93d04a69e"}, - {file = "ruff-0.4.4-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:958b4ea5589706a81065e2a776237de2ecc3e763342e5cc8e02a4a4d8a5e6f95"}, - {file = "ruff-0.4.4-py3-none-win32.whl", hash = "sha256:cb53473849f011bca6e754f2cdf47cafc9c4f4ff4570003a0dad0b9b6890e876"}, - {file = "ruff-0.4.4-py3-none-win_amd64.whl", hash = "sha256:424e5b72597482543b684c11def82669cc6b395aa8cc69acc1858b5ef3e5daae"}, - {file = "ruff-0.4.4-py3-none-win_arm64.whl", hash = "sha256:39df0537b47d3b597293edbb95baf54ff5b49589eb7ff41926d8243caa995ea6"}, - {file = "ruff-0.4.4.tar.gz", hash = "sha256:f87ea42d5cdebdc6a69761a9d0bc83ae9b3b30d0ad78952005ba6568d6c022af"}, + {file = "ruff-0.4.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:8f58e615dec58b1a6b291769b559e12fdffb53cc4187160a2fc83250eaf54e96"}, + {file = "ruff-0.4.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:84dd157474e16e3a82745d2afa1016c17d27cb5d52b12e3d45d418bcc6d49264"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f483ad9d50b00e7fd577f6d0305aa18494c6af139bce7319c68a17180087f4"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63fde3bf6f3ad4e990357af1d30e8ba2730860a954ea9282c95fc0846f5f64af"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e3ba4620dee27f76bbcad97067766026c918ba0f2d035c2fc25cbdd04d9c97"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:441dab55c568e38d02bbda68a926a3d0b54f5510095c9de7f95e47a39e0168aa"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1169e47e9c4136c997f08f9857ae889d614c5035d87d38fda9b44b4338909cdf"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:755ac9ac2598a941512fc36a9070a13c88d72ff874a9781493eb237ab02d75df"}, + {file = "ruff-0.4.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4b02a65985be2b34b170025a8b92449088ce61e33e69956ce4d316c0fe7cce0"}, + {file 
= "ruff-0.4.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:75a426506a183d9201e7e5664de3f6b414ad3850d7625764106f7b6d0486f0a1"}, + {file = "ruff-0.4.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:6e1b139b45e2911419044237d90b60e472f57285950e1492c757dfc88259bb06"}, + {file = "ruff-0.4.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a6f29a8221d2e3d85ff0c7b4371c0e37b39c87732c969b4d90f3dad2e721c5b1"}, + {file = "ruff-0.4.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d6ef817124d72b54cc923f3444828ba24fa45c3164bc9e8f1813db2f3d3a8a11"}, + {file = "ruff-0.4.5-py3-none-win32.whl", hash = "sha256:aed8166c18b1a169a5d3ec28a49b43340949e400665555b51ee06f22813ef062"}, + {file = "ruff-0.4.5-py3-none-win_amd64.whl", hash = "sha256:b0b03c619d2b4350b4a27e34fd2ac64d0dabe1afbf43de57d0f9d8a05ecffa45"}, + {file = "ruff-0.4.5-py3-none-win_arm64.whl", hash = "sha256:9d15de3425f53161b3f5a5658d4522e4eee5ea002bf2ac7aa380743dd9ad5fba"}, + {file = "ruff-0.4.5.tar.gz", hash = "sha256:286eabd47e7d4d521d199cab84deca135557e6d1e0f0d01c29e757c3cb151b54"}, ] [[package]] @@ -1142,13 +1213,13 @@ telegram = ["requests"] [[package]] name = "typing-extensions" -version = "4.11.0" +version = "4.12.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, ] [[package]] @@ -1170,40 +1241,43 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "watchdog" -version = "4.0.0" +version = "4.0.1" 
description = "Filesystem events monitoring" optional = false python-versions = ">=3.8" files = [ - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, - {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, - {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, - {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, - {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, - {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, - {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, - {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, - {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, - {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, - {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, - {file = 
"watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, - {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, - {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = 
"watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = 
"sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, ] [package.extras] @@ -1212,4 +1286,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "f13937cb59d01403221df050b66264416402fd219852928b1e7cf107f7abc8a8" +content-hash = "b7a5f1c41811ecfc4f87a5261e0d4627abcd79a070355faa60408a3e7a1c691c" diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml index 3f8f7d248be..859536541da 100644 --- a/libs/partners/anthropic/pyproject.toml +++ b/libs/partners/anthropic/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-anthropic" -version = "0.1.13" +version = "0.1.14rc2" description = "An integration package connecting AnthropicMessages and LangChain" authors = [] readme = "README.md" @@ -12,7 +12,7 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -langchain-core = ">=0.2.0,<0.3" +langchain-core = { version = ">=0.2.2rc1,<0.3", allow-prereleases = true } anthropic = ">=0.26.0,<1" defusedxml 
= { version = "^0.7.1", optional = true } diff --git a/libs/partners/anthropic/tests/integration_tests/test_experimental.py b/libs/partners/anthropic/tests/integration_tests/test_experimental.py index cc035d4589f..4e23e0f4422 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_experimental.py +++ b/libs/partners/anthropic/tests/integration_tests/test_experimental.py @@ -165,5 +165,5 @@ def test_anthropic_complex_structured_output() -> None: { "email": "From: Erick. The email is about the new project. The tone is positive. The action items are to send the report and to schedule a meeting." # noqa: E501 } - ) # noqa: E501 + ) assert isinstance(response, Email) diff --git a/libs/partners/anthropic/tests/unit_tests/_utils.py b/libs/partners/anthropic/tests/unit_tests/_utils.py index 3c053d2cc42..a39f31fc0f1 100644 --- a/libs/partners/anthropic/tests/unit_tests/_utils.py +++ b/libs/partners/anthropic/tests/unit_tests/_utils.py @@ -19,7 +19,7 @@ class BaseFakeCallbackHandler(BaseModel): ignore_retriever_: bool = False ignore_chat_model_: bool = False - # to allow for similar callback handlers that are not technicall equal + # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests diff --git a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py index 5da3a8a5e66..3c5c5b2691c 100644 --- a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py +++ b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py @@ -89,7 +89,16 @@ def test__format_output() -> None: ) expected = ChatResult( generations=[ - ChatGeneration(message=AIMessage("bar")), + ChatGeneration( + message=AIMessage( + "bar", + usage_metadata={ + "input_tokens": 2, + "output_tokens": 1, + "total_tokens": 3, + }, + ) + ), ], llm_output={ "id": "foo", @@ -343,10 +352,7 @@ def 
test__format_messages_with_str_content_and_tool_calls() -> None: "thought", tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}], ) - tool = ToolMessage( - "blurb", - tool_call_id="1", - ) + tool = ToolMessage("blurb", tool_call_id="1") messages = [system, human, ai, tool] expected = ( "fuzz", @@ -355,10 +361,7 @@ def test__format_messages_with_str_content_and_tool_calls() -> None: { "role": "assistant", "content": [ - { - "type": "text", - "text": "thought", - }, + {"type": "text", "text": "thought"}, { "type": "tool_use", "name": "bar", @@ -385,12 +388,7 @@ def test__format_messages_with_list_content_and_tool_calls() -> None: # If content and tool_calls are specified and content is a list, then content is # preferred. ai = AIMessage( - [ - { - "type": "text", - "text": "thought", - } - ], + [{"type": "text", "text": "thought"}], tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}], ) tool = ToolMessage( @@ -404,11 +402,53 @@ def test__format_messages_with_list_content_and_tool_calls() -> None: {"role": "user", "content": "foo"}, { "role": "assistant", + "content": [{"type": "text", "text": "thought"}], + }, + { + "role": "user", "content": [ + {"type": "tool_result", "content": "blurb", "tool_use_id": "1"} + ], + }, + ], + ) + actual = _format_messages(messages) + assert expected == actual + + +def test__format_messages_with_tool_use_blocks_and_tool_calls() -> None: + """Show that tool_calls are preferred to tool_use blocks when both have same id.""" + system = SystemMessage("fuzz") + human = HumanMessage("foo") + # NOTE: tool_use block in contents and tool_calls have different arguments. 
+ ai = AIMessage( + [ + {"type": "text", "text": "thought"}, + { + "type": "tool_use", + "name": "bar", + "id": "1", + "input": {"baz": "NOT_BUZZ"}, + }, + ], + tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "BUZZ"}}], + ) + tool = ToolMessage("blurb", tool_call_id="1") + messages = [system, human, ai, tool] + expected = ( + "fuzz", + [ + {"role": "user", "content": "foo"}, + { + "role": "assistant", + "content": [ + {"type": "text", "text": "thought"}, { - "type": "text", - "text": "thought", - } + "type": "tool_use", + "name": "bar", + "id": "1", + "input": {"baz": "BUZZ"}, # tool_calls value preferred. + }, ], }, { diff --git a/libs/partners/chroma/langchain_chroma/vectorstores.py b/libs/partners/chroma/langchain_chroma/vectorstores.py index 221820173be..e4e1819d752 100644 --- a/libs/partners/chroma/langchain_chroma/vectorstores.py +++ b/libs/partners/chroma/langchain_chroma/vectorstores.py @@ -52,7 +52,11 @@ Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray] def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: - """Row-wise cosine similarity between two equal-width matrices.""" + """Row-wise cosine similarity between two equal-width matrices. + + Raises: + ValueError: If the number of columns in X and Y are not the same. + """ if len(X) == 0 or len(Y) == 0: return np.array([]) @@ -80,7 +84,21 @@ def maximal_marginal_relevance( lambda_mult: float = 0.5, k: int = 4, ) -> List[int]: - """Calculate maximal marginal relevance.""" + """Calculate maximal marginal relevance. + + Args: + query_embedding: Query embedding. + embedding_list: List of embeddings to select from. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + k: Number of Documents to return. Defaults to 4. + + Returns: + List of indices of embeddings selected by maximal marginal relevance. 
+ """ + if min(k, len(embedding_list)) <= 0: return [] if query_embedding.ndim == 1: @@ -136,8 +154,21 @@ class Chroma(VectorStore): relevance_score_fn: Optional[Callable[[float], float]] = None, create_collection_if_not_exists: Optional[bool] = True, ) -> None: - """Initialize with a Chroma client.""" + """Initialize with a Chroma client. + Args: + collection_name: Name of the collection to create. + embedding_function: Embedding class object. Used to embed texts. + persist_director: Directory to persist the collection. + client_settings: Chroma client settings + collection_metadata: Collection configurations. + client: Chroma client. Documentation: + https://docs.trychroma.com/reference/js-client#class:-chromaclient + relevance_score_fn: Function to calculate relevance score from distance. + Used only in `similarity_search_with_relevance_scores` + create_collection_if_not_exists: Whether to create collection + if it doesn't exist. Defaults to True. + """ if client is not None: self._client_settings = client_settings self._client = client @@ -204,7 +235,23 @@ class Chroma(VectorStore): where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> Union[List[Document], chromadb.QueryResult]: - """Query the chroma collection.""" + """Query the chroma collection. + + Args: + query_texts: List of query texts. + query_embeddings: List of query embeddings. + n_results: Number of results to return. Defaults to 4. + where: dict used to filter results by + e.g. {"color" : "red", "price": 4.20}. + where_document: dict used to filter by the documents. + E.g. {$contains: {"text": "hello"}}. + + Returns: + List of `n_results` nearest neighbor embeddings for provided + query_embeddings or query_texts. 
+ + See more: https://docs.trychroma.com/reference/py-collection#query + """ return self._collection.query( query_texts=query_texts, query_embeddings=query_embeddings, # type: ignore @@ -229,12 +276,16 @@ class Chroma(VectorStore): """Run more images through the embeddings and add to the vectorstore. Args: - uris List[str]: File path to the image. - metadatas (Optional[List[dict]], optional): Optional list of metadatas. - ids (Optional[List[str]], optional): Optional list of IDs. + uris: File path to the image. + metadatas: Optional list of metadatas. + When querying, you can filter on this metadata. + ids: Optional list of IDs. Returns: - List[str]: List of IDs of the added images. + List of IDs of the added images. + + Raises: + ValueError: When metadata is incorrect. """ # Map from uris to b64 encoded strings b64_texts = [self.encode_image(uri=uri) for uri in uris] @@ -312,14 +363,18 @@ class Chroma(VectorStore): """Run more texts through the embeddings and add to the vectorstore. Args: - texts (Iterable[str]): Texts to add to the vectorstore. - metadatas (Optional[List[dict]], optional): Optional list of metadatas. - ids (Optional[List[str]], optional): Optional list of IDs. + texts: Texts to add to the vectorstore. + metadatas: Optional list of metadatas. + When querying, you can filter on this metadata. + ids: Optional list of IDs. Returns: - List[str]: List of IDs of the added texts. + List of IDs of the added texts. + + Raises: + ValueError: When metadata is incorrect. """ - # TODO: Handle the case where the user doesn't provide ids on the Collection + if ids is None: ids = [str(uuid.uuid4()) for _ in texts] embeddings = None @@ -391,12 +446,12 @@ class Chroma(VectorStore): """Run similarity search with Chroma. Args: - query (str): Query text to search for. - k (int): Number of results to return. Defaults to 4. - filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + query: Query text to search for. + k: Number of results to return. 
Defaults to 4. + filter: Filter by metadata. Defaults to None. Returns: - List[Document]: List of documents most similar to the query text. + List of documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_score( query, k, filter=filter, **kwargs @@ -412,10 +467,14 @@ class Chroma(VectorStore): **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. + Args: - embedding (List[float]): Embedding to look up documents similar to. - k (int): Number of Documents to return. Defaults to 4. - filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + filter: Filter by metadata. Defaults to None. + where_document: dict used to filter by the documents. + E.g. {$contains: {"text": "hello"}}. + Returns: List of Documents most similar to the query vector. """ @@ -441,13 +500,14 @@ class Chroma(VectorStore): Args: embedding (List[float]): Embedding to look up documents similar to. - k (int): Number of Documents to return. Defaults to 4. - filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + k: Number of Documents to return. Defaults to 4. + filter: Filter by metadata. Defaults to None. + where_document: dict used to filter by the documents. + E.g. {$contains: {"text": "hello"}}. Returns: - List[Tuple[Document, float]]: List of documents most similar to - the query text and cosine distance in float for each. - Lower score represents more similarity. + List of documents most similar to the query text and relevance score + in float for each. Lower score represents more similarity. """ results = self.__query_collection( query_embeddings=embedding, @@ -469,14 +529,15 @@ class Chroma(VectorStore): """Run similarity search with Chroma with distance. Args: - query (str): Query text to search for. - k (int): Number of results to return. Defaults to 4. 
- filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + query: Query text to search for. + k: Number of results to return. Defaults to 4. + filter: Filter by metadata. Defaults to None. + where_document: dict used to filter by the documents. + E.g. {$contains: {"text": "hello"}}. Returns: - List[Tuple[Document, float]]: List of documents most similar to - the query text and cosine distance in float for each. - Lower score represents more similarity. + List of documents most similar to the query text and + distance in float for each. Lower score represents more similarity. """ if self._embedding_function is None: results = self.__query_collection( @@ -499,14 +560,21 @@ class Chroma(VectorStore): return _results_to_docs_and_scores(results) def _select_relevance_score_fn(self) -> Callable[[float], float]: + """Select the relevance score function based on collections distance metric. + + The most similar documents will have the lowest relevance score. Default + relevance score function is euclidean distance. Distance metric must be + provided in `collection_metadata` during initizalition of Chroma object. + Example: collection_metadata={"hnsw:space": "cosine"}. Available distance + metrics are: 'cosine', 'l2' and 'ip'. + + Returns: + The relevance score function. + + Raises: + ValueError: If the distance metric is not supported. """ - The 'correct' relevance function - may differ depending on a few things, including: - - the distance / similarity metric used by the VectorStore - - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - - embedding dimensionality - - etc. - """ + if self.override_relevance_score_fn: return self.override_relevance_score_fn @@ -541,18 +609,20 @@ class Chroma(VectorStore): **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. + Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. 
Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. - fetch_k: Number of Documents to fetch to pass to MMR algorithm. + fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to + 20. lambda_mult: Number between 0 and 1 that determines the degree - of diversity among the results with 0 corresponding - to maximum diversity and 1 to minimum diversity. - Defaults to 0.5. - filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter: Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. @@ -600,10 +670,15 @@ class Chroma(VectorStore): of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. - filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + filter: Filter by metadata. Defaults to None. + where_document: dict used to filter by the documents. + E.g. {$contains: {"text": "hello"}}. Returns: List of Documents selected by maximal marginal relevance. + + Raises: + ValueError: If the embedding function is not provided. """ if self._embedding_function is None: raise ValueError( @@ -611,7 +686,7 @@ class Chroma(VectorStore): ) embedding = self._embedding_function.embed_query(query) - docs = self.max_marginal_relevance_search_by_vector( + return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, @@ -619,7 +694,6 @@ class Chroma(VectorStore): filter=filter, where_document=where_document, ) - return docs def delete_collection(self) -> None: """Delete the collection.""" @@ -656,6 +730,9 @@ class Chroma(VectorStore): Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional. 
+ + Return: + A dict with the keys `"ids"`, `"embeddings"`, `"metadatas"`, `"documents"`. """ kwargs = { "ids": ids, @@ -674,8 +751,8 @@ class Chroma(VectorStore): """Update a document in the collection. Args: - document_id (str): ID of the document to update. - document (Document): Document to update. + document_id: ID of the document to update. + document: Document to update. """ return self.update_documents([document_id], [document]) @@ -684,8 +761,11 @@ class Chroma(VectorStore): """Update a document in the collection. Args: - ids (List[str]): List of ids of the document to update. - documents (List[Document]): List of documents to update. + ids: List of ids of the document to update. + documents: List of documents to update. + + Raises: + ValueError: If the embedding function is not provided. """ text = [document.page_content for document in documents] metadata = [document.metadata for document in documents] @@ -741,14 +821,14 @@ class Chroma(VectorStore): Otherwise, the data will be ephemeral in-memory. Args: - texts (List[str]): List of texts to add to the collection. - collection_name (str): Name of the collection to create. - persist_directory (Optional[str]): Directory to persist the collection. - embedding (Optional[Embeddings]): Embedding function. Defaults to None. - metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. - ids (Optional[List[str]]): List of document IDs. Defaults to None. - client_settings (Optional[chromadb.config.Settings]): Chroma client settings - collection_metadata (Optional[Dict]): Collection configurations. + texts: List of texts to add to the collection. + collection_name: Name of the collection to create. + persist_directory: Directory to persist the collection. + embedding: Embedding function. Defaults to None. + metadatas: List of metadatas. Defaults to None. + ids: List of document IDs. Defaults to None. + client_settings: Chroma client settings + collection_metadata: Collection configurations. 
Defaults to None. Returns: @@ -804,13 +884,13 @@ class Chroma(VectorStore): Otherwise, the data will be ephemeral in-memory. Args: - collection_name (str): Name of the collection to create. - persist_directory (Optional[str]): Directory to persist the collection. - ids (Optional[List[str]]): List of document IDs. Defaults to None. - documents (List[Document]): List of documents to add to the vectorstore. - embedding (Optional[Embeddings]): Embedding function. Defaults to None. - client_settings (Optional[chromadb.config.Settings]): Chroma client settings - collection_metadata (Optional[Dict]): Collection configurations. + collection_name: Name of the collection to create. + persist_directory: Directory to persist the collection. + ids : List of document IDs. Defaults to None. + documents: List of documents to add to the vectorstore. + embedding: Embedding function. Defaults to None. + client_settings: Chroma client settings + collection_metadata: Collection configurations. Defaults to None. 
Returns: diff --git a/libs/partners/fireworks/tests/integration_tests/test_standard.py b/libs/partners/fireworks/tests/integration_tests/test_standard.py index bfeeca693d5..26ba020419c 100644 --- a/libs/partners/fireworks/tests/integration_tests/test_standard.py +++ b/libs/partners/fireworks/tests/integration_tests/test_standard.py @@ -21,6 +21,17 @@ class TestFireworksStandard(ChatModelIntegrationTests): "temperature": 0, } + @pytest.mark.xfail(reason="Not implemented.") + def test_usage_metadata( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + super().test_usage_metadata( + chat_model_class, + chat_model_params, + ) + @pytest.mark.xfail(reason="Not yet implemented.") def test_tool_message_histories_list_content( self, diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py index 8820897af1c..69234ce93d6 100644 --- a/libs/partners/groq/langchain_groq/chat_models.py +++ b/libs/partners/groq/langchain_groq/chat_models.py @@ -2,6 +2,7 @@ from __future__ import annotations +import json import os import warnings from operator import itemgetter @@ -44,8 +45,10 @@ from langchain_core.messages import ( FunctionMessageChunk, HumanMessage, HumanMessageChunk, + InvalidToolCall, SystemMessage, SystemMessageChunk, + ToolCall, ToolMessage, ToolMessageChunk, ) @@ -837,7 +840,14 @@ def _convert_message_to_dict(message: BaseMessage) -> dict: # If function call only, content is None not empty string if message_dict["content"] == "": message_dict["content"] = None - if "tool_calls" in message.additional_kwargs: + if message.tool_calls or message.invalid_tool_calls: + message_dict["tool_calls"] = [ + _lc_tool_call_to_groq_tool_call(tc) for tc in message.tool_calls + ] + [ + _lc_invalid_tool_call_to_groq_tool_call(tc) + for tc in message.invalid_tool_calls + ] + elif "tool_calls" in message.additional_kwargs: message_dict["tool_calls"] = message.additional_kwargs["tool_calls"] # If tool 
calls only, content is None not empty string if message_dict["content"] == "": @@ -944,3 +954,27 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: ) else: return ChatMessage(content=_dict.get("content", ""), role=role) + + +def _lc_tool_call_to_groq_tool_call(tool_call: ToolCall) -> dict: + return { + "type": "function", + "id": tool_call["id"], + "function": { + "name": tool_call["name"], + "arguments": json.dumps(tool_call["args"]), + }, + } + + +def _lc_invalid_tool_call_to_groq_tool_call( + invalid_tool_call: InvalidToolCall, +) -> dict: + return { + "type": "function", + "id": invalid_tool_call["id"], + "function": { + "name": invalid_tool_call["name"], + "arguments": invalid_tool_call["args"], + }, + } diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py index 4048f7e8f6a..8224adc3ec8 100644 --- a/libs/partners/groq/tests/integration_tests/test_standard.py +++ b/libs/partners/groq/tests/integration_tests/test_standard.py @@ -14,6 +14,17 @@ class TestMistralStandard(ChatModelIntegrationTests): def chat_model_class(self) -> Type[BaseChatModel]: return ChatGroq + @pytest.mark.xfail(reason="Not implemented.") + def test_usage_metadata( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + super().test_usage_metadata( + chat_model_class, + chat_model_params, + ) + @pytest.mark.xfail(reason="Not yet implemented.") def test_tool_message_histories_list_content( self, diff --git a/libs/partners/milvus/.gitignore b/libs/partners/milvus/.gitignore new file mode 100644 index 00000000000..bee8a64b79a --- /dev/null +++ b/libs/partners/milvus/.gitignore @@ -0,0 +1 @@ +__pycache__ diff --git a/libs/partners/milvus/LICENSE b/libs/partners/milvus/LICENSE new file mode 100644 index 00000000000..426b6509034 --- /dev/null +++ b/libs/partners/milvus/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 LangChain, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/libs/partners/milvus/Makefile b/libs/partners/milvus/Makefile new file mode 100644 index 00000000000..263896e6e0a --- /dev/null +++ b/libs/partners/milvus/Makefile @@ -0,0 +1,57 @@ +.PHONY: all format lint test tests integration_tests docker_tests help extended_tests + +# Default target executed when no arguments are given to make. +all: help + +# Define a variable for the test file path. +TEST_FILE ?= tests/unit_tests/ +integration_test integration_tests: TEST_FILE=tests/integration_tests/ + +test tests integration_test integration_tests: + poetry run pytest $(TEST_FILE) + + +###################### +# LINTING AND FORMATTING +###################### + +# Define a variable for Python and notebook files. +PYTHON_FILES=. +MYPY_CACHE=.mypy_cache +lint format: PYTHON_FILES=. 
+lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/partners/milvus --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$') +lint_package: PYTHON_FILES=langchain_milvus +lint_tests: PYTHON_FILES=tests +lint_tests: MYPY_CACHE=.mypy_cache_test + +lint lint_diff lint_package lint_tests: + poetry run ruff . + poetry run ruff format $(PYTHON_FILES) --diff + poetry run ruff --select I $(PYTHON_FILES) + mkdir $(MYPY_CACHE); poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE) + +format format_diff: + poetry run ruff format $(PYTHON_FILES) + poetry run ruff --select I --fix $(PYTHON_FILES) + +spell_check: + poetry run codespell --toml pyproject.toml + +spell_fix: + poetry run codespell --toml pyproject.toml -w + +check_imports: $(shell find langchain_milvus -name '*.py') + poetry run python ./scripts/check_imports.py $^ + +###################### +# HELP +###################### + +help: + @echo '----' + @echo 'check_imports - check imports' + @echo 'format - run code formatters' + @echo 'lint - run linters' + @echo 'test - run unit tests' + @echo 'tests - run unit tests' + @echo 'test TEST_FILE= - run all tests in file' diff --git a/libs/partners/milvus/README.md b/libs/partners/milvus/README.md new file mode 100644 index 00000000000..29084081028 --- /dev/null +++ b/libs/partners/milvus/README.md @@ -0,0 +1,42 @@ +# langchain-milvus + +This is a library integration with [Milvus](https://milvus.io/) and [Zilliz Cloud](https://zilliz.com/cloud). + +## Installation + +```bash +pip install -U langchain-milvus +``` + +## Milvus vector database + +See a [usage example](https://python.langchain.com/docs/integrations/vectorstores/milvus/) + +```python +from langchain_milvus import Milvus +``` + +## Milvus hybrid search + +See a [usage example](https://python.langchain.com/docs/integrations/retrievers/milvus_hybrid_search/). 
+ +```python +from langchain_milvus import MilvusCollectionHybridSearchRetriever +``` + + +## Zilliz Cloud vector database + +See a [usage example](https://python.langchain.com/docs/integrations/vectorstores/zilliz/). + +```python +from langchain_milvus import Zilliz +``` + +## Zilliz Cloud Pipeline Retriever + +See a [usage example](https://python.langchain.com/docs/integrations/retrievers/zilliz_cloud_pipeline). + +```python +from langchain_milvus import ZillizCloudPipelineRetriever +``` \ No newline at end of file diff --git a/libs/partners/milvus/langchain_milvus/__init__.py b/libs/partners/milvus/langchain_milvus/__init__.py new file mode 100644 index 00000000000..b19bc1d7e69 --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/__init__.py @@ -0,0 +1,12 @@ +from langchain_milvus.retrievers import ( + MilvusCollectionHybridSearchRetriever, + ZillizCloudPipelineRetriever, +) +from langchain_milvus.vectorstores import Milvus, Zilliz + +__all__ = [ + "Milvus", + "Zilliz", + "ZillizCloudPipelineRetriever", + "MilvusCollectionHybridSearchRetriever", +] diff --git a/libs/partners/milvus/langchain_milvus/py.typed b/libs/partners/milvus/langchain_milvus/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/langchain_milvus/retrievers/__init__.py b/libs/partners/milvus/langchain_milvus/retrievers/__init__.py new file mode 100644 index 00000000000..1edac3cb5af --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/retrievers/__init__.py @@ -0,0 +1,8 @@ +from langchain_milvus.retrievers.milvus_hybrid_search import ( + MilvusCollectionHybridSearchRetriever, +) +from langchain_milvus.retrievers.zilliz_cloud_pipeline_retriever import ( + ZillizCloudPipelineRetriever, +) + +__all__ = ["ZillizCloudPipelineRetriever", "MilvusCollectionHybridSearchRetriever"] diff --git a/libs/partners/milvus/langchain_milvus/retrievers/milvus_hybrid_search.py b/libs/partners/milvus/langchain_milvus/retrievers/milvus_hybrid_search.py new file mode 
100644 index 00000000000..6b6e692dd2c --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/retrievers/milvus_hybrid_search.py @@ -0,0 +1,160 @@ +from typing import Any, Dict, List, Optional, Union + +from langchain_core.callbacks import CallbackManagerForRetrieverRun +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.retrievers import BaseRetriever +from pymilvus import AnnSearchRequest, Collection +from pymilvus.client.abstract import BaseRanker, SearchResult # type: ignore + +from langchain_milvus.utils.sparse import BaseSparseEmbedding + + +class MilvusCollectionHybridSearchRetriever(BaseRetriever): + """This is a hybrid search retriever + that uses Milvus Collection to retrieve documents based on multiple fields. + For more information, please refer to: + https://milvus.io/docs/release_notes.md#Multi-Embedding---Hybrid-Search + """ + + collection: Collection + """Milvus Collection object.""" + rerank: BaseRanker + """Milvus ranker object. Such as WeightedRanker or RRFRanker.""" + anns_fields: List[str] + """The names of vector fields that are used for ANNS search.""" + field_embeddings: List[Union[Embeddings, BaseSparseEmbedding]] + """The embedding functions of each vector fields, + which can be either Embeddings or BaseSparseEmbedding.""" + field_search_params: Optional[List[Dict]] = None + """The search parameters of each vector fields. + If not specified, the default search parameters will be used.""" + field_limits: Optional[List[int]] = None + """Limit number of results for each ANNS field. 
+ If not specified, the default top_k will be used.""" + field_exprs: Optional[List[Optional[str]]] = None + """The boolean expression for filtering the search results.""" + top_k: int = 4 + """Final top-K number of documents to retrieve.""" + text_field: str = "text" + """The text field name, + which will be used as the `page_content` of a `Document` object.""" + output_fields: Optional[List[str]] = None + """Final output fields of the documents. + If not specified, all fields except the vector fields will be used as output fields, + which will be the `metadata` of a `Document` object.""" + + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + + # If some parameters are not specified, set default values + if self.field_search_params is None: + default_search_params = { + "metric_type": "L2", + "params": {"nprobe": 10}, + } + self.field_search_params = [default_search_params] * len(self.anns_fields) + if self.field_limits is None: + self.field_limits = [self.top_k] * len(self.anns_fields) + if self.field_exprs is None: + self.field_exprs = [None] * len(self.anns_fields) + + # Check the fields + self._validate_fields_num() + self.output_fields = self._get_output_fields() + self._validate_fields_name() + + # Load collection + self.collection.load() + + def _validate_fields_num(self) -> None: + assert ( + len(self.anns_fields) >= 2 + ), "At least two fields are required for hybrid search." + lengths = [len(self.anns_fields)] + if self.field_limits is not None: + lengths.append(len(self.field_limits)) + if self.field_exprs is not None: + lengths.append(len(self.field_exprs)) + + if not all(length == lengths[0] for length in lengths): + raise ValueError("All field-related lists must have the same length.") + + if len(self.field_search_params) != len(self.anns_fields): # type: ignore[arg-type] + raise ValueError( + "field_search_params must have the same length as anns_fields." 
+ ) + + def _validate_fields_name(self) -> None: + collection_fields = [x.name for x in self.collection.schema.fields] + for field in self.anns_fields: + assert ( + field in collection_fields + ), f"{field} is not a valid field in the collection." + assert ( + self.text_field in collection_fields + ), f"{self.text_field} is not a valid field in the collection." + for field in self.output_fields: # type: ignore[union-attr] + assert ( + field in collection_fields + ), f"{field} is not a valid field in the collection." + + def _get_output_fields(self) -> List[str]: + if self.output_fields: + return self.output_fields + output_fields = [x.name for x in self.collection.schema.fields] + for field in self.anns_fields: + if field in output_fields: + output_fields.remove(field) + if self.text_field not in output_fields: + output_fields.append(self.text_field) + return output_fields + + def _build_ann_search_requests(self, query: str) -> List[AnnSearchRequest]: + search_requests = [] + for ann_field, embedding, param, limit, expr in zip( + self.anns_fields, + self.field_embeddings, + self.field_search_params, # type: ignore[arg-type] + self.field_limits, # type: ignore[arg-type] + self.field_exprs, # type: ignore[arg-type] + ): + request = AnnSearchRequest( + data=[embedding.embed_query(query)], + anns_field=ann_field, + param=param, + limit=limit, + expr=expr, + ) + search_requests.append(request) + return search_requests + + def _parse_document(self, data: dict) -> Document: + return Document( + page_content=data.pop(self.text_field), + metadata=data, + ) + + def _process_search_result( + self, search_results: List[SearchResult] + ) -> List[Document]: + documents = [] + for result in search_results[0]: + data = {x: result.entity.get(x) for x in self.output_fields} # type: ignore[union-attr] + doc = self._parse_document(data) + documents.append(doc) + return documents + + def _get_relevant_documents( + self, + query: str, + *, + run_manager: CallbackManagerForRetrieverRun, 
+ **kwargs: Any, + ) -> List[Document]: + requests = self._build_ann_search_requests(query) + search_result = self.collection.hybrid_search( + requests, self.rerank, limit=self.top_k, output_fields=self.output_fields + ) + documents = self._process_search_result(search_result) + return documents diff --git a/libs/partners/milvus/langchain_milvus/retrievers/zilliz_cloud_pipeline_retriever.py b/libs/partners/milvus/langchain_milvus/retrievers/zilliz_cloud_pipeline_retriever.py new file mode 100644 index 00000000000..6fbccfa47fa --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/retrievers/zilliz_cloud_pipeline_retriever.py @@ -0,0 +1,215 @@ +from typing import Any, Dict, List, Optional + +import requests +from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun +from langchain_core.documents import Document +from langchain_core.retrievers import BaseRetriever + + +class ZillizCloudPipelineRetriever(BaseRetriever): + """`Zilliz Cloud Pipeline` retriever + + Args: + pipeline_ids (dict): A dictionary of pipeline ids. + Valid keys: "ingestion", "search", "deletion". + token (str): Zilliz Cloud's token. Defaults to "". + cloud_region (str='gcp-us-west1'): The region of Zilliz Cloud's cluster. + Defaults to 'gcp-us-west1'. + """ + + pipeline_ids: Dict + token: str = "" + cloud_region: str = "gcp-us-west1" + + def _get_relevant_documents( + self, + query: str, + top_k: int = 10, + offset: int = 0, + output_fields: List = [], + filter: str = "", + *, + run_manager: CallbackManagerForRetrieverRun, + ) -> List[Document]: + """ + Get documents relevant to a query. + + Args: + query (str): String to find relevant documents for + top_k (int=10): The number of results. Defaults to 10. + offset (int=0): The number of records to skip in the search result. + Defaults to 0. + output_fields (list=[]): The extra fields to present in output. + filter (str=""): The Milvus expression to filter search results. + Defaults to "". 
+ run_manager (CallBackManagerForRetrieverRun): The callbacks handler to use. + + Returns: + List of relevant documents + """ + if "search" in self.pipeline_ids: + search_pipe_id = self.pipeline_ids.get("search") + else: + raise Exception( + "A search pipeline id must be provided in pipeline_ids to " + "get relevant documents." + ) + domain = ( + f"https://controller.api.{self.cloud_region}.zillizcloud.com/v1/pipelines" + ) + headers = { + "Authorization": f"Bearer {self.token}", + "Accept": "application/json", + "Content-Type": "application/json", + } + url = f"{domain}/{search_pipe_id}/run" + + params = { + "data": {"query_text": query}, + "params": { + "limit": top_k, + "offset": offset, + "outputFields": output_fields, + "filter": filter, + }, + } + + response = requests.post(url, headers=headers, json=params) + if response.status_code != 200: + raise RuntimeError(response.text) + response_dict = response.json() + if response_dict["code"] != 200: + raise RuntimeError(response_dict) + response_data = response_dict["data"] + search_results = response_data["result"] + return [ + Document( + page_content=result.pop("text") + if "text" in result + else result.pop("chunk_text"), + metadata=result, + ) + for result in search_results + ] + + def add_texts( + self, texts: List[str], metadata: Optional[Dict[str, Any]] = None + ) -> Dict: + """ + Add documents to store. + Only supported by a text ingestion pipeline in Zilliz Cloud. + + Args: + texts (List[str]): A list of text strings. + metadata (Dict[str, Any]): A key-value dictionary of metadata will + be inserted as preserved fields required by ingestion pipeline. + Defaults to None. + """ + if "ingestion" in self.pipeline_ids: + ingeset_pipe_id = self.pipeline_ids.get("ingestion") + else: + raise Exception( + "An ingestion pipeline id must be provided in pipeline_ids to" + " add documents." 
+ ) + domain = ( + f"https://controller.api.{self.cloud_region}.zillizcloud.com/v1/pipelines" + ) + headers = { + "Authorization": f"Bearer {self.token}", + "Accept": "application/json", + "Content-Type": "application/json", + } + url = f"{domain}/{ingeset_pipe_id}/run" + + metadata = {} if metadata is None else metadata + params = {"data": {"text_list": texts}} + params["data"].update(metadata) + + response = requests.post(url, headers=headers, json=params) + if response.status_code != 200: + raise Exception(response.text) + response_dict = response.json() + if response_dict["code"] != 200: + raise Exception(response_dict) + response_data = response_dict["data"] + return response_data + + def add_doc_url( + self, doc_url: str, metadata: Optional[Dict[str, Any]] = None + ) -> Dict: + """ + Add a document from url. + Only supported by a document ingestion pipeline in Zilliz Cloud. + + Args: + doc_url: A document url. + metadata (Dict[str, Any]): A key-value dictionary of metadata will + be inserted as preserved fields required by ingestion pipeline. + Defaults to None. + """ + if "ingestion" in self.pipeline_ids: + ingest_pipe_id = self.pipeline_ids.get("ingestion") + else: + raise Exception( + "An ingestion pipeline id must be provided in pipeline_ids to " + "add documents." 
+ ) + domain = ( + f"https://controller.api.{self.cloud_region}.zillizcloud.com/v1/pipelines" + ) + headers = { + "Authorization": f"Bearer {self.token}", + "Accept": "application/json", + "Content-Type": "application/json", + } + url = f"{domain}/{ingest_pipe_id}/run" + + params = {"data": {"doc_url": doc_url}} + metadata = {} if metadata is None else metadata + params["data"].update(metadata) + + response = requests.post(url, headers=headers, json=params) + if response.status_code != 200: + raise Exception(response.text) + response_dict = response.json() + if response_dict["code"] != 200: + raise Exception(response_dict) + response_data = response_dict["data"] + return response_data + + def delete(self, key: str, value: Any) -> Dict: + """ + Delete documents. Only supported by a deletion pipeline in Zilliz Cloud. + + Args: + key: input name to run the deletion pipeline + value: input value to run deletion pipeline + """ + if "deletion" in self.pipeline_ids: + deletion_pipe_id = self.pipeline_ids.get("deletion") + else: + raise Exception( + "A deletion pipeline id must be provided in pipeline_ids to " + "add documents." 
+ ) + domain = ( + f"https://controller.api.{self.cloud_region}.zillizcloud.com/v1/pipelines" + ) + headers = { + "Authorization": f"Bearer {self.token}", + "Accept": "application/json", + "Content-Type": "application/json", + } + url = f"{domain}/{deletion_pipe_id}/run" + + params = {"data": {key: value}} + + response = requests.post(url, headers=headers, json=params) + if response.status_code != 200: + raise Exception(response.text) + response_dict = response.json() + if response_dict["code"] != 200: + raise Exception(response_dict) + response_data = response_dict["data"] + return response_data diff --git a/libs/partners/milvus/langchain_milvus/utils/__init__.py b/libs/partners/milvus/langchain_milvus/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/langchain_milvus/utils/sparse.py b/libs/partners/milvus/langchain_milvus/utils/sparse.py new file mode 100644 index 00000000000..027c978c65d --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/utils/sparse.py @@ -0,0 +1,54 @@ +from abc import ABC, abstractmethod +from typing import Dict, List + +from scipy.sparse import csr_array # type: ignore + + +class BaseSparseEmbedding(ABC): + """Interface for Sparse embedding models. + You can inherit from it and implement your custom sparse embedding model. + """ + + @abstractmethod + def embed_query(self, query: str) -> Dict[int, float]: + """Embed query text.""" + + @abstractmethod + def embed_documents(self, texts: List[str]) -> List[Dict[int, float]]: + """Embed search docs.""" + + +class BM25SparseEmbedding(BaseSparseEmbedding): + """This is a class that inherits BaseSparseEmbedding + and implements a sparse vector embedding model based on BM25. + This class uses the BM25 model in Milvus model to implement sparse vector embedding. + This model requires pymilvus[model] to be installed. 
+ `pip install pymilvus[model]` + For more information please refer to: + https://milvus.io/docs/embed-with-bm25.md + """ + + def __init__(self, corpus: List[str], language: str = "en"): + from pymilvus.model.sparse import BM25EmbeddingFunction # type: ignore + from pymilvus.model.sparse.bm25.tokenizers import ( # type: ignore + build_default_analyzer, + ) + + self.analyzer = build_default_analyzer(language=language) + self.bm25_ef = BM25EmbeddingFunction(self.analyzer, num_workers=1) + self.bm25_ef.fit(corpus) + + def embed_query(self, text: str) -> Dict[int, float]: + return self._sparse_to_dict(self.bm25_ef.encode_queries([text])) + + def embed_documents(self, texts: List[str]) -> List[Dict[int, float]]: + sparse_arrays = self.bm25_ef.encode_documents(texts) + return [self._sparse_to_dict(sparse_array) for sparse_array in sparse_arrays] + + def _sparse_to_dict(self, sparse_array: csr_array) -> Dict[int, float]: + row_indices, col_indices = sparse_array.nonzero() + non_zero_values = sparse_array.data + result_dict = {} + for col_index, value in zip(col_indices, non_zero_values): + result_dict[col_index] = value + return result_dict diff --git a/libs/partners/milvus/langchain_milvus/vectorstores/__init__.py b/libs/partners/milvus/langchain_milvus/vectorstores/__init__.py new file mode 100644 index 00000000000..5c6f304db98 --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/vectorstores/__init__.py @@ -0,0 +1,7 @@ +from langchain_milvus.vectorstores.milvus import Milvus +from langchain_milvus.vectorstores.zilliz import Zilliz + +__all__ = [ + "Milvus", + "Zilliz", +] diff --git a/libs/partners/milvus/langchain_milvus/vectorstores/milvus.py b/libs/partners/milvus/langchain_milvus/vectorstores/milvus.py new file mode 100644 index 00000000000..712d323ccbf --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/vectorstores/milvus.py @@ -0,0 +1,1148 @@ +from __future__ import annotations + +import logging +from typing import Any, Iterable, List, Optional, 
Tuple, Union +from uuid import uuid4 + +import numpy as np +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings +from langchain_core.vectorstores import VectorStore + +logger = logging.getLogger(__name__) + +DEFAULT_MILVUS_CONNECTION = { + "uri": "http://localhost:19530", +} + +Matrix = Union[List[List[float]], List[np.ndarray], np.ndarray] + + +def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: + """Row-wise cosine similarity between two equal-width matrices.""" + if len(X) == 0 or len(Y) == 0: + return np.array([]) + + X = np.array(X) + Y = np.array(Y) + if X.shape[1] != Y.shape[1]: + raise ValueError( + f"Number of columns in X and Y must be the same. X has shape {X.shape} " + f"and Y has shape {Y.shape}." + ) + try: + import simsimd as simd # type: ignore + + X = np.array(X, dtype=np.float32) + Y = np.array(Y, dtype=np.float32) + Z = 1 - simd.cdist(X, Y, metric="cosine") + if isinstance(Z, float): + return np.array([Z]) + return np.array(Z) + except ImportError: + logger.debug( + "Unable to import simsimd, defaulting to NumPy implementation. If you want " + "to use simsimd please install with `pip install simsimd`." + ) + X_norm = np.linalg.norm(X, axis=1) + Y_norm = np.linalg.norm(Y, axis=1) + # Ignore divide by zero errors run time warnings as those are handled below. 
+ with np.errstate(divide="ignore", invalid="ignore"): + similarity = np.dot(X, Y.T) / np.outer(X_norm, Y_norm) + similarity[np.isnan(similarity) | np.isinf(similarity)] = 0.0 + return similarity + + +def maximal_marginal_relevance( + query_embedding: np.ndarray, + embedding_list: list, + lambda_mult: float = 0.5, + k: int = 4, +) -> List[int]: + """Calculate maximal marginal relevance.""" + if min(k, len(embedding_list)) <= 0: + return [] + if query_embedding.ndim == 1: + query_embedding = np.expand_dims(query_embedding, axis=0) + similarity_to_query = cosine_similarity(query_embedding, embedding_list)[0] + most_similar = int(np.argmax(similarity_to_query)) + idxs = [most_similar] + selected = np.array([embedding_list[most_similar]]) + while len(idxs) < min(k, len(embedding_list)): + best_score = -np.inf + idx_to_add = -1 + similarity_to_selected = cosine_similarity(embedding_list, selected) + for i, query_score in enumerate(similarity_to_query): + if i in idxs: + continue + redundant_score = max(similarity_to_selected[i]) + equation_score = ( + lambda_mult * query_score - (1 - lambda_mult) * redundant_score + ) + if equation_score > best_score: + best_score = equation_score + idx_to_add = i + idxs.append(idx_to_add) + selected = np.append(selected, [embedding_list[idx_to_add]], axis=0) + return idxs + + +class Milvus(VectorStore): + """`Milvus` vector store. + + You need to install `pymilvus` and run Milvus. + + See the following documentation for how to run a Milvus instance: + https://milvus.io/docs/install_standalone-docker.md + + If looking for a hosted Milvus, take a look at this documentation: + https://zilliz.com/cloud and make use of the Zilliz vectorstore found in + this project. + + IF USING L2/IP metric, IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. + + Args: + embedding_function (Embeddings): Function used to embed the text. + collection_name (str): Which Milvus collection to use. Defaults to + "LangChainCollection". 
+ collection_description (str): The description of the collection. Defaults to + "". + collection_properties (Optional[dict[str, any]]): The collection properties. + Defaults to None. + If set, will override collection existing properties. + For example: {"collection.ttl.seconds": 60}. + connection_args (Optional[dict[str, any]]): The connection args used for + this class comes in the form of a dict. + consistency_level (str): The consistency level to use for a collection. + Defaults to "Session". + index_params (Optional[dict]): Which index params to use. Defaults to + HNSW/AUTOINDEX depending on service. + search_params (Optional[dict]): Which search params to use. Defaults to + default of index. + drop_old (Optional[bool]): Whether to drop the current collection. Defaults + to False. + auto_id (bool): Whether to enable auto id for primary key. Defaults to False. + If False, you needs to provide text ids (string less than 65535 bytes). + If True, Milvus will generate unique integers as primary keys. + primary_field (str): Name of the primary key field. Defaults to "pk". + text_field (str): Name of the text field. Defaults to "text". + vector_field (str): Name of the vector field. Defaults to "vector". + metadata_field (str): Name of the metadta field. Defaults to None. + When metadata_field is specified, + the document's metadata will store as json. + + The connection args used for this class comes in the form of a dict, + here are a few of the options: + address (str): The actual address of Milvus + instance. Example address: "localhost:19530" + uri (str): The uri of Milvus instance. Example uri: + "http://randomwebsite:19530", + "tcp:foobarsite:19530", + "https://ok.s3.south.com:19530". + or "path/to/local/directory/milvus_demo.db" for Milvus Lite. + host (str): The host of Milvus instance. Default at "localhost", + PyMilvus will fill in the default host if only port is provided. + port (str/int): The port of Milvus instance. 
Default at 19530, PyMilvus + will fill in the default port if only host is provided. + user (str): Use which user to connect to Milvus instance. If user and + password are provided, we will add related header in every RPC call. + password (str): Required when user is provided. The password + corresponding to the user. + secure (bool): Default is false. If set to true, tls will be enabled. + client_key_path (str): If use tls two-way authentication, need to + write the client.key path. + client_pem_path (str): If use tls two-way authentication, need to + write the client.pem path. + ca_pem_path (str): If use tls two-way authentication, need to write + the ca.pem path. + server_pem_path (str): If use tls one-way authentication, need to + write the server.pem path. + server_name (str): If use tls, need to write the common name. + + Example: + .. code-block:: python + + from langchain_milvus.vectorstores import Milvus + from langchain_openai.embeddings import OpenAIEmbeddings + + embedding = OpenAIEmbeddings() + # Connect to a milvus instance on localhost + milvus_store = Milvus( + embedding_function = Embeddings, + collection_name = "LangChainCollection", + drop_old = True, + auto_id = True + ) + + Raises: + ValueError: If the pymilvus python package is not installed. 
+ """ + + def __init__( + self, + embedding_function: Embeddings, + collection_name: str = "LangChainCollection", + collection_description: str = "", + collection_properties: Optional[dict[str, Any]] = None, + connection_args: Optional[dict[str, Any]] = None, + consistency_level: str = "Session", + index_params: Optional[dict] = None, + search_params: Optional[dict] = None, + drop_old: Optional[bool] = False, + auto_id: bool = False, + *, + primary_field: str = "pk", + text_field: str = "text", + vector_field: str = "vector", + metadata_field: Optional[str] = None, + partition_key_field: Optional[str] = None, + partition_names: Optional[list] = None, + replica_number: int = 1, + timeout: Optional[float] = None, + num_shards: Optional[int] = None, + ): + """Initialize the Milvus vector store.""" + try: + from pymilvus import Collection, utility + except ImportError: + raise ValueError( + "Could not import pymilvus python package. " + "Please install it with `pip install pymilvus`." + ) + + # Default search params when one is not provided. 
+ self.default_search_params = { + "IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}}, + "IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}}, + "IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, + "HNSW": {"metric_type": "L2", "params": {"ef": 10}}, + "RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}}, + "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}}, + "RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}}, + "IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}}, + "ANNOY": {"metric_type": "L2", "params": {"search_k": 10}}, + "SCANN": {"metric_type": "L2", "params": {"search_k": 10}}, + "AUTOINDEX": {"metric_type": "L2", "params": {}}, + "GPU_CAGRA": { + "metric_type": "L2", + "params": { + "itopk_size": 128, + "search_width": 4, + "min_iterations": 0, + "max_iterations": 0, + "team_size": 0, + }, + }, + "GPU_IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}}, + "GPU_IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, + } + + self.embedding_func = embedding_function + self.collection_name = collection_name + self.collection_description = collection_description + self.collection_properties = collection_properties + self.index_params = index_params + self.search_params = search_params + self.consistency_level = consistency_level + self.auto_id = auto_id + + # In order for a collection to be compatible, pk needs to be varchar + self._primary_field = primary_field + # In order for compatibility, the text field will need to be called "text" + self._text_field = text_field + # In order for compatibility, the vector field needs to be called "vector" + self._vector_field = vector_field + self._metadata_field = metadata_field + self._partition_key_field = partition_key_field + self.fields: list[str] = [] + self.partition_names = partition_names + self.replica_number = replica_number + self.timeout = timeout + self.num_shards = num_shards + + # Create the connection to the server + if connection_args is 
None: + connection_args = DEFAULT_MILVUS_CONNECTION + self.alias = self._create_connection_alias(connection_args) + self.col: Optional[Collection] = None + + # Grab the existing collection if it exists + if utility.has_collection(self.collection_name, using=self.alias): + self.col = Collection( + self.collection_name, + using=self.alias, + ) + if self.collection_properties is not None: + self.col.set_properties(self.collection_properties) + # If need to drop old, drop it + if drop_old and isinstance(self.col, Collection): + self.col.drop() + self.col = None + + # Initialize the vector store + self._init( + partition_names=partition_names, + replica_number=replica_number, + timeout=timeout, + ) + + @property + def embeddings(self) -> Embeddings: + return self.embedding_func + + def _create_connection_alias(self, connection_args: dict) -> str: + """Create the connection to the Milvus server.""" + from pymilvus import MilvusException, connections + + # Grab the connection arguments that are used for checking existing connection + host: str = connection_args.get("host", None) + port: Union[str, int] = connection_args.get("port", None) + address: str = connection_args.get("address", None) + uri: str = connection_args.get("uri", None) + user = connection_args.get("user", None) + + # Order of use is host/port, uri, address + if host is not None and port is not None: + given_address = str(host) + ":" + str(port) + elif uri is not None: + if uri.startswith("https://"): + given_address = uri.split("https://")[1] + elif uri.startswith("http://"): + given_address = uri.split("http://")[1] + else: + given_address = uri # Milvus lite + elif address is not None: + given_address = address + else: + given_address = None + logger.debug("Missing standard address type for reuse attempt") + + # User defaults to empty string when getting connection info + if user is not None: + tmp_user = user + else: + tmp_user = "" + + # If a valid address was given, then check if a connection exists 
+ if given_address is not None: + for con in connections.list_connections(): + addr = connections.get_connection_addr(con[0]) + if ( + con[1] + and ("address" in addr) + and (addr["address"] == given_address) + and ("user" in addr) + and (addr["user"] == tmp_user) + ): + logger.debug("Using previous connection: %s", con[0]) + return con[0] + + # Generate a new connection if one doesn't exist + alias = uuid4().hex + try: + connections.connect(alias=alias, **connection_args) + logger.debug("Created new connection using: %s", alias) + return alias + except MilvusException as e: + logger.error("Failed to create new connection using: %s", alias) + raise e + + def _init( + self, + embeddings: Optional[list] = None, + metadatas: Optional[list[dict]] = None, + partition_names: Optional[list] = None, + replica_number: int = 1, + timeout: Optional[float] = None, + ) -> None: + if embeddings is not None: + self._create_collection(embeddings, metadatas) + self._extract_fields() + self._create_index() + self._create_search_params() + self._load( + partition_names=partition_names, + replica_number=replica_number, + timeout=timeout, + ) + + def _create_collection( + self, embeddings: list, metadatas: Optional[list[dict]] = None + ) -> None: + from pymilvus import ( + Collection, + CollectionSchema, + DataType, + FieldSchema, + MilvusException, + ) + from pymilvus.orm.types import infer_dtype_bydata # type: ignore + + # Determine embedding dim + dim = len(embeddings[0]) + fields = [] + if self._metadata_field is not None: + fields.append(FieldSchema(self._metadata_field, DataType.JSON)) + else: + # Determine metadata schema + if metadatas: + # Create FieldSchema for each entry in metadata. 
+ for key, value in metadatas[0].items(): + # Infer the corresponding datatype of the metadata + dtype = infer_dtype_bydata(value) + # Datatype isn't compatible + if dtype == DataType.UNKNOWN or dtype == DataType.NONE: + logger.error( + ( + "Failure to create collection, " + "unrecognized dtype for key: %s" + ), + key, + ) + raise ValueError(f"Unrecognized datatype for {key}.") + # Dataype is a string/varchar equivalent + elif dtype == DataType.VARCHAR: + fields.append( + FieldSchema(key, DataType.VARCHAR, max_length=65_535) + ) + else: + fields.append(FieldSchema(key, dtype)) + + # Create the text field + fields.append( + FieldSchema(self._text_field, DataType.VARCHAR, max_length=65_535) + ) + # Create the primary key field + if self.auto_id: + fields.append( + FieldSchema( + self._primary_field, DataType.INT64, is_primary=True, auto_id=True + ) + ) + else: + fields.append( + FieldSchema( + self._primary_field, + DataType.VARCHAR, + is_primary=True, + auto_id=False, + max_length=65_535, + ) + ) + # Create the vector field, supports binary or float vectors + fields.append( + FieldSchema(self._vector_field, infer_dtype_bydata(embeddings[0]), dim=dim) + ) + + # Create the schema for the collection + schema = CollectionSchema( + fields, + description=self.collection_description, + partition_key_field=self._partition_key_field, + ) + + # Create the collection + try: + if self.num_shards is not None: + # Issue with defaults: + # https://github.com/milvus-io/pymilvus/blob/59bf5e811ad56e20946559317fed855330758d9c/pymilvus/client/prepare.py#L82-L85 + self.col = Collection( + name=self.collection_name, + schema=schema, + consistency_level=self.consistency_level, + using=self.alias, + num_shards=self.num_shards, + ) + else: + self.col = Collection( + name=self.collection_name, + schema=schema, + consistency_level=self.consistency_level, + using=self.alias, + ) + # Set the collection properties if they exist + if self.collection_properties is not None: + 
self.col.set_properties(self.collection_properties) + except MilvusException as e: + logger.error( + "Failed to create collection: %s error: %s", self.collection_name, e + ) + raise e + + def _extract_fields(self) -> None: + """Grab the existing fields from the Collection""" + from pymilvus import Collection + + if isinstance(self.col, Collection): + schema = self.col.schema + for x in schema.fields: + self.fields.append(x.name) + + def _get_index(self) -> Optional[dict[str, Any]]: + """Return the vector index information if it exists""" + from pymilvus import Collection + + if isinstance(self.col, Collection): + for x in self.col.indexes: + if x.field_name == self._vector_field: + return x.to_dict() + return None + + def _create_index(self) -> None: + """Create a index on the collection""" + from pymilvus import Collection, MilvusException + + if isinstance(self.col, Collection) and self._get_index() is None: + try: + # If no index params, use a default HNSW based one + if self.index_params is None: + self.index_params = { + "metric_type": "L2", + "index_type": "HNSW", + "params": {"M": 8, "efConstruction": 64}, + } + + try: + self.col.create_index( + self._vector_field, + index_params=self.index_params, + using=self.alias, + ) + + # If default did not work, most likely on Zilliz Cloud + except MilvusException: + # Use AUTOINDEX based index + self.index_params = { + "metric_type": "L2", + "index_type": "AUTOINDEX", + "params": {}, + } + self.col.create_index( + self._vector_field, + index_params=self.index_params, + using=self.alias, + ) + logger.debug( + "Successfully created an index on collection: %s", + self.collection_name, + ) + + except MilvusException as e: + logger.error( + "Failed to create an index on collection: %s", self.collection_name + ) + raise e + + def _create_search_params(self) -> None: + """Generate search params based on the current index type""" + from pymilvus import Collection + + if isinstance(self.col, Collection) and self.search_params 
is None: + index = self._get_index() + if index is not None: + index_type: str = index["index_param"]["index_type"] + metric_type: str = index["index_param"]["metric_type"] + self.search_params = self.default_search_params[index_type] + self.search_params["metric_type"] = metric_type + + def _load( + self, + partition_names: Optional[list] = None, + replica_number: int = 1, + timeout: Optional[float] = None, + ) -> None: + """Load the collection if available.""" + from pymilvus import Collection, utility + from pymilvus.client.types import LoadState # type: ignore + + timeout = self.timeout or timeout + if ( + isinstance(self.col, Collection) + and self._get_index() is not None + and utility.load_state(self.collection_name, using=self.alias) + == LoadState.NotLoad + ): + self.col.load( + partition_names=partition_names, + replica_number=replica_number, + timeout=timeout, + ) + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + timeout: Optional[float] = None, + batch_size: int = 1000, + *, + ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + """Insert text data into Milvus. + + Inserting data when the collection has not be made yet will result + in creating a new Collection. The data of the first entity decides + the schema of the new collection, the dim is extracted from the first + embedding and the columns are decided by the first metadata dict. + Metadata keys will need to be present for all inserted values. At + the moment there is no None equivalent in Milvus. + + Args: + texts (Iterable[str]): The texts to embed, it is assumed + that they all fit in memory. + metadatas (Optional[List[dict]]): Metadata dicts attached to each of + the texts. Defaults to None. + should be less than 65535 bytes. Required and work when auto_id is False. + timeout (Optional[float]): Timeout for each batch insert. Defaults + to None. + batch_size (int, optional): Batch size to use for insertion. + Defaults to 1000. 
+ ids (Optional[List[str]]): List of text ids. The length of each item + + Raises: + MilvusException: Failure to add texts + + Returns: + List[str]: The resulting keys for each inserted element. + """ + from pymilvus import Collection, MilvusException + + texts = list(texts) + if not self.auto_id: + assert isinstance( + ids, list + ), "A list of valid ids are required when auto_id is False." + assert len(set(ids)) == len( + texts + ), "Different lengths of texts and unique ids are provided." + assert all( + len(x.encode()) <= 65_535 for x in ids + ), "Each id should be a string less than 65535 bytes." + + try: + embeddings = self.embedding_func.embed_documents(texts) + except NotImplementedError: + embeddings = [self.embedding_func.embed_query(x) for x in texts] + + if len(embeddings) == 0: + logger.debug("Nothing to insert, skipping.") + return [] + + # If the collection hasn't been initialized yet, perform all steps to do so + if not isinstance(self.col, Collection): + kwargs = {"embeddings": embeddings, "metadatas": metadatas} + if self.partition_names: + kwargs["partition_names"] = self.partition_names + if self.replica_number: + kwargs["replica_number"] = self.replica_number + if self.timeout: + kwargs["timeout"] = self.timeout + self._init(**kwargs) + + # Dict to hold all insert columns + insert_dict: dict[str, list] = { + self._text_field: texts, + self._vector_field: embeddings, + } + + if not self.auto_id: + insert_dict[self._primary_field] = ids # type: ignore[assignment] + + if self._metadata_field is not None: + for d in metadatas: # type: ignore[union-attr] + insert_dict.setdefault(self._metadata_field, []).append(d) + else: + # Collect the metadata into the insert dict. 
+ if metadatas is not None: + for d in metadatas: + for key, value in d.items(): + keys = ( + [x for x in self.fields if x != self._primary_field] + if self.auto_id + else [x for x in self.fields] + ) + if key in keys: + insert_dict.setdefault(key, []).append(value) + + # Total insert count + vectors: list = insert_dict[self._vector_field] + total_count = len(vectors) + + pks: list[str] = [] + + assert isinstance(self.col, Collection) + for i in range(0, total_count, batch_size): + # Grab end index + end = min(i + batch_size, total_count) + # Convert dict to list of lists batch for insertion + insert_list = [ + insert_dict[x][i:end] for x in self.fields if x in insert_dict + ] + # Insert into the collection. + try: + res: Collection + timeout = self.timeout or timeout + res = self.col.insert(insert_list, timeout=timeout, **kwargs) + pks.extend(res.primary_keys) + except MilvusException as e: + logger.error( + "Failed to insert batch starting at entity: %s/%s", i, total_count + ) + raise e + return pks + + def similarity_search( + self, + query: str, + k: int = 4, + param: Optional[dict] = None, + expr: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs: Any, + ) -> List[Document]: + """Perform a similarity search against the query string. + + Args: + query (str): The text to search. + k (int, optional): How many results to return. Defaults to 4. + param (dict, optional): The search params for the index type. + Defaults to None. + expr (str, optional): Filtering expression. Defaults to None. + timeout (int, optional): How long to wait before timeout error. + Defaults to None. + kwargs: Collection.search() keyword arguments. + + Returns: + List[Document]: Document results for search. 
+ """ + if self.col is None: + logger.debug("No existing collection to search.") + return [] + timeout = self.timeout or timeout + res = self.similarity_search_with_score( + query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs + ) + return [doc for doc, _ in res] + + def similarity_search_by_vector( + self, + embedding: List[float], + k: int = 4, + param: Optional[dict] = None, + expr: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs: Any, + ) -> List[Document]: + """Perform a similarity search against the query string. + + Args: + embedding (List[float]): The embedding vector to search. + k (int, optional): How many results to return. Defaults to 4. + param (dict, optional): The search params for the index type. + Defaults to None. + expr (str, optional): Filtering expression. Defaults to None. + timeout (int, optional): How long to wait before timeout error. + Defaults to None. + kwargs: Collection.search() keyword arguments. + + Returns: + List[Document]: Document results for search. + """ + if self.col is None: + logger.debug("No existing collection to search.") + return [] + timeout = self.timeout or timeout + res = self.similarity_search_with_score_by_vector( + embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs + ) + return [doc for doc, _ in res] + + def similarity_search_with_score( + self, + query: str, + k: int = 4, + param: Optional[dict] = None, + expr: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Perform a search on a query string and return results with score. + + For more information about the search parameters, take a look at the pymilvus + documentation found here: + https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md + + Args: + query (str): The text being searched. + k (int, optional): The amount of results to return. Defaults to 4. + param (dict): The search params for the specified index. 
+ Defaults to None. + expr (str, optional): Filtering expression. Defaults to None. + timeout (float, optional): How long to wait before timeout error. + Defaults to None. + kwargs: Collection.search() keyword arguments. + + Returns: + List[float], List[Tuple[Document, any, any]]: + """ + if self.col is None: + logger.debug("No existing collection to search.") + return [] + + # Embed the query text. + embedding = self.embedding_func.embed_query(query) + timeout = self.timeout or timeout + res = self.similarity_search_with_score_by_vector( + embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs + ) + return res + + def similarity_search_with_score_by_vector( + self, + embedding: List[float], + k: int = 4, + param: Optional[dict] = None, + expr: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Perform a search on a query string and return results with score. + + For more information about the search parameters, take a look at the pymilvus + documentation found here: + https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md + + Args: + embedding (List[float]): The embedding vector being searched. + k (int, optional): The amount of results to return. Defaults to 4. + param (dict): The search params for the specified index. + Defaults to None. + expr (str, optional): Filtering expression. Defaults to None. + timeout (float, optional): How long to wait before timeout error. + Defaults to None. + kwargs: Collection.search() keyword arguments. + + Returns: + List[Tuple[Document, float]]: Result doc and score. + """ + if self.col is None: + logger.debug("No existing collection to search.") + return [] + + if param is None: + param = self.search_params + + # Determine result metadata fields with PK. + output_fields = self.fields[:] + output_fields.remove(self._vector_field) + timeout = self.timeout or timeout + # Perform the search. 
+ res = self.col.search( + data=[embedding], + anns_field=self._vector_field, + param=param, + limit=k, + expr=expr, + output_fields=output_fields, + timeout=timeout, + **kwargs, + ) + # Organize results. + ret = [] + for result in res[0]: + data = {x: result.entity.get(x) for x in output_fields} + doc = self._parse_document(data) + pair = (doc, result.score) + ret.append(pair) + + return ret + + def max_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + param: Optional[dict] = None, + expr: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs: Any, + ) -> List[Document]: + """Perform a search and return results that are reordered by MMR. + + Args: + query (str): The text being searched. + k (int, optional): How many results to give. Defaults to 4. + fetch_k (int, optional): Total results to select k from. + Defaults to 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5 + param (dict, optional): The search params for the specified index. + Defaults to None. + expr (str, optional): Filtering expression. Defaults to None. + timeout (float, optional): How long to wait before timeout error. + Defaults to None. + kwargs: Collection.search() keyword arguments. + + + Returns: + List[Document]: Document results for search. 
+ """ + if self.col is None: + logger.debug("No existing collection to search.") + return [] + + embedding = self.embedding_func.embed_query(query) + timeout = self.timeout or timeout + return self.max_marginal_relevance_search_by_vector( + embedding=embedding, + k=k, + fetch_k=fetch_k, + lambda_mult=lambda_mult, + param=param, + expr=expr, + timeout=timeout, + **kwargs, + ) + + def max_marginal_relevance_search_by_vector( + self, + embedding: list[float], + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + param: Optional[dict] = None, + expr: Optional[str] = None, + timeout: Optional[float] = None, + **kwargs: Any, + ) -> List[Document]: + """Perform a search and return results that are reordered by MMR. + + Args: + embedding (str): The embedding vector being searched. + k (int, optional): How many results to give. Defaults to 4. + fetch_k (int, optional): Total results to select k from. + Defaults to 20. + lambda_mult: Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5 + param (dict, optional): The search params for the specified index. + Defaults to None. + expr (str, optional): Filtering expression. Defaults to None. + timeout (float, optional): How long to wait before timeout error. + Defaults to None. + kwargs: Collection.search() keyword arguments. + + Returns: + List[Document]: Document results for search. + """ + if self.col is None: + logger.debug("No existing collection to search.") + return [] + + if param is None: + param = self.search_params + + # Determine result metadata fields. + output_fields = self.fields[:] + output_fields.remove(self._vector_field) + timeout = self.timeout or timeout + # Perform the search. 
+ res = self.col.search( + data=[embedding], + anns_field=self._vector_field, + param=param, + limit=fetch_k, + expr=expr, + output_fields=output_fields, + timeout=timeout, + **kwargs, + ) + # Organize results. + ids = [] + documents = [] + scores = [] + for result in res[0]: + data = {x: result.entity.get(x) for x in output_fields} + doc = self._parse_document(data) + documents.append(doc) + scores.append(result.score) + ids.append(result.id) + + vectors = self.col.query( + expr=f"{self._primary_field} in {ids}", + output_fields=[self._primary_field, self._vector_field], + timeout=timeout, + ) + # Reorganize the results from query to match search order. + vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors} + + ordered_result_embeddings = [vectors[x] for x in ids] + + # Get the new order of results. + new_ordering = maximal_marginal_relevance( + np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult + ) + + # Reorder the values and return. + ret = [] + for x in new_ordering: + # Function can return -1 index + if x == -1: + break + else: + ret.append(documents[x]) + return ret + + def delete( # type: ignore[no-untyped-def] + self, ids: Optional[List[str]] = None, expr: Optional[str] = None, **kwargs: str + ): + """Delete by vector ID or boolean expression. + Refer to [Milvus documentation](https://milvus.io/docs/delete_data.md) + for notes and examples of expressions. + + Args: + ids: List of ids to delete. + expr: Boolean expression that specifies the entities to delete. + kwargs: Other parameters in Milvus delete api. + """ + if isinstance(ids, list) and len(ids) > 0: + if expr is not None: + logger.warning( + "Both ids and expr are provided. " "Ignore expr and delete by ids." + ) + expr = f"{self._primary_field} in {ids}" + else: + assert isinstance( + expr, str + ), "Either ids list or expr string must be provided." 
+ return self.col.delete(expr=expr, **kwargs) # type: ignore[union-attr] + + @classmethod + def from_texts( + cls, + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + collection_name: str = "LangChainCollection", + connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION, + consistency_level: str = "Session", + index_params: Optional[dict] = None, + search_params: Optional[dict] = None, + drop_old: bool = False, + *, + ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> Milvus: + """Create a Milvus collection, indexes it with HNSW, and insert data. + + Args: + texts (List[str]): Text data. + embedding (Embeddings): Embedding function. + metadatas (Optional[List[dict]]): Metadata for each text if it exists. + Defaults to None. + collection_name (str, optional): Collection name to use. Defaults to + "LangChainCollection". + connection_args (dict[str, Any], optional): Connection args to use. Defaults + to DEFAULT_MILVUS_CONNECTION. + consistency_level (str, optional): Which consistency level to use. Defaults + to "Session". + index_params (Optional[dict], optional): Which index_params to use. Defaults + to None. + search_params (Optional[dict], optional): Which search params to use. + Defaults to None. + drop_old (Optional[bool], optional): Whether to drop the collection with + that name if it exists. Defaults to False. + ids (Optional[List[str]]): List of text ids. Defaults to None. 
+ + Returns: + Milvus: Milvus Vector Store + """ + if isinstance(ids, list) and len(ids) > 0: + auto_id = False + else: + auto_id = True + + vector_db = cls( + embedding_function=embedding, + collection_name=collection_name, + connection_args=connection_args, + consistency_level=consistency_level, + index_params=index_params, + search_params=search_params, + drop_old=drop_old, + auto_id=auto_id, + **kwargs, + ) + vector_db.add_texts(texts=texts, metadatas=metadatas, ids=ids) + return vector_db + + def _parse_document(self, data: dict) -> Document: + return Document( + page_content=data.pop(self._text_field), + metadata=data.pop(self._metadata_field) if self._metadata_field else data, + ) + + def get_pks(self, expr: str, **kwargs: Any) -> List[int] | None: + """Get primary keys with expression + + Args: + expr: Expression - E.g: "id in [1, 2]", or "title LIKE 'Abc%'" + + Returns: + List[int]: List of IDs (Primary Keys) + """ + + from pymilvus import MilvusException + + if self.col is None: + logger.debug("No existing collection to get pk.") + return None + + try: + query_result = self.col.query( + expr=expr, output_fields=[self._primary_field] + ) + except MilvusException as exc: + logger.error("Failed to get ids: %s error: %s", self.collection_name, exc) + raise exc + pks = [item.get(self._primary_field) for item in query_result] + return pks + + def upsert( + self, + ids: Optional[List[str]] = None, + documents: List[Document] | None = None, + **kwargs: Any, + ) -> List[str] | None: + """Update/Insert documents to the vectorstore. + + Args: + ids: IDs to update - Let's call get_pks to get ids with expression \n + documents (List[Document]): Documents to add to the vectorstore. + + Returns: + List[str]: IDs of the added texts. 
+ """ + + from pymilvus import MilvusException + + if documents is None or len(documents) == 0: + logger.debug("No documents to upsert.") + return None + + if ids is not None and len(ids): + try: + self.delete(ids=ids) + except MilvusException: + pass + try: + return self.add_documents(documents=documents, **kwargs) + except MilvusException as exc: + logger.error( + "Failed to upsert entities: %s error: %s", self.collection_name, exc + ) + raise exc diff --git a/libs/partners/milvus/langchain_milvus/vectorstores/zilliz.py b/libs/partners/milvus/langchain_milvus/vectorstores/zilliz.py new file mode 100644 index 00000000000..02f2ce739ff --- /dev/null +++ b/libs/partners/milvus/langchain_milvus/vectorstores/zilliz.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +import logging +from typing import Any, Dict, List, Optional + +from langchain_core.embeddings import Embeddings + +from langchain_milvus.vectorstores.milvus import Milvus + +logger = logging.getLogger(__name__) + + +class Zilliz(Milvus): + """`Zilliz` vector store. + + You need to have `pymilvus` installed and a + running Zilliz database. + + See the following documentation for how to run a Zilliz instance: + https://docs.zilliz.com/docs/create-cluster + + + IF USING L2/IP metric IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. + + Args: + embedding_function (Embeddings): Function used to embed the text. + collection_name (str): Which Zilliz collection to use. Defaults to + "LangChainCollection". + connection_args (Optional[dict[str, any]]): The connection args used for + this class comes in the form of a dict. + consistency_level (str): The consistency level to use for a collection. + Defaults to "Session". + index_params (Optional[dict]): Which index params to use. Defaults to + HNSW/AUTOINDEX depending on service. + search_params (Optional[dict]): Which search params to use. Defaults to + default of index. + drop_old (Optional[bool]): Whether to drop the current collection. Defaults + to False. 
+ auto_id (bool): Whether to enable auto id for primary key. Defaults to False. + If False, you needs to provide text ids (string less than 65535 bytes). + If True, Milvus will generate unique integers as primary keys. + + The connection args used for this class comes in the form of a dict, + here are a few of the options: + address (str): The actual address of Zilliz + instance. Example address: "localhost:19530" + uri (str): The uri of Zilliz instance. Example uri: + "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", + host (str): The host of Zilliz instance. Default at "localhost", + PyMilvus will fill in the default host if only port is provided. + port (str/int): The port of Zilliz instance. Default at 19530, PyMilvus + will fill in the default port if only host is provided. + user (str): Use which user to connect to Zilliz instance. If user and + password are provided, we will add related header in every RPC call. + password (str): Required when user is provided. The password + corresponding to the user. + token (str): API key, for serverless clusters which can be used as + replacements for user and password. + secure (bool): Default is false. If set to true, tls will be enabled. + client_key_path (str): If use tls two-way authentication, need to + write the client.key path. + client_pem_path (str): If use tls two-way authentication, need to + write the client.pem path. + ca_pem_path (str): If use tls two-way authentication, need to write + the ca.pem path. + server_pem_path (str): If use tls one-way authentication, need to + write the server.pem path. + server_name (str): If use tls, need to write the common name. + + Example: + .. 
code-block:: python + + from langchain_community.vectorstores import Zilliz + from langchain_community.embeddings import OpenAIEmbeddings + + embedding = OpenAIEmbeddings() + # Connect to a Zilliz instance + milvus_store = Milvus( + embedding_function = embedding, + collection_name = "LangChainCollection", + connection_args = { + "uri": "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", + "user": "temp", + "password": "temp", + "token": "temp", # API key as replacements for user and password + "secure": True + } + drop_old: True, + ) + + Raises: + ValueError: If the pymilvus python package is not installed. + """ + + def _create_index(self) -> None: + """Create a index on the collection""" + from pymilvus import Collection, MilvusException + + if isinstance(self.col, Collection) and self._get_index() is None: + try: + # If no index params, use a default AutoIndex based one + if self.index_params is None: + self.index_params = { + "metric_type": "L2", + "index_type": "AUTOINDEX", + "params": {}, + } + + try: + self.col.create_index( + self._vector_field, + index_params=self.index_params, + using=self.alias, + ) + + # If default did not work, most likely Milvus self-hosted + except MilvusException: + # Use HNSW based index + self.index_params = { + "metric_type": "L2", + "index_type": "HNSW", + "params": {"M": 8, "efConstruction": 64}, + } + self.col.create_index( + self._vector_field, + index_params=self.index_params, + using=self.alias, + ) + logger.debug( + "Successfully created an index on collection: %s", + self.collection_name, + ) + + except MilvusException as e: + logger.error( + "Failed to create an index on collection: %s", self.collection_name + ) + raise e + + @classmethod + def from_texts( + cls, + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + collection_name: str = "LangChainCollection", + connection_args: Optional[Dict[str, Any]] = None, + consistency_level: str = "Session", + index_params: 
Optional[dict] = None, + search_params: Optional[dict] = None, + drop_old: bool = False, + *, + ids: Optional[List[str]] = None, + auto_id: bool = False, + **kwargs: Any, + ) -> Zilliz: + """Create a Zilliz collection, indexes it with HNSW, and insert data. + + Args: + texts (List[str]): Text data. + embedding (Embeddings): Embedding function. + metadatas (Optional[List[dict]]): Metadata for each text if it exists. + Defaults to None. + collection_name (str, optional): Collection name to use. Defaults to + "LangChainCollection". + connection_args (dict[str, Any], optional): Connection args to use. Defaults + to DEFAULT_MILVUS_CONNECTION. + consistency_level (str, optional): Which consistency level to use. Defaults + to "Session". + index_params (Optional[dict], optional): Which index_params to use. + Defaults to None. + search_params (Optional[dict], optional): Which search params to use. + Defaults to None. + drop_old (Optional[bool], optional): Whether to drop the collection with + that name if it exists. Defaults to False. + ids (Optional[List[str]]): List of text ids. + auto_id (bool): Whether to enable auto id for primary key. Defaults to + False. If False, you needs to provide text ids (string less than 65535 + bytes). If True, Milvus will generate unique integers as primary keys. 
+ + Returns: + Zilliz: Zilliz Vector Store + """ + vector_db = cls( + embedding_function=embedding, + collection_name=collection_name, + connection_args=connection_args or {}, + consistency_level=consistency_level, + index_params=index_params, + search_params=search_params, + drop_old=drop_old, + auto_id=auto_id, + **kwargs, + ) + vector_db.add_texts(texts=texts, metadatas=metadatas, ids=ids) + return vector_db diff --git a/libs/partners/milvus/poetry.lock b/libs/partners/milvus/poetry.lock new file mode 100644 index 00000000000..059dade2041 --- /dev/null +++ b/libs/partners/milvus/poetry.lock @@ -0,0 +1,1291 @@ +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "certifi" +version = "2024.2.2" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "codespell" +version = "2.3.0" +description = "Codespell" +optional = false +python-versions = ">=3.8" +files = [ + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, +] + +[package.extras] +dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] +hard-encoding-detection = ["chardet"] +toml = ["tomli"] +types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "environs" +version = "9.5.0" +description = "simplified environment variable parsing" +optional = false +python-versions = ">=3.6" +files = [ + {file = "environs-9.5.0-py2.py3-none-any.whl", hash = "sha256:1e549569a3de49c05f856f40bce86979e7d5ffbbc4398e7f338574c220189124"}, + {file = "environs-9.5.0.tar.gz", hash = "sha256:a76307b36fbe856bdca7ee9161e6c466fd7fcffc297109a118c59b54e27e30c9"}, +] + +[package.dependencies] +marshmallow = ">=3.0.0" +python-dotenv = "*" + +[package.extras] +dev = ["dj-database-url", "dj-email-url", "django-cache-url", "flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)", "pytest", "tox"] +django = ["dj-database-url", "dj-email-url", "django-cache-url"] +lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"] +tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"] + +[[package]] +name = "exceptiongroup" +version = "1.2.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "freezegun" +version = "1.5.1" +description = "Let your Python tests travel through time" +optional = false +python-versions = ">=3.7" +files = [ + {file = "freezegun-1.5.1-py3-none-any.whl", hash = 
"sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"}, + {file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"}, +] + +[package.dependencies] +python-dateutil = ">=2.7" + +[[package]] +name = "grpcio" +version = "1.63.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +files = [ + {file = "grpcio-1.63.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:2e93aca840c29d4ab5db93f94ed0a0ca899e241f2e8aec6334ab3575dc46125c"}, + {file = "grpcio-1.63.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:91b73d3f1340fefa1e1716c8c1ec9930c676d6b10a3513ab6c26004cb02d8b3f"}, + {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b3afbd9d6827fa6f475a4f91db55e441113f6d3eb9b7ebb8fb806e5bb6d6bd0d"}, + {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f3f6883ce54a7a5f47db43289a0a4c776487912de1a0e2cc83fdaec9685cc9f"}, + {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf8dae9cc0412cb86c8de5a8f3be395c5119a370f3ce2e69c8b7d46bb9872c8d"}, + {file = "grpcio-1.63.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:08e1559fd3b3b4468486b26b0af64a3904a8dbc78d8d936af9c1cf9636eb3e8b"}, + {file = "grpcio-1.63.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5c039ef01516039fa39da8a8a43a95b64e288f79f42a17e6c2904a02a319b357"}, + {file = "grpcio-1.63.0-cp310-cp310-win32.whl", hash = "sha256:ad2ac8903b2eae071055a927ef74121ed52d69468e91d9bcbd028bd0e554be6d"}, + {file = "grpcio-1.63.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2e44f59316716532a993ca2966636df6fbe7be4ab6f099de6815570ebe4383a"}, + {file = "grpcio-1.63.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:f28f8b2db7b86c77916829d64ab21ff49a9d8289ea1564a2b2a3a8ed9ffcccd3"}, + {file = "grpcio-1.63.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:65bf975639a1f93bee63ca60d2e4951f1b543f498d581869922910a476ead2f5"}, + {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:b5194775fec7dc3dbd6a935102bb156cd2c35efe1685b0a46c67b927c74f0cfb"}, + {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4cbb2100ee46d024c45920d16e888ee5d3cf47c66e316210bc236d5bebc42b3"}, + {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff737cf29b5b801619f10e59b581869e32f400159e8b12d7a97e7e3bdeee6a2"}, + {file = "grpcio-1.63.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd1e68776262dd44dedd7381b1a0ad09d9930ffb405f737d64f505eb7f77d6c7"}, + {file = "grpcio-1.63.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f45f27f516548e23e4ec3fbab21b060416007dbe768a111fc4611464cc773f"}, + {file = "grpcio-1.63.0-cp311-cp311-win32.whl", hash = "sha256:878b1d88d0137df60e6b09b74cdb73db123f9579232c8456f53e9abc4f62eb3c"}, + {file = "grpcio-1.63.0-cp311-cp311-win_amd64.whl", hash = "sha256:756fed02dacd24e8f488f295a913f250b56b98fb793f41d5b2de6c44fb762434"}, + {file = "grpcio-1.63.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:93a46794cc96c3a674cdfb59ef9ce84d46185fe9421baf2268ccb556f8f81f57"}, + {file = "grpcio-1.63.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a7b19dfc74d0be7032ca1eda0ed545e582ee46cd65c162f9e9fc6b26ef827dc6"}, + {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:8064d986d3a64ba21e498b9a376cbc5d6ab2e8ab0e288d39f266f0fca169b90d"}, + {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:219bb1848cd2c90348c79ed0a6b0ea51866bc7e72fa6e205e459fedab5770172"}, + {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2d60cd1d58817bc5985fae6168d8b5655c4981d448d0f5b6194bbcc038090d2"}, + {file = "grpcio-1.63.0-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:9e350cb096e5c67832e9b6e018cf8a0d2a53b2a958f6251615173165269a91b0"}, + {file = "grpcio-1.63.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:56cdf96ff82e3cc90dbe8bac260352993f23e8e256e063c327b6cf9c88daf7a9"}, + {file = "grpcio-1.63.0-cp312-cp312-win32.whl", hash = "sha256:3a6d1f9ea965e750db7b4ee6f9fdef5fdf135abe8a249e75d84b0a3e0c668a1b"}, + {file = "grpcio-1.63.0-cp312-cp312-win_amd64.whl", hash = "sha256:d2497769895bb03efe3187fb1888fc20e98a5f18b3d14b606167dacda5789434"}, + {file = "grpcio-1.63.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fdf348ae69c6ff484402cfdb14e18c1b0054ac2420079d575c53a60b9b2853ae"}, + {file = "grpcio-1.63.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a3abfe0b0f6798dedd2e9e92e881d9acd0fdb62ae27dcbbfa7654a57e24060c0"}, + {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:6ef0ad92873672a2a3767cb827b64741c363ebaa27e7f21659e4e31f4d750280"}, + {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b416252ac5588d9dfb8a30a191451adbf534e9ce5f56bb02cd193f12d8845b7f"}, + {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3b77eaefc74d7eb861d3ffbdf91b50a1bb1639514ebe764c47773b833fa2d91"}, + {file = "grpcio-1.63.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b005292369d9c1f80bf70c1db1c17c6c342da7576f1c689e8eee4fb0c256af85"}, + {file = "grpcio-1.63.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cdcda1156dcc41e042d1e899ba1f5c2e9f3cd7625b3d6ebfa619806a4c1aadda"}, + {file = "grpcio-1.63.0-cp38-cp38-win32.whl", hash = "sha256:01799e8649f9e94ba7db1aeb3452188048b0019dc37696b0f5ce212c87c560c3"}, + {file = "grpcio-1.63.0-cp38-cp38-win_amd64.whl", hash = "sha256:6a1a3642d76f887aa4009d92f71eb37809abceb3b7b5a1eec9c554a246f20e3a"}, + {file = "grpcio-1.63.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:75f701ff645858a2b16bc8c9fc68af215a8bb2d5a9b647448129de6e85d52bce"}, + {file = 
"grpcio-1.63.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cacdef0348a08e475a721967f48206a2254a1b26ee7637638d9e081761a5ba86"}, + {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:0697563d1d84d6985e40ec5ec596ff41b52abb3fd91ec240e8cb44a63b895094"}, + {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6426e1fb92d006e47476d42b8f240c1d916a6d4423c5258ccc5b105e43438f61"}, + {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48cee31bc5f5a31fb2f3b573764bd563aaa5472342860edcc7039525b53e46a"}, + {file = "grpcio-1.63.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:50344663068041b34a992c19c600236e7abb42d6ec32567916b87b4c8b8833b3"}, + {file = "grpcio-1.63.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:259e11932230d70ef24a21b9fb5bb947eb4703f57865a404054400ee92f42f5d"}, + {file = "grpcio-1.63.0-cp39-cp39-win32.whl", hash = "sha256:a44624aad77bf8ca198c55af811fd28f2b3eaf0a50ec5b57b06c034416ef2d0a"}, + {file = "grpcio-1.63.0-cp39-cp39-win_amd64.whl", hash = "sha256:166e5c460e5d7d4656ff9e63b13e1f6029b122104c1633d5f37eaea348d7356d"}, + {file = "grpcio-1.63.0.tar.gz", hash = "sha256:f3023e14805c61bc439fb40ca545ac3d5740ce66120a678a3c6c2c55b70343d1"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.63.0)"] + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = 
"sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +description = "Apply JSON-Patches (RFC 6902)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, +] + +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +name = "jsonpointer" +version = "2.4" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, +] + +[[package]] +name = "langchain-core" +version = "0.2.2rc1" +description = "Building applications with LLMs through composability" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [] +develop = true + +[package.dependencies] +jsonpatch = "^1.33" +langsmith = "^0.1.0" +packaging = "^23.2" +pydantic = ">=1,<3" +PyYAML = ">=5.3" +tenacity = "^8.1.0" + +[package.extras] +extended-testing = ["jinja2 (>=3,<4)"] + +[package.source] +type = "directory" +url = "../../core" + +[[package]] +name = "langsmith" +version = "0.1.63" +description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
+optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langsmith-0.1.63-py3-none-any.whl", hash = "sha256:7810afdf5e3f3b472fc581a29371fb96cd843dde2149e048d1b9610325159d1e"}, + {file = "langsmith-0.1.63.tar.gz", hash = "sha256:a609405b52f6f54df442a142cbf19ab38662d54e532f96028b4c546434d4afdf"}, +] + +[package.dependencies] +orjson = ">=3.9.14,<4.0.0" +pydantic = ">=1,<3" +requests = ">=2,<3" + +[[package]] +name = "marshmallow" +version = "3.21.2" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"}, + {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "milvus-lite" +version = "2.4.6" +description = "A lightweight version of Milvus wrapped with Python." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "milvus_lite-2.4.6-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:43ac9f36903b31455e50a8f1d9cb033e18971643029c89eb5c9610f01c1f2e26"}, + {file = "milvus_lite-2.4.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:95afe2ee019c569713926747bbe18ab5944927797374fed796f00fbe564cccd6"}, + {file = "milvus_lite-2.4.6-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2f9116bfc6a0d95636d3aa144582486b622c492689f3c93c519101bd7158b7db"}, +] + +[[package]] +name = "mypy" +version = "0.991" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"}, + {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"}, + {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"}, + {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"}, + {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"}, + {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"}, + {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"}, + {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"}, + {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"}, + {file = 
"mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"}, + {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"}, + {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"}, + {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"}, + {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"}, + {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"}, + {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"}, + {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"}, + {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"}, + {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"}, + {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"}, + {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"}, + {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"}, + {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"}, + {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"}, + {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"}, + {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"}, + {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"}, + {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"}, + {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"}, + {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"}, +] + +[package.dependencies] +mypy-extensions = ">=0.4.3" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = 
"numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "orjson" +version = "3.10.3" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9fb6c3f9f5490a3eb4ddd46fc1b6eadb0d6fc16fb3f07320149c3286a1409dd8"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:252124b198662eee80428f1af8c63f7ff077c88723fe206a25df8dc57a57b1fa"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f3e87733823089a338ef9bbf363ef4de45e5c599a9bf50a7a9b82e86d0228da"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8334c0d87103bb9fbbe59b78129f1f40d1d1e8355bbed2ca71853af15fa4ed3"}, + {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1952c03439e4dce23482ac846e7961f9d4ec62086eb98ae76d97bd41d72644d7"}, + {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c0403ed9c706dcd2809f1600ed18f4aae50be263bd7112e54b50e2c2bc3ebd6d"}, + {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:382e52aa4270a037d41f325e7d1dfa395b7de0c367800b6f337d8157367bf3a7"}, + {file = "orjson-3.10.3-cp310-none-win32.whl", hash = "sha256:be2aab54313752c04f2cbaab4515291ef5af8c2256ce22abc007f89f42f49109"}, + {file = "orjson-3.10.3-cp310-none-win_amd64.whl", hash = "sha256:416b195f78ae461601893f482287cee1e3059ec49b4f99479aedf22a20b1098b"}, + {file = "orjson-3.10.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:73100d9abbbe730331f2242c1fc0bcb46a3ea3b4ae3348847e5a141265479700"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a12eee96e3ab828dbfcb4d5a0023aa971b27143a1d35dc214c176fdfb29b3"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520de5e2ef0b4ae546bea25129d6c7c74edb43fc6cf5213f511a927f2b28148b"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccaa0a401fc02e8828a5bedfd80f8cd389d24f65e5ca3954d72c6582495b4bcf"}, + {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7bc9e8bc11bac40f905640acd41cbeaa87209e7e1f57ade386da658092dc16"}, + {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3582b34b70543a1ed6944aca75e219e1192661a63da4d039d088a09c67543b08"}, + {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c23dfa91481de880890d17aa7b91d586a4746a4c2aa9a145bebdbaf233768d5"}, + {file = "orjson-3.10.3-cp311-none-win32.whl", hash = "sha256:1770e2a0eae728b050705206d84eda8b074b65ee835e7f85c919f5705b006c9b"}, + {file = "orjson-3.10.3-cp311-none-win_amd64.whl", hash = "sha256:93433b3c1f852660eb5abdc1f4dd0ced2be031ba30900433223b28ee0140cde5"}, + {file = "orjson-3.10.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a39aa73e53bec8d410875683bfa3a8edf61e5a1c7bb4014f65f81d36467ea098"}, + {file = 
"orjson-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0943a96b3fa09bee1afdfccc2cb236c9c64715afa375b2af296c73d91c23eab2"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e852baafceff8da3c9defae29414cc8513a1586ad93e45f27b89a639c68e8176"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18566beb5acd76f3769c1d1a7ec06cdb81edc4d55d2765fb677e3eaa10fa99e0"}, + {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd2218d5a3aa43060efe649ec564ebedec8ce6ae0a43654b81376216d5ebd42"}, + {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cf20465e74c6e17a104ecf01bf8cd3b7b252565b4ccee4548f18b012ff2f8069"}, + {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ba7f67aa7f983c4345eeda16054a4677289011a478ca947cd69c0a86ea45e534"}, + {file = "orjson-3.10.3-cp312-none-win32.whl", hash = "sha256:17e0713fc159abc261eea0f4feda611d32eabc35708b74bef6ad44f6c78d5ea0"}, + {file = "orjson-3.10.3-cp312-none-win_amd64.whl", hash = "sha256:4c895383b1ec42b017dd2c75ae8a5b862fc489006afde06f14afbdd0309b2af0"}, + {file = "orjson-3.10.3-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:be2719e5041e9fb76c8c2c06b9600fe8e8584e6980061ff88dcbc2691a16d20d"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0175a5798bdc878956099f5c54b9837cb62cfbf5d0b86ba6d77e43861bcec2"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:978be58a68ade24f1af7758626806e13cff7748a677faf95fbb298359aa1e20d"}, + {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16bda83b5c61586f6f788333d3cf3ed19015e3b9019188c56983b5a299210eb5"}, + {file = 
"orjson-3.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ad1f26bea425041e0a1adad34630c4825a9e3adec49079b1fb6ac8d36f8b754"}, + {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9e253498bee561fe85d6325ba55ff2ff08fb5e7184cd6a4d7754133bd19c9195"}, + {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0a62f9968bab8a676a164263e485f30a0b748255ee2f4ae49a0224be95f4532b"}, + {file = "orjson-3.10.3-cp38-none-win32.whl", hash = "sha256:8d0b84403d287d4bfa9bf7d1dc298d5c1c5d9f444f3737929a66f2fe4fb8f134"}, + {file = "orjson-3.10.3-cp38-none-win_amd64.whl", hash = "sha256:8bc7a4df90da5d535e18157220d7915780d07198b54f4de0110eca6b6c11e290"}, + {file = "orjson-3.10.3-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9059d15c30e675a58fdcd6f95465c1522b8426e092de9fff20edebfdc15e1cb0"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d40c7f7938c9c2b934b297412c067936d0b54e4b8ab916fd1a9eb8f54c02294"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4a654ec1de8fdaae1d80d55cee65893cb06494e124681ab335218be6a0691e7"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:831c6ef73f9aa53c5f40ae8f949ff7681b38eaddb6904aab89dca4d85099cb78"}, + {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99b880d7e34542db89f48d14ddecbd26f06838b12427d5a25d71baceb5ba119d"}, + {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e5e176c994ce4bd434d7aafb9ecc893c15f347d3d2bbd8e7ce0b63071c52e25"}, + {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b69a58a37dab856491bf2d3bbf259775fdce262b727f96aafbda359cb1d114d8"}, + {file = "orjson-3.10.3-cp39-none-win32.whl", hash = "sha256:b8d4d1a6868cde356f1402c8faeb50d62cee765a1f7ffcfd6de732ab0581e063"}, + {file = 
"orjson-3.10.3-cp39-none-win_amd64.whl", hash = "sha256:5102f50c5fc46d94f2033fe00d392588564378260d64377aec702f21a7a22912"}, + {file = "orjson-3.10.3.tar.gz", hash = "sha256:2b166507acae7ba2f7c315dcf185a9111ad5e992ac81f2d507aac39193c2c818"}, +] + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", 
"xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "protobuf" +version = "5.27.0" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-5.27.0-cp310-abi3-win32.whl", hash = "sha256:2f83bf341d925650d550b8932b71763321d782529ac0eaf278f5242f513cc04e"}, + {file = "protobuf-5.27.0-cp310-abi3-win_amd64.whl", hash = "sha256:b276e3f477ea1eebff3c2e1515136cfcff5ac14519c45f9b4aa2f6a87ea627c4"}, + {file = "protobuf-5.27.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:744489f77c29174328d32f8921566fb0f7080a2f064c5137b9d6f4b790f9e0c1"}, + {file = 
"protobuf-5.27.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:f51f33d305e18646f03acfdb343aac15b8115235af98bc9f844bf9446573827b"}, + {file = "protobuf-5.27.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:56937f97ae0dcf4e220ff2abb1456c51a334144c9960b23597f044ce99c29c89"}, + {file = "protobuf-5.27.0-cp38-cp38-win32.whl", hash = "sha256:a17f4d664ea868102feaa30a674542255f9f4bf835d943d588440d1f49a3ed15"}, + {file = "protobuf-5.27.0-cp38-cp38-win_amd64.whl", hash = "sha256:aabbbcf794fbb4c692ff14ce06780a66d04758435717107c387f12fb477bf0d8"}, + {file = "protobuf-5.27.0-cp39-cp39-win32.whl", hash = "sha256:587be23f1212da7a14a6c65fd61995f8ef35779d4aea9e36aad81f5f3b80aec5"}, + {file = "protobuf-5.27.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cb65fc8fba680b27cf7a07678084c6e68ee13cab7cace734954c25a43da6d0f"}, + {file = "protobuf-5.27.0-py3-none-any.whl", hash = "sha256:673ad60f1536b394b4fa0bcd3146a4130fcad85bfe3b60eaa86d6a0ace0fa374"}, + {file = "protobuf-5.27.0.tar.gz", hash = "sha256:07f2b9a15255e3cf3f137d884af7972407b556a7a220912b252f26dc3121e6bf"}, +] + +[[package]] +name = "pydantic" +version = "2.7.1" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.18.2" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.18.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = 
"sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = 
"sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + 
{file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = 
"sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = 
"pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pymilvus" +version = "2.4.3" +description = "Python Sdk for Milvus" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pymilvus-2.4.3-py3-none-any.whl", hash = "sha256:38239e89f8d739f665141d0b80908990b5f59681e889e135c234a4a45669a5c8"}, + {file = "pymilvus-2.4.3.tar.gz", hash = "sha256:703ac29296cdce03d6dc2aaebbe959e57745c141a94150e371dc36c61c226cc1"}, +] + +[package.dependencies] +environs = "<=9.5.0" +grpcio = ">=1.49.1,<=1.63.0" +milvus-lite = ">=2.4.0,<2.5.0" +numpy = {version = "<1.25.0", markers = "python_version <= \"3.8\""} +pandas = ">=1.2.4" +protobuf = ">=3.20.0" +setuptools = ">=67" +ujson = ">=2.0.0" + +[package.extras] +bulk-writer = ["azure-storage-blob", "minio (>=7.0.0)", "pyarrow (>=12.0.0)", "requests"] +dev = ["black", "grpcio (==1.62.2)", "grpcio-testing (==1.62.2)", "grpcio-tools (==1.62.2)", "pytest (>=5.3.4)", "pytest-cov (>=2.8.1)", "pytest-timeout (>=1.3.4)", "ruff (>0.4.0)"] +model = ["milvus-model 
(>=0.1.0)"] + +[[package]] +name = "pytest" +version = "7.4.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.21.2" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"}, + {file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] + +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = 
"sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-watcher" +version = "0.3.5" +description = "Automatically rerun your tests on file modifications" +optional = false +python-versions = ">=3.7.0,<4.0.0" +files = [ + {file = "pytest_watcher-0.3.5-py3-none-any.whl", hash = "sha256:af00ca52c7be22dc34c0fd3d7ffef99057207a73b05dc5161fe3b2fe91f58130"}, + {file = "pytest_watcher-0.3.5.tar.gz", hash = "sha256:8896152460ba2b1a8200c12117c6611008ec96c8b2d811f0a05ab8a82b043ff8"}, +] + +[package.dependencies] +tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""} +watchdog = ">=2.0.0" + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = 
"pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, 
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "requests" +version = "2.32.2" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, + {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "ruff" +version = "0.1.15" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, + {file = 
"ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, + {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, + {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, + {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, + {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, +] + +[[package]] +name = "scipy" +version = "1.9.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = 
"sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[package.dependencies] +numpy = ">=1.18.5,<1.26.0" + +[package.extras] +dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] +test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "setuptools" +version = "70.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, + {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", 
"sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "syrupy" +version = "4.6.1" +description = "Pytest Snapshot Test Utility" +optional = false +python-versions = ">=3.8.1,<4" +files = [ + {file = "syrupy-4.6.1-py3-none-any.whl", hash = "sha256:203e52f9cb9fa749cf683f29bd68f02c16c3bc7e7e5fe8f2fc59bdfe488ce133"}, + {file = "syrupy-4.6.1.tar.gz", hash = "sha256:37a835c9ce7857eeef86d62145885e10b3cb9615bc6abeb4ce404b3f18e1bb36"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9.0.0" + +[[package]] +name = "tenacity" +version = "8.3.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-8.3.0-py3-none-any.whl", hash = "sha256:3649f6443dbc0d9b01b9d8020a9c4ec7a1ff5f6f3c6c8a036ef371f573fe9185"}, + {file = "tenacity-8.3.0.tar.gz", hash = "sha256:953d4e6ad24357bceffbc9707bc74349aca9d245f68eb65419cf0c249a1949a2"}, +] + +[package.extras] 
+doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "types-requests" +version = "2.32.0.20240523" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-requests-2.32.0.20240523.tar.gz", hash = "sha256:26b8a6de32d9f561192b9942b41c0ab2d8010df5677ca8aa146289d11d505f57"}, + {file = "types_requests-2.32.0.20240523-py3-none-any.whl", hash = "sha256:f19ed0e2daa74302069bbbbf9e82902854ffa780bc790742a810a9aaa52f65ec"}, +] + +[package.dependencies] +urllib3 = ">=2" + +[[package]] +name = "typing-extensions" +version = "4.12.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, + {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, +] + +[[package]] +name = "tzdata" +version = "2024.1" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, +] + +[[package]] +name = "ujson" +version = "5.10.0" +description = "Ultra fast JSON encoder and decoder for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file 
= "ujson-5.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2601aa9ecdbee1118a1c2065323bda35e2c5a2cf0797ef4522d485f9d3ef65bd"}, + {file = "ujson-5.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:348898dd702fc1c4f1051bc3aacbf894caa0927fe2c53e68679c073375f732cf"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22cffecf73391e8abd65ef5f4e4dd523162a3399d5e84faa6aebbf9583df86d6"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26b0e2d2366543c1bb4fbd457446f00b0187a2bddf93148ac2da07a53fe51569"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:caf270c6dba1be7a41125cd1e4fc7ba384bf564650beef0df2dd21a00b7f5770"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a245d59f2ffe750446292b0094244df163c3dc96b3ce152a2c837a44e7cda9d1"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:94a87f6e151c5f483d7d54ceef83b45d3a9cca7a9cb453dbdbb3f5a6f64033f5"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:29b443c4c0a113bcbb792c88bea67b675c7ca3ca80c3474784e08bba01c18d51"}, + {file = "ujson-5.10.0-cp310-cp310-win32.whl", hash = "sha256:c18610b9ccd2874950faf474692deee4223a994251bc0a083c114671b64e6518"}, + {file = "ujson-5.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:924f7318c31874d6bb44d9ee1900167ca32aa9b69389b98ecbde34c1698a250f"}, + {file = "ujson-5.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a5b366812c90e69d0f379a53648be10a5db38f9d4ad212b60af00bd4048d0f00"}, + {file = "ujson-5.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:502bf475781e8167f0f9d0e41cd32879d120a524b22358e7f205294224c71126"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b91b5d0d9d283e085e821651184a647699430705b15bf274c7896f23fe9c9d8"}, + {file = 
"ujson-5.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:129e39af3a6d85b9c26d5577169c21d53821d8cf68e079060602e861c6e5da1b"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f77b74475c462cb8b88680471193064d3e715c7c6074b1c8c412cb526466efe9"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7ec0ca8c415e81aa4123501fee7f761abf4b7f386aad348501a26940beb1860f"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab13a2a9e0b2865a6c6db9271f4b46af1c7476bfd51af1f64585e919b7c07fd4"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:57aaf98b92d72fc70886b5a0e1a1ca52c2320377360341715dd3933a18e827b1"}, + {file = "ujson-5.10.0-cp311-cp311-win32.whl", hash = "sha256:2987713a490ceb27edff77fb184ed09acdc565db700ee852823c3dc3cffe455f"}, + {file = "ujson-5.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f00ea7e00447918ee0eff2422c4add4c5752b1b60e88fcb3c067d4a21049a720"}, + {file = "ujson-5.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98ba15d8cbc481ce55695beee9f063189dce91a4b08bc1d03e7f0152cd4bbdd5"}, + {file = "ujson-5.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9d2edbf1556e4f56e50fab7d8ff993dbad7f54bac68eacdd27a8f55f433578e"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6627029ae4f52d0e1a2451768c2c37c0c814ffc04f796eb36244cf16b8e57043"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ccb77b3e40b151e20519c6ae6d89bfe3f4c14e8e210d910287f778368bb3d1"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3caf9cd64abfeb11a3b661329085c5e167abbe15256b3b68cb5d914ba7396f3"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:6e32abdce572e3a8c3d02c886c704a38a1b015a1fb858004e03d20ca7cecbb21"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a65b6af4d903103ee7b6f4f5b85f1bfd0c90ba4eeac6421aae436c9988aa64a2"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:604a046d966457b6cdcacc5aa2ec5314f0e8c42bae52842c1e6fa02ea4bda42e"}, + {file = "ujson-5.10.0-cp312-cp312-win32.whl", hash = "sha256:6dea1c8b4fc921bf78a8ff00bbd2bfe166345f5536c510671bccececb187c80e"}, + {file = "ujson-5.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:38665e7d8290188b1e0d57d584eb8110951a9591363316dd41cf8686ab1d0abc"}, + {file = "ujson-5.10.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:618efd84dc1acbd6bff8eaa736bb6c074bfa8b8a98f55b61c38d4ca2c1f7f287"}, + {file = "ujson-5.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38d5d36b4aedfe81dfe251f76c0467399d575d1395a1755de391e58985ab1c2e"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67079b1f9fb29ed9a2914acf4ef6c02844b3153913eb735d4bf287ee1db6e557"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d0e0ceeb8fe2468c70ec0c37b439dd554e2aa539a8a56365fd761edb418988"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59e02cd37bc7c44d587a0ba45347cc815fb7a5fe48de16bf05caa5f7d0d2e816"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a890b706b64e0065f02577bf6d8ca3b66c11a5e81fb75d757233a38c07a1f20"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:621e34b4632c740ecb491efc7f1fcb4f74b48ddb55e65221995e74e2d00bbff0"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9500e61fce0cfc86168b248104e954fead61f9be213087153d272e817ec7b4f"}, + {file = "ujson-5.10.0-cp313-cp313-win32.whl", hash = 
"sha256:4c4fc16f11ac1612f05b6f5781b384716719547e142cfd67b65d035bd85af165"}, + {file = "ujson-5.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:4573fd1695932d4f619928fd09d5d03d917274381649ade4328091ceca175539"}, + {file = "ujson-5.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a984a3131da7f07563057db1c3020b1350a3e27a8ec46ccbfbf21e5928a43050"}, + {file = "ujson-5.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73814cd1b9db6fc3270e9d8fe3b19f9f89e78ee9d71e8bd6c9a626aeaeaf16bd"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61e1591ed9376e5eddda202ec229eddc56c612b61ac6ad07f96b91460bb6c2fb"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2c75269f8205b2690db4572a4a36fe47cd1338e4368bc73a7a0e48789e2e35a"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7223f41e5bf1f919cd8d073e35b229295aa8e0f7b5de07ed1c8fddac63a6bc5d"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d4dc2fd6b3067c0782e7002ac3b38cf48608ee6366ff176bbd02cf969c9c20fe"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:232cc85f8ee3c454c115455195a205074a56ff42608fd6b942aa4c378ac14dd7"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cc6139531f13148055d691e442e4bc6601f6dba1e6d521b1585d4788ab0bfad4"}, + {file = "ujson-5.10.0-cp38-cp38-win32.whl", hash = "sha256:e7ce306a42b6b93ca47ac4a3b96683ca554f6d35dd8adc5acfcd55096c8dfcb8"}, + {file = "ujson-5.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:e82d4bb2138ab05e18f089a83b6564fee28048771eb63cdecf4b9b549de8a2cc"}, + {file = "ujson-5.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dfef2814c6b3291c3c5f10065f745a1307d86019dbd7ea50e83504950136ed5b"}, + {file = "ujson-5.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:4734ee0745d5928d0ba3a213647f1c4a74a2a28edc6d27b2d6d5bd9fa4319e27"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47ebb01bd865fdea43da56254a3930a413f0c5590372a1241514abae8aa7c76"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee5e97c2496874acbf1d3e37b521dd1f307349ed955e62d1d2f05382bc36dd5"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7490655a2272a2d0b072ef16b0b58ee462f4973a8f6bbe64917ce5e0a256f9c0"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba17799fcddaddf5c1f75a4ba3fd6441f6a4f1e9173f8a786b42450851bd74f1"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2aff2985cef314f21d0fecc56027505804bc78802c0121343874741650a4d3d1"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ad88ac75c432674d05b61184178635d44901eb749786c8eb08c102330e6e8996"}, + {file = "ujson-5.10.0-cp39-cp39-win32.whl", hash = "sha256:2544912a71da4ff8c4f7ab5606f947d7299971bdd25a45e008e467ca638d13c9"}, + {file = "ujson-5.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:3ff201d62b1b177a46f113bb43ad300b424b7847f9c5d38b1b4ad8f75d4a282a"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5b6fee72fa77dc172a28f21693f64d93166534c263adb3f96c413ccc85ef6e64"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:61d0af13a9af01d9f26d2331ce49bb5ac1fb9c814964018ac8df605b5422dcb3"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecb24f0bdd899d368b715c9e6664166cf694d1e57be73f17759573a6986dd95a"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbd8fd427f57a03cff3ad6574b5e299131585d9727c8c366da4624a9069ed746"}, + {file = 
"ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beeaf1c48e32f07d8820c705ff8e645f8afa690cca1544adba4ebfa067efdc88"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:baed37ea46d756aca2955e99525cc02d9181de67f25515c468856c38d52b5f3b"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7663960f08cd5a2bb152f5ee3992e1af7690a64c0e26d31ba7b3ff5b2ee66337"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8640fb4072d36b08e95a3a380ba65779d356b2fee8696afeb7794cf0902d0a1"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78778a3aa7aafb11e7ddca4e29f46bc5139131037ad628cc10936764282d6753"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0111b27f2d5c820e7f2dbad7d48e3338c824e7ac4d2a12da3dc6061cc39c8e6"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:c66962ca7565605b355a9ed478292da628b8f18c0f2793021ca4425abf8b01e5"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ba43cc34cce49cf2d4bc76401a754a81202d8aa926d0e2b79f0ee258cb15d3a4"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac56eb983edce27e7f51d05bc8dd820586c6e6be1c5216a6809b0c668bb312b8"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44bd4b23a0e723bf8b10628288c2c7c335161d6840013d4d5de20e48551773b"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c10f4654e5326ec14a46bcdeb2b685d4ada6911050aa8baaf3501e57024b804"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0de4971a89a762398006e844ae394bd46991f7c385d7a6a3b93ba229e6dac17e"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e1402f0564a97d2a52310ae10a64d25bcef94f8dd643fcf5d310219d915484f7"}, + {file = "ujson-5.10.0.tar.gz", hash = "sha256:b3cd8f3c5d8c7738257f1018880444f7b7d9b66232c64649f562d7ba86ad4bc1"}, +] + +[[package]] +name = "urllib3" +version = "2.2.1" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "watchdog" +version = "4.0.1" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, + {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, + {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, + {file = 
"watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, + {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, + {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, + {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, + {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, + {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, + {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, + {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, + {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, + {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, + {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, + {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.8.1,<4.0" +content-hash = "bdd4f827b6ae022134ab2be9ee987e3247d6be99c1d1cb2e403448b8b0677a4a" diff --git 
a/libs/partners/milvus/pyproject.toml b/libs/partners/milvus/pyproject.toml new file mode 100644 index 00000000000..e852373a74e --- /dev/null +++ b/libs/partners/milvus/pyproject.toml @@ -0,0 +1,99 @@ +[tool.poetry] +name = "langchain-milvus" +version = "0.1.0" +description = "An integration package connecting Milvus and LangChain" +authors = [] +readme = "README.md" +repository = "https://github.com/langchain-ai/langchain" +license = "MIT" + +[tool.poetry.urls] +"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/milvus" + +[tool.poetry.dependencies] +python = ">=3.8.1,<4.0" +langchain-core = "^0.2.0" +pymilvus = "^2.4.3" +scipy = "^1.7" + +[tool.poetry.group.test] +optional = true + +[tool.poetry.group.test.dependencies] +pytest = "^7.3.0" +freezegun = "^1.2.2" +pytest-mock = "^3.10.0" +syrupy = "^4.0.2" +pytest-watcher = "^0.3.4" +pytest-asyncio = "^0.21.1" +langchain-core = { path = "../../core", develop = true } + +[tool.poetry.group.codespell] +optional = true + +[tool.poetry.group.codespell.dependencies] +codespell = "^2.2.0" + +[tool.poetry.group.test_integration] +optional = true + +[tool.poetry.group.test_integration.dependencies] + +[tool.poetry.group.lint] +optional = true + +[tool.poetry.group.lint.dependencies] +ruff = "^0.1.5" + +[tool.poetry.group.typing.dependencies] +mypy = "^0.991" +langchain-core = { path = "../../core", develop = true } +types-requests = "^2" + +[tool.poetry.group.dev] +optional = true + +[tool.poetry.group.dev.dependencies] +langchain-core = { path = "../../core", develop = true } + +[tool.ruff] +select = [ + "E", # pycodestyle + "F", # pyflakes + "I", # isort + "T201", # print +] + +[tool.mypy] +disallow_untyped_defs = "True" + +[[tool.mypy.overrides]] +module = ["pymilvus"] +ignore_missing_imports = "True" + +[tool.coverage.run] +omit = ["tests/*"] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +# --strict-markers 
will raise errors on unknown marks. +# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks +# +# https://docs.pytest.org/en/7.1.x/reference/reference.html +# --strict-config any warnings encountered while parsing the `pytest` +# section of the configuration file raise errors. +# +# https://github.com/tophat/syrupy +# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite. +addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5" +# Registering custom markers. +# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers +markers = [ + "requires: mark tests as requiring a specific library", + "asyncio: mark tests as requiring asyncio", + "compile: mark placeholder test used to compile integration tests without running them", +] +asyncio_mode = "auto" diff --git a/libs/partners/milvus/scripts/check_imports.py b/libs/partners/milvus/scripts/check_imports.py new file mode 100644 index 00000000000..365f5fa118d --- /dev/null +++ b/libs/partners/milvus/scripts/check_imports.py @@ -0,0 +1,17 @@ +import sys +import traceback +from importlib.machinery import SourceFileLoader + +if __name__ == "__main__": + files = sys.argv[1:] + has_failure = False + for file in files: + try: + SourceFileLoader("x", file).load_module() + except Exception: + has_failure = True + print(file) # noqa: T201 + traceback.print_exc() + print() # noqa: T201 + + sys.exit(1 if has_failure else 0) diff --git a/libs/partners/milvus/scripts/check_pydantic.sh b/libs/partners/milvus/scripts/check_pydantic.sh new file mode 100755 index 00000000000..06b5bb81ae2 --- /dev/null +++ b/libs/partners/milvus/scripts/check_pydantic.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# This script searches for lines starting with "import pydantic" or "from pydantic" +# in tracked files within a Git repository.
+# +# Usage: ./scripts/check_pydantic.sh /path/to/repository + +# Check if a path argument is provided +if [ $# -ne 1 ]; then + echo "Usage: $0 /path/to/repository" + exit 1 +fi + +repository_path="$1" + +# Search for lines matching the pattern within the specified repository +result=$(git -C "$repository_path" grep -E '^import pydantic|^from pydantic') + +# Check if any matching lines were found +if [ -n "$result" ]; then + echo "ERROR: The following lines need to be updated:" + echo "$result" + echo "Please replace the code with an import from langchain_core.pydantic_v1." + echo "For example, replace 'from pydantic import BaseModel'" + echo "with 'from langchain_core.pydantic_v1 import BaseModel'" + exit 1 +fi diff --git a/libs/partners/milvus/scripts/lint_imports.sh b/libs/partners/milvus/scripts/lint_imports.sh new file mode 100755 index 00000000000..695613c7ba8 --- /dev/null +++ b/libs/partners/milvus/scripts/lint_imports.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -eu + +# Initialize a variable to keep track of errors +errors=0 + +# make sure not importing from langchain or langchain_experimental +git --no-pager grep '^from langchain\.' . && errors=$((errors+1)) +git --no-pager grep '^from langchain_experimental\.' . 
&& errors=$((errors+1)) + +# Decide on an exit status based on the errors +if [ "$errors" -gt 0 ]; then + exit 1 +else + exit 0 +fi diff --git a/libs/partners/milvus/tests/__init__.py b/libs/partners/milvus/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/integration_tests/__init__.py b/libs/partners/milvus/tests/integration_tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/integration_tests/retrievers/__init__.py b/libs/partners/milvus/tests/integration_tests/retrievers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/integration_tests/test_compile.py b/libs/partners/milvus/tests/integration_tests/test_compile.py new file mode 100644 index 00000000000..33ecccdfa0f --- /dev/null +++ b/libs/partners/milvus/tests/integration_tests/test_compile.py @@ -0,0 +1,7 @@ +import pytest + + +@pytest.mark.compile +def test_placeholder() -> None: + """Used for compiling integration tests without running any real tests.""" + pass diff --git a/libs/partners/milvus/tests/integration_tests/utils.py b/libs/partners/milvus/tests/integration_tests/utils.py new file mode 100644 index 00000000000..f3ef87d2f2a --- /dev/null +++ b/libs/partners/milvus/tests/integration_tests/utils.py @@ -0,0 +1,40 @@ +from typing import List + +from langchain_core.documents import Document +from langchain_core.embeddings import Embeddings + +fake_texts = ["foo", "bar", "baz"] + + +class FakeEmbeddings(Embeddings): + """Fake embeddings functionality for testing.""" + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """Return simple embeddings. 
+ Embeddings encode each text as its index.""" + return [[float(1.0)] * 9 + [float(i)] for i in range(len(texts))] + + async def aembed_documents(self, texts: List[str]) -> List[List[float]]: + return self.embed_documents(texts) + + def embed_query(self, text: str) -> List[float]: + """Return constant query embeddings. + Embeddings are identical to embed_documents(texts)[0]. + Distance to each text will be that text's index, + as it was passed to embed_documents.""" + return [float(1.0)] * 9 + [float(0.0)] + + async def aembed_query(self, text: str) -> List[float]: + return self.embed_query(text) + + +def assert_docs_equal_without_pk( + docs1: List[Document], docs2: List[Document], pk_field: str = "pk" +) -> None: + """Assert two lists of Documents are equal, ignoring the primary key field.""" + assert len(docs1) == len(docs2) + for doc1, doc2 in zip(docs1, docs2): + assert doc1.page_content == doc2.page_content + doc1.metadata.pop(pk_field, None) + doc2.metadata.pop(pk_field, None) + assert doc1.metadata == doc2.metadata diff --git a/libs/partners/milvus/tests/integration_tests/vectorstores/__init__.py b/libs/partners/milvus/tests/integration_tests/vectorstores/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/integration_tests/vectorstores/test_milvus.py b/libs/partners/milvus/tests/integration_tests/vectorstores/test_milvus.py new file mode 100644 index 00000000000..5eaf2abcc90 --- /dev/null +++ b/libs/partners/milvus/tests/integration_tests/vectorstores/test_milvus.py @@ -0,0 +1,184 @@ +"""Test Milvus functionality.""" +from typing import Any, List, Optional + +from langchain_core.documents import Document + +from langchain_milvus.vectorstores import Milvus +from tests.integration_tests.utils import ( + FakeEmbeddings, + assert_docs_equal_without_pk, + fake_texts, +) + +# +# To run this test properly, please start a Milvus server with the following command: +# +# ```shell +# wget 
https://raw.githubusercontent.com/milvus-io/milvus/master/scripts/standalone_embed.sh +# bash standalone_embed.sh start +# ``` +# +# Here is the reference: +# https://milvus.io/docs/install_standalone-docker.md +# + + +def _milvus_from_texts( + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + drop: bool = True, +) -> Milvus: + return Milvus.from_texts( + fake_texts, + FakeEmbeddings(), + metadatas=metadatas, + ids=ids, + # connection_args={"uri": "http://127.0.0.1:19530"}, + connection_args={"uri": "./milvus_demo.db"}, + drop_old=drop, + ) + + +def _get_pks(expr: str, docsearch: Milvus) -> List[Any]: + return docsearch.get_pks(expr) # type: ignore[return-value] + + +def test_milvus() -> None: + """Test end to end construction and search.""" + docsearch = _milvus_from_texts() + output = docsearch.similarity_search("foo", k=1) + assert_docs_equal_without_pk(output, [Document(page_content="foo")]) + + +def test_milvus_with_metadata() -> None: + """Test with metadata""" + docsearch = _milvus_from_texts(metadatas=[{"label": "test"}] * len(fake_texts)) + output = docsearch.similarity_search("foo", k=1) + assert_docs_equal_without_pk( + output, [Document(page_content="foo", metadata={"label": "test"})] + ) + + +def test_milvus_with_id() -> None: + """Test with ids""" + ids = ["id_" + str(i) for i in range(len(fake_texts))] + docsearch = _milvus_from_texts(ids=ids) + output = docsearch.similarity_search("foo", k=1) + assert_docs_equal_without_pk(output, [Document(page_content="foo")]) + + output = docsearch.delete(ids=ids) + assert output.delete_count == len(fake_texts) # type: ignore[attr-defined] + + try: + ids = ["dup_id" for _ in fake_texts] + _milvus_from_texts(ids=ids) + except Exception as e: + assert isinstance(e, AssertionError) + + +def test_milvus_with_score() -> None: + """Test end to end construction and search with scores and IDs.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": i} for i in range(len(texts))] + docsearch 
= _milvus_from_texts(metadatas=metadatas) + output = docsearch.similarity_search_with_score("foo", k=3) + docs = [o[0] for o in output] + scores = [o[1] for o in output] + assert_docs_equal_without_pk( + docs, + [ + Document(page_content="foo", metadata={"page": 0}), + Document(page_content="bar", metadata={"page": 1}), + Document(page_content="baz", metadata={"page": 2}), + ], + ) + assert scores[0] < scores[1] < scores[2] + + +def test_milvus_max_marginal_relevance_search() -> None: + """Test end to end construction and MRR search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": i} for i in range(len(texts))] + docsearch = _milvus_from_texts(metadatas=metadatas) + output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3) + assert_docs_equal_without_pk( + output, + [ + Document(page_content="foo", metadata={"page": 0}), + Document(page_content="baz", metadata={"page": 2}), + ], + ) + + +def test_milvus_add_extra() -> None: + """Test end to end construction and MRR search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": i} for i in range(len(texts))] + docsearch = _milvus_from_texts(metadatas=metadatas) + + docsearch.add_texts(texts, metadatas) + + output = docsearch.similarity_search("foo", k=10) + assert len(output) == 6 + + +def test_milvus_no_drop() -> None: + """Test end to end construction and MRR search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": i} for i in range(len(texts))] + docsearch = _milvus_from_texts(metadatas=metadatas) + del docsearch + + docsearch = _milvus_from_texts(metadatas=metadatas, drop=False) + + output = docsearch.similarity_search("foo", k=10) + assert len(output) == 6 + + +def test_milvus_get_pks() -> None: + """Test end to end construction and get pks with expr""" + texts = ["foo", "bar", "baz"] + metadatas = [{"id": i} for i in range(len(texts))] + docsearch = _milvus_from_texts(metadatas=metadatas) + expr = "id in [1,2]" + output = _get_pks(expr, docsearch) + assert len(output) == 
2 + + +def test_milvus_delete_entities() -> None: + """Test end to end construction and delete entities""" + texts = ["foo", "bar", "baz"] + metadatas = [{"id": i} for i in range(len(texts))] + docsearch = _milvus_from_texts(metadatas=metadatas) + expr = "id in [1,2]" + pks = _get_pks(expr, docsearch) + result = docsearch.delete(pks) + assert result.delete_count == 2 # type: ignore[attr-defined] + + +def test_milvus_upsert_entities() -> None: + """Test end to end construction and upsert entities""" + texts = ["foo", "bar", "baz"] + metadatas = [{"id": i} for i in range(len(texts))] + docsearch = _milvus_from_texts(metadatas=metadatas) + expr = "id in [1,2]" + pks = _get_pks(expr, docsearch) + documents = [ + Document(page_content="test_1", metadata={"id": 1}), + Document(page_content="test_2", metadata={"id": 3}), + ] + ids = docsearch.upsert(pks, documents) + assert len(ids) == 2 # type: ignore[arg-type] + + +# if __name__ == "__main__": +# test_milvus() +# test_milvus_with_metadata() +# test_milvus_with_id() +# test_milvus_with_score() +# test_milvus_max_marginal_relevance_search() +# test_milvus_add_extra() +# test_milvus_no_drop() +# test_milvus_get_pks() +# test_milvus_delete_entities() +# test_milvus_upsert_entities() diff --git a/libs/partners/milvus/tests/unit_tests/__init__.py b/libs/partners/milvus/tests/unit_tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/unit_tests/retrievers/__init__.py b/libs/partners/milvus/tests/unit_tests/retrievers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/unit_tests/test_imports.py b/libs/partners/milvus/tests/unit_tests/test_imports.py new file mode 100644 index 00000000000..8be170e36f2 --- /dev/null +++ b/libs/partners/milvus/tests/unit_tests/test_imports.py @@ -0,0 +1,12 @@ +from langchain_milvus import __all__ + +EXPECTED_ALL = [ + "Milvus", + "MilvusCollectionHybridSearchRetriever", + "Zilliz", + 
"ZillizCloudPipelineRetriever", +] + + +def test_all_imports() -> None: + assert sorted(EXPECTED_ALL) == sorted(__all__) diff --git a/libs/partners/milvus/tests/unit_tests/vectorstores/__init__.py b/libs/partners/milvus/tests/unit_tests/vectorstores/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/libs/partners/milvus/tests/unit_tests/vectorstores/test_milvus.py b/libs/partners/milvus/tests/unit_tests/vectorstores/test_milvus.py new file mode 100644 index 00000000000..1ef2314e795 --- /dev/null +++ b/libs/partners/milvus/tests/unit_tests/vectorstores/test_milvus.py @@ -0,0 +1,17 @@ +import os +from tempfile import TemporaryDirectory +from unittest.mock import Mock + +from langchain_milvus.vectorstores import Milvus + + +def test_initialization() -> None: + """Test integration milvus initialization.""" + embedding = Mock() + with TemporaryDirectory() as tmp_dir: + Milvus( + embedding_function=embedding, + connection_args={ + "uri": os.path.join(tmp_dir, "milvus.db"), + }, + ) diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py index 2a18876a0f7..271941def89 100644 --- a/libs/partners/mistralai/langchain_mistralai/chat_models.py +++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py @@ -12,6 +12,7 @@ from typing import ( Dict, Iterator, List, + Literal, Optional, Sequence, Tuple, @@ -49,6 +50,10 @@ from langchain_core.messages import ( ToolCall, ToolMessage, ) +from langchain_core.output_parsers import ( + JsonOutputParser, + PydanticOutputParser, +) from langchain_core.output_parsers.base import OutputParserLike from langchain_core.output_parsers.openai_tools import ( JsonOutputKeyToolsParser, @@ -608,8 +613,9 @@ class ChatMistralAI(BaseChatModel): def with_structured_output( self, - schema: Union[Dict, Type[BaseModel]], + schema: Optional[Union[Dict, Type[BaseModel]]] = None, *, + method: Literal["function_calling", "json_mode"] = 
"function_calling", include_raw: bool = False, **kwargs: Any, ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: @@ -622,6 +628,12 @@ class ChatMistralAI(BaseChatModel): attributes will be validated, whereas with a dict they will not be. If `method` is "function_calling" and `schema` is a dict, then the dict must match the OpenAI function-calling spec. + method: The method for steering model generation, either "function_calling" + or "json_mode". If "function_calling" then the schema will be converted + to an OpenAI function and the returned model will make use of the + function-calling API. If "json_mode" then OpenAI's JSON mode will be + used. Note that if using "json_mode" then you must include instructions + for formatting the output into the desired schema into the model call. include_raw: If False then only the parsed structured output is returned. If an error occurs during model output parsing it will be raised. If True then both the raw model response (a BaseMessage) and the parsed model @@ -709,21 +721,81 @@ class ChatMistralAI(BaseChatModel): # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' # } + Example: JSON mode, Pydantic schema (method="json_mode", include_raw=True): + .. code-block:: + + from langchain_mistralai import ChatMistralAI + from langchain_core.pydantic_v1 import BaseModel + + class AnswerWithJustification(BaseModel): + answer: str + justification: str + + llm = ChatMistralAI(model="mistral-large-latest", temperature=0) + structured_llm = llm.with_structured_output( + AnswerWithJustification, + method="json_mode", + include_raw=True + ) + + structured_llm.invoke( + "Answer the following question. " + "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n" + "What's heavier a pound of bricks or a pound of feathers?" 
+ ) + # -> { + # 'raw': AIMessage(content='{\n "answer": "They are both the same weight.",\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'), + # 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'), + # 'parsing_error': None + # } + + Example: JSON mode, no schema (schema=None, method="json_mode", include_raw=True): + .. code-block:: + + from langchain_mistralai import ChatMistralAI + + structured_llm = llm.with_structured_output(method="json_mode", include_raw=True) + + structured_llm.invoke( + "Answer the following question. " + "Make sure to return a JSON blob with keys 'answer' and 'justification'.\n\n" + "What's heavier a pound of bricks or a pound of feathers?" + ) + # -> { + # 'raw': AIMessage(content='{\n "answer": "They are both the same weight.",\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \n}'), + # 'parsed': { + # 'answer': 'They are both the same weight.', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.' + # }, + # 'parsing_error': None + # } """ # noqa: E501 if kwargs: raise ValueError(f"Received unsupported arguments {kwargs}") is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel) - llm = self.bind_tools([schema], tool_choice="any") - if is_pydantic_schema: - output_parser: OutputParserLike = PydanticToolsParser( - tools=[schema], first_tool_only=True + if method == "function_calling": + if schema is None: + raise ValueError( + "schema must be specified when method is 'function_calling'. 
" + "Received None." + ) + llm = self.bind_tools([schema], tool_choice="any") + if is_pydantic_schema: + output_parser: OutputParserLike = PydanticToolsParser( + tools=[schema], first_tool_only=True + ) + else: + key_name = convert_to_openai_tool(schema)["function"]["name"] + output_parser = JsonOutputKeyToolsParser( + key_name=key_name, first_tool_only=True + ) + elif method == "json_mode": + llm = self.bind(response_format={"type": "json_object"}) + output_parser = ( + PydanticOutputParser(pydantic_object=schema) + if is_pydantic_schema + else JsonOutputParser() ) - else: - key_name = convert_to_openai_tool(schema)["function"]["name"] - output_parser = JsonOutputKeyToolsParser( - key_name=key_name, first_tool_only=True - ) - if include_raw: parser_assign = RunnablePassthrough.assign( parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None diff --git a/libs/partners/mistralai/tests/integration_tests/test_standard.py b/libs/partners/mistralai/tests/integration_tests/test_standard.py index d9b8ff19692..7ea8f1bee8f 100644 --- a/libs/partners/mistralai/tests/integration_tests/test_standard.py +++ b/libs/partners/mistralai/tests/integration_tests/test_standard.py @@ -20,3 +20,14 @@ class TestMistralStandard(ChatModelIntegrationTests): "model": "mistral-large-latest", "temperature": 0, } + + @pytest.mark.xfail(reason="Not implemented.") + def test_usage_metadata( + self, + chat_model_class: Type[BaseChatModel], + chat_model_params: dict, + ) -> None: + super().test_usage_metadata( + chat_model_class, + chat_model_params, + ) diff --git a/libs/partners/mongodb/tests/integration_tests/test_vectorstores.py b/libs/partners/mongodb/tests/integration_tests/test_vectorstores.py index 16d4b17bafa..451ff291134 100644 --- a/libs/partners/mongodb/tests/integration_tests/test_vectorstores.py +++ b/libs/partners/mongodb/tests/integration_tests/test_vectorstores.py @@ -51,7 +51,7 @@ class TestMongoDBAtlasVectorSearch: # insure the test collection is empty collection = 
get_collection() if collection.count_documents({}): - collection.delete_many({}) # type: ignore[index] # noqa: E501 + collection.delete_many({}) # type: ignore[index] @classmethod def teardown_class(cls) -> None: diff --git a/libs/partners/mongodb/tests/unit_tests/test_vectorstores.py b/libs/partners/mongodb/tests/unit_tests/test_vectorstores.py index 9c6c781208c..0dc4b7fa2af 100644 --- a/libs/partners/mongodb/tests/unit_tests/test_vectorstores.py +++ b/libs/partners/mongodb/tests/unit_tests/test_vectorstores.py @@ -46,7 +46,7 @@ class TestMongoDBAtlasVectorSearch: def setup_class(cls) -> None: # ensure the test collection is empty collection = get_collection() - assert collection.count_documents({}) == 0 # type: ignore[index] # noqa: E501 + assert collection.count_documents({}) == 0 # type: ignore[index] @classmethod def teardown_class(cls) -> None: diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 0f5133a4e57..f8202287872 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -81,7 +81,7 @@ class AzureChatOpenAI(BaseChatOpenAI): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. - """ # noqa: E501 + """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. 
@@ -178,7 +178,7 @@ class AzureChatOpenAI(BaseChatOpenAI): 'azure_endpoint="https://xxx.openai.azure.com/", ' 'azure_deployment="my-deployment"\n\n' "Or you can equivalently specify:\n\n" - 'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"' # noqa: E501 + 'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"' ) client_params = { "api_version": values["openai_api_version"], diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 1ede891316f..84459cc4d29 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -58,6 +58,7 @@ from langchain_core.messages import ( ToolMessage, ToolMessageChunk, ) +from langchain_core.messages.ai import UsageMetadata from langchain_core.output_parsers import ( JsonOutputParser, PydanticOutputParser, @@ -483,23 +484,36 @@ class BaseChatOpenAI(BaseChatModel): if not isinstance(chunk, dict): chunk = chunk.model_dump() if len(chunk["choices"]) == 0: - continue - choice = chunk["choices"][0] - if choice["delta"] is None: - continue - chunk = _convert_delta_to_message_chunk( - choice["delta"], default_chunk_class - ) - generation_info = {} - if finish_reason := choice.get("finish_reason"): - generation_info["finish_reason"] = finish_reason - logprobs = choice.get("logprobs") - if logprobs: - generation_info["logprobs"] = logprobs - default_chunk_class = chunk.__class__ - chunk = ChatGenerationChunk( - message=chunk, generation_info=generation_info or None - ) + if token_usage := chunk.get("usage"): + usage_metadata = UsageMetadata( + input_tokens=token_usage.get("prompt_tokens", 0), + output_tokens=token_usage.get("completion_tokens", 0), + total_tokens=token_usage.get("total_tokens", 0), + ) + chunk = ChatGenerationChunk( + message=default_chunk_class( + content="", usage_metadata=usage_metadata + ) + ) + else: + continue + else: + choice 
= chunk["choices"][0] + if choice["delta"] is None: + continue + chunk = _convert_delta_to_message_chunk( + choice["delta"], default_chunk_class + ) + generation_info = {} + if finish_reason := choice.get("finish_reason"): + generation_info["finish_reason"] = finish_reason + logprobs = choice.get("logprobs") + if logprobs: + generation_info["logprobs"] = logprobs + default_chunk_class = chunk.__class__ + chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info or None + ) if run_manager: run_manager.on_llm_new_token( chunk.text, chunk=chunk, logprobs=logprobs @@ -548,8 +562,15 @@ class BaseChatOpenAI(BaseChatModel): if response.get("error"): raise ValueError(response.get("error")) + token_usage = response.get("usage", {}) for res in response["choices"]: message = _convert_dict_to_message(res["message"]) + if token_usage and isinstance(message, AIMessage): + message.usage_metadata = { + "input_tokens": token_usage.get("prompt_tokens", 0), + "output_tokens": token_usage.get("completion_tokens", 0), + "total_tokens": token_usage.get("total_tokens", 0), + } generation_info = dict(finish_reason=res.get("finish_reason")) if "logprobs" in res: generation_info["logprobs"] = res["logprobs"] @@ -558,7 +579,6 @@ class BaseChatOpenAI(BaseChatModel): generation_info=generation_info, ) generations.append(gen) - token_usage = response.get("usage", {}) llm_output = { "token_usage": token_usage, "model_name": self.model_name, @@ -583,23 +603,36 @@ class BaseChatOpenAI(BaseChatModel): if not isinstance(chunk, dict): chunk = chunk.model_dump() if len(chunk["choices"]) == 0: - continue - choice = chunk["choices"][0] - if choice["delta"] is None: - continue - chunk = _convert_delta_to_message_chunk( - choice["delta"], default_chunk_class - ) - generation_info = {} - if finish_reason := choice.get("finish_reason"): - generation_info["finish_reason"] = finish_reason - logprobs = choice.get("logprobs") - if logprobs: - generation_info["logprobs"] = logprobs - 
default_chunk_class = chunk.__class__ - chunk = ChatGenerationChunk( - message=chunk, generation_info=generation_info or None - ) + if token_usage := chunk.get("usage"): + usage_metadata = UsageMetadata( + input_tokens=token_usage.get("prompt_tokens", 0), + output_tokens=token_usage.get("completion_tokens", 0), + total_tokens=token_usage.get("total_tokens", 0), + ) + chunk = ChatGenerationChunk( + message=default_chunk_class( + content="", usage_metadata=usage_metadata + ) + ) + else: + continue + else: + choice = chunk["choices"][0] + if choice["delta"] is None: + continue + chunk = _convert_delta_to_message_chunk( + choice["delta"], default_chunk_class + ) + generation_info = {} + if finish_reason := choice.get("finish_reason"): + generation_info["finish_reason"] = finish_reason + logprobs = choice.get("logprobs") + if logprobs: + generation_info["logprobs"] = logprobs + default_chunk_class = chunk.__class__ + chunk = ChatGenerationChunk( + message=chunk, generation_info=generation_info or None + ) if run_manager: await run_manager.on_llm_new_token( token=chunk.text, chunk=chunk, logprobs=logprobs @@ -1129,6 +1162,29 @@ class ChatOpenAI(BaseChatOpenAI): """Return whether this model can be serialized by Langchain.""" return True + def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]: + """Set default stream_options.""" + default_stream_options = {"include_usage": True} + stream_options = kwargs.get("stream_options", {}) + merged_stream_options = {**default_stream_options, **stream_options} + kwargs["stream_options"] = merged_stream_options + + return super()._stream(*args, **kwargs) + + async def _astream( + self, + *args: Any, + **kwargs: Any, + ) -> AsyncIterator[ChatGenerationChunk]: + """Set default stream_options.""" + default_stream_options = {"include_usage": True} + stream_options = kwargs.get("stream_options", {}) + merged_stream_options = {**default_stream_options, **stream_options} + kwargs["stream_options"] = 
merged_stream_options + + async for chunk in super()._astream(*args, **kwargs): + yield chunk + def _is_pydantic_class(obj: Any) -> bool: return isinstance(obj, type) and issubclass(obj, BaseModel) diff --git a/libs/partners/openai/langchain_openai/embeddings/azure.py b/libs/partners/openai/langchain_openai/embeddings/azure.py index de53c8058c2..f4a0bd18c96 100644 --- a/libs/partners/openai/langchain_openai/embeddings/azure.py +++ b/libs/partners/openai/langchain_openai/embeddings/azure.py @@ -49,7 +49,7 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. - """ # noqa: E501 + """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. diff --git a/libs/partners/openai/langchain_openai/llms/azure.py b/libs/partners/openai/langchain_openai/llms/azure.py index cf9be00c64a..35ad5e79675 100644 --- a/libs/partners/openai/langchain_openai/llms/azure.py +++ b/libs/partners/openai/langchain_openai/llms/azure.py @@ -54,7 +54,7 @@ class AzureOpenAI(BaseOpenAI): For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id. - """ # noqa: E501 + """ azure_ad_token_provider: Union[Callable[[], str], None] = None """A function that returns an Azure Active Directory token. diff --git a/libs/partners/openai/poetry.lock b/libs/partners/openai/poetry.lock index 476f3a12829..3c00a51e271 100644 --- a/libs/partners/openai/poetry.lock +++ b/libs/partners/openai/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -385,7 +385,7 @@ files = [ [[package]] name = "langchain-core" -version = "0.2.0" +version = "0.2.2rc1" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -1268,4 +1268,4 @@ watchmedo = ["PyYAML (>=3.10)"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "2b9a8302c7d5c38538a198a7d16221d3094d73958378c984108deaac6fe7b2d0" +content-hash = "62f0a24221c64dc8035ccf7cca3f8ac2eaf47d653a441645c8021120833ecb52" diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index 47c75c9a066..aed9d0274f1 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-openai" -version = "0.1.7" +version = "0.1.8rc1" description = "An integration package connecting OpenAI and LangChain" authors = [] readme = "README.md" @@ -12,8 +12,8 @@ license = "MIT" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -langchain-core = ">=0.2.0,<0.3" -openai = "^1.24.0" +langchain-core = {version =">=0.2.2rc1,<0.3", allow-prereleases=true} +openai = "^1.26.0" tiktoken = ">=0.7,<1" [tool.poetry.group.test] diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py index e86c457932c..db87949eb64 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py @@ -346,9 +346,34 @@ def test_stream() -> None: llm = ChatOpenAI() full: Optional[BaseMessageChunk] = None + chunks_with_token_counts = 0 for chunk in llm.stream("I'm Pickle Rick"): assert isinstance(chunk.content, str) full = chunk if full is None else full + chunk + assert isinstance(chunk, AIMessageChunk) + if chunk.usage_metadata is not None: + chunks_with_token_counts += 1 + if chunks_with_token_counts != 1: + 
raise AssertionError( + "Expected exactly one chunk with token counts. " + "AIMessageChunk aggregation adds counts. Check that " + "this is behaving properly." + ) + + # check token usage is populated + assert isinstance(full, AIMessageChunk) + assert full.usage_metadata is not None + assert full.usage_metadata["input_tokens"] > 0 + assert full.usage_metadata["output_tokens"] > 0 + assert full.usage_metadata["total_tokens"] > 0 + + # check not populated + aggregate: Optional[BaseMessageChunk] = None + for chunk in llm.stream("Hello", stream_options={"include_usage": False}): + assert isinstance(chunk.content, str) + aggregate = chunk if aggregate is None else aggregate + chunk + assert isinstance(aggregate, AIMessageChunk) + assert aggregate.usage_metadata is None async def test_astream() -> None: @@ -356,9 +381,34 @@ async def test_astream() -> None: llm = ChatOpenAI() full: Optional[BaseMessageChunk] = None + chunks_with_token_counts = 0 async for chunk in llm.astream("I'm Pickle Rick"): assert isinstance(chunk.content, str) full = chunk if full is None else full + chunk + assert isinstance(chunk, AIMessageChunk) + if chunk.usage_metadata is not None: + chunks_with_token_counts += 1 + if chunks_with_token_counts != 1: + raise AssertionError( + "Expected exactly one chunk with token counts. " + "AIMessageChunk aggregation adds counts. Check that " + "this is behaving properly." 
+ ) + + # check token usage is populated + assert isinstance(full, AIMessageChunk) + assert full.usage_metadata is not None + assert full.usage_metadata["input_tokens"] > 0 + assert full.usage_metadata["output_tokens"] > 0 + assert full.usage_metadata["total_tokens"] > 0 + + # check not populated + aggregate: Optional[BaseMessageChunk] = None + async for chunk in llm.astream("Hello", stream_options={"include_usage": False}): + assert isinstance(chunk.content, str) + aggregate = chunk if aggregate is None else aggregate + chunk + assert isinstance(aggregate, AIMessageChunk) + assert aggregate.usage_metadata is None async def test_abatch() -> None: diff --git a/libs/partners/openai/tests/unit_tests/fake/callbacks.py b/libs/partners/openai/tests/unit_tests/fake/callbacks.py index b2bef343fff..db66f2acc9e 100644 --- a/libs/partners/openai/tests/unit_tests/fake/callbacks.py +++ b/libs/partners/openai/tests/unit_tests/fake/callbacks.py @@ -22,7 +22,7 @@ class BaseFakeCallbackHandler(BaseModel): ignore_retriever_: bool = False ignore_chat_model_: bool = False - # to allow for similar callback handlers that are not technicall equal + # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of failing tests diff --git a/libs/partners/prompty/tests/unit_tests/fake_callback_handler.py b/libs/partners/prompty/tests/unit_tests/fake_callback_handler.py index 86569fd0a31..fd68bebd2d9 100644 --- a/libs/partners/prompty/tests/unit_tests/fake_callback_handler.py +++ b/libs/partners/prompty/tests/unit_tests/fake_callback_handler.py @@ -22,7 +22,7 @@ class BaseFakeCallbackHandler(BaseModel): ignore_retriever_: bool = False ignore_chat_model_: bool = False - # to allow for similar callback handlers that are not technicall equal + # to allow for similar callback handlers that are not technically equal fake_id: Union[str, None] = None # add finer-grained counters for easier debugging of 
failing tests diff --git a/libs/partners/qdrant/langchain_qdrant/vectorstores.py b/libs/partners/qdrant/langchain_qdrant/vectorstores.py index 766a70d0fbf..7f43885416f 100644 --- a/libs/partners/qdrant/langchain_qdrant/vectorstores.py +++ b/libs/partners/qdrant/langchain_qdrant/vectorstores.py @@ -1657,7 +1657,7 @@ class Qdrant(VectorStore): f"Existing Qdrant collection {collection_name} uses named vectors. " f"If you want to reuse it, please set `vector_name` to any of the " f"existing named vectors: " - f"{', '.join(current_vector_config.keys())}." # noqa + f"{', '.join(current_vector_config.keys())}." f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." ) @@ -1816,7 +1816,7 @@ class Qdrant(VectorStore): f"Existing Qdrant collection {collection_name} uses named vectors. " f"If you want to reuse it, please set `vector_name` to any of the " f"existing named vectors: " - f"{', '.join(current_vector_config.keys())}." # noqa + f"{', '.join(current_vector_config.keys())}." f"If you want to recreate the collection, set `force_recreate` " f"parameter to `True`." 
) diff --git a/libs/partners/robocorp/pyproject.toml b/libs/partners/robocorp/pyproject.toml index 3af04514b26..0b3e9f2fa57 100644 --- a/libs/partners/robocorp/pyproject.toml +++ b/libs/partners/robocorp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain-robocorp" -version = "0.0.7" +version = "0.0.8" description = "An integration package connecting Robocorp Action Server and LangChain" authors = [] readme = "README.md" diff --git a/libs/partners/together/tests/unit_tests/test_llms.py b/libs/partners/together/tests/unit_tests/test_llms.py index 7cb2e8c6e64..0e32e24b9e7 100644 --- a/libs/partners/together/tests/unit_tests/test_llms.py +++ b/libs/partners/together/tests/unit_tests/test_llms.py @@ -27,7 +27,7 @@ def test_together_api_key_masked_when_passed_from_env( temperature=0.2, max_tokens=250, ) - print(llm.together_api_key, end="") # noqa: T201 + print(llm.together_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" @@ -43,7 +43,7 @@ def test_together_api_key_masked_when_passed_via_constructor( temperature=0.2, max_tokens=250, ) - print(llm.together_api_key, end="") # noqa: T201 + print(llm.together_api_key, end="") captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/partners/upstage/README.md b/libs/partners/upstage/README.md index fb91c0a8898..e26cb409a7c 100644 --- a/libs/partners/upstage/README.md +++ b/libs/partners/upstage/README.md @@ -21,5 +21,5 @@ See a [usage example](https://python.langchain.com/docs/integrations/chat/upstag See a [usage example](https://python.langchain.com/docs/integrations/text_embedding/upstage) -Use `solar-1-mini-embedding` as the default model for embeddings. Do not add suffixes such as `-query` or `-passage` to the model name. +Use `solar-embedding-1-large` model for embeddings. Do not add suffixes such as `-query` or `-passage` to the model name. `UpstageEmbeddings` will automatically add the suffixes based on the method called. 
diff --git a/libs/partners/upstage/langchain_upstage/embeddings.py b/libs/partners/upstage/langchain_upstage/embeddings.py index 08976c608f7..5a74b32832c 100644 --- a/libs/partners/upstage/langchain_upstage/embeddings.py +++ b/libs/partners/upstage/langchain_upstage/embeddings.py @@ -46,7 +46,7 @@ class UpstageEmbeddings(BaseModel, Embeddings): from langchain_upstage import UpstageEmbeddings - model = UpstageEmbeddings() + model = UpstageEmbeddings(model='solar-embedding-1-large') """ client: Any = Field(default=None, exclude=True) #: :meta private: @@ -200,6 +200,8 @@ class UpstageEmbeddings(BaseModel, Embeddings): assert ( self.embed_batch_size <= MAX_EMBED_BATCH_SIZE ), f"The embed_batch_size should not be larger than {MAX_EMBED_BATCH_SIZE}." + if not texts: + return [] params = self._invocation_params params["model"] = params["model"] + "-passage" embeddings = [] @@ -242,6 +244,8 @@ class UpstageEmbeddings(BaseModel, Embeddings): assert ( self.embed_batch_size <= MAX_EMBED_BATCH_SIZE ), f"The embed_batch_size should not be larger than {MAX_EMBED_BATCH_SIZE}." 
+ if not texts: + return [] params = self._invocation_params params["model"] = params["model"] + "-passage" embeddings = [] diff --git a/libs/partners/upstage/langchain_upstage/layout_analysis_parsers.py b/libs/partners/upstage/langchain_upstage/layout_analysis_parsers.py index 5a4056dfe61..17b571c5132 100644 --- a/libs/partners/upstage/langchain_upstage/layout_analysis_parsers.py +++ b/libs/partners/upstage/langchain_upstage/layout_analysis_parsers.py @@ -181,19 +181,22 @@ class UpstageLayoutAnalysisParser(BaseBlobParser): result = response.json().get("elements", []) + elements = [ + element for element in result if element["category"] not in self.exclude + ] + + return elements + except requests.RequestException as req_err: # Handle any request-related exceptions print(f"Request Exception: {req_err}") + raise ValueError(f"Failed to send request: {req_err}") except json.JSONDecodeError as json_err: # Handle JSON decode errors print(f"JSON Decode Error: {json_err}") raise ValueError(f"Failed to decode JSON response: {json_err}") - elements = [ - element for element in result if element["category"] not in self.exclude - ] - - return elements + return [] def _split_and_request( self, diff --git a/libs/partners/upstage/tests/integration_tests/test_embeddings.py b/libs/partners/upstage/tests/integration_tests/test_embeddings.py index bd056d2d40b..c8e4765d5ed 100644 --- a/libs/partners/upstage/tests/integration_tests/test_embeddings.py +++ b/libs/partners/upstage/tests/integration_tests/test_embeddings.py @@ -35,3 +35,17 @@ async def test_langchain_upstage_aembed_query() -> None: embedding = UpstageEmbeddings(model="solar-embedding-1-large") output = await embedding.aembed_query(query) assert len(output) > 0 + + +def test_langchain_upstage_embed_documents_with_empty_list() -> None: + """Test Upstage embeddings with empty list.""" + embedding = UpstageEmbeddings(model="solar-embedding-1-large") + output = embedding.embed_documents([]) + assert len(output) == 0 + + +async 
def test_langchain_upstage_aembed_documents_with_empty_list() -> None: + """Test Upstage embeddings asynchronous with empty list.""" + embedding = UpstageEmbeddings(model="solar-embedding-1-large") + output = await embedding.aembed_documents([]) + assert len(output) == 0 diff --git a/libs/partners/upstage/tests/unit_tests/test_layout_analysis.py b/libs/partners/upstage/tests/unit_tests/test_layout_analysis.py index a74b914fb07..86c5cbb0d90 100644 --- a/libs/partners/upstage/tests/unit_tests/test_layout_analysis.py +++ b/libs/partners/upstage/tests/unit_tests/test_layout_analysis.py @@ -1,7 +1,11 @@ +import json from pathlib import Path from typing import Any, Dict, get_args +from unittest import TestCase from unittest.mock import MagicMock, Mock, patch +import requests + from langchain_upstage import UpstageLayoutAnalysisLoader from langchain_upstage.layout_analysis import OutputType, SplitType @@ -205,3 +209,45 @@ def test_page_split_html_output(mock_post: Mock) -> None: assert document.metadata["page"] == MOCK_RESPONSE_JSON["elements"][i]["page"] assert document.metadata["type"] == "html" assert document.metadata["split"] == "page" + + +@patch("requests.post") +def test_request_exception(mock_post: Mock) -> None: + mock_post.side_effect = requests.RequestException("Mocked request exception") + + loader = UpstageLayoutAnalysisLoader( + file_path=EXAMPLE_PDF_PATH, + output_type="html", + split="page", + api_key="valid_api_key", + exclude=[], + ) + + with TestCase.assertRaises(TestCase(), ValueError) as context: + loader.load() + + assert "Failed to send request: Mocked request exception" == str(context.exception) + + +@patch("requests.post") +def test_json_decode_error(mock_post: Mock) -> None: + mock_response = Mock() + mock_response.status_code = 200 + mock_response.json.side_effect = json.JSONDecodeError("Expecting value", "", 0) + mock_post.return_value = mock_response + + loader = UpstageLayoutAnalysisLoader( + file_path=EXAMPLE_PDF_PATH, + output_type="html", 
+ split="page", + api_key="valid_api_key", + exclude=[], + ) + + with TestCase.assertRaises(TestCase(), ValueError) as context: + loader.load() + + assert ( + "Failed to decode JSON response: Expecting value: line 1 column 1 (char 0)" + == str(context.exception) + ) diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py index 5f11c6f1f94..5f669efda16 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py @@ -132,6 +132,18 @@ class ChatModelIntegrationTests(ABC): assert isinstance(result.content, str) assert len(result.content) > 0 + def test_usage_metadata( + self, chat_model_class: Type[BaseChatModel], chat_model_params: dict + ) -> None: + model = chat_model_class(**chat_model_params) + result = model.invoke("Hello") + assert result is not None + assert isinstance(result, AIMessage) + assert result.usage_metadata is not None + assert isinstance(result.usage_metadata["input_tokens"], int) + assert isinstance(result.usage_metadata["output_tokens"], int) + assert isinstance(result.usage_metadata["total_tokens"], int) + def test_tool_message_histories_string_content( self, chat_model_class: Type[BaseChatModel], diff --git a/libs/text-splitters/langchain_text_splitters/base.py b/libs/text-splitters/langchain_text_splitters/base.py index b0fb33caa2d..bdf7ae7be2d 100644 --- a/libs/text-splitters/langchain_text_splitters/base.py +++ b/libs/text-splitters/langchain_text_splitters/base.py @@ -35,7 +35,7 @@ class TextSplitter(BaseDocumentTransformer, ABC): chunk_size: int = 4000, chunk_overlap: int = 200, length_function: Callable[[str], int] = len, - keep_separator: bool = False, + keep_separator: Union[bool, Literal["start", "end"]] = False, add_start_index: bool = False, strip_whitespace: bool = True, ) -> None: @@ -45,7 +45,8 @@ class 
TextSplitter(BaseDocumentTransformer, ABC): chunk_size: Maximum size of chunks to return chunk_overlap: Overlap in characters between chunks length_function: Function that measures the length of given chunks - keep_separator: Whether to keep the separator in the chunks + keep_separator: Whether to keep the separator and where to place it + in each corresponding chunk (True='start') add_start_index: If `True`, includes chunk's start index in metadata strip_whitespace: If `True`, strips whitespace from the start and end of every document diff --git a/libs/text-splitters/langchain_text_splitters/character.py b/libs/text-splitters/langchain_text_splitters/character.py index 0f2ce97bcb0..6783f98363a 100644 --- a/libs/text-splitters/langchain_text_splitters/character.py +++ b/libs/text-splitters/langchain_text_splitters/character.py @@ -1,7 +1,7 @@ from __future__ import annotations import re -from typing import Any, List, Optional +from typing import Any, List, Literal, Optional, Union from langchain_text_splitters.base import Language, TextSplitter @@ -29,17 +29,25 @@ class CharacterTextSplitter(TextSplitter): def _split_text_with_regex( - text: str, separator: str, keep_separator: bool + text: str, separator: str, keep_separator: Union[bool, Literal["start", "end"]] ) -> List[str]: # Now that we have the separator, split the text if separator: if keep_separator: # The parentheses in the pattern keep the delimiters in the result. 
_splits = re.split(f"({separator})", text) - splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)] + splits = ( + ([_splits[i] + _splits[i + 1] for i in range(0, len(_splits) - 1, 2)]) + if keep_separator == "end" + else ([_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]) + ) if len(_splits) % 2 == 0: splits += _splits[-1:] - splits = [_splits[0]] + splits + splits = ( + (splits + [_splits[-1]]) + if keep_separator == "end" + else ([_splits[0]] + splits) + ) else: splits = re.split(separator, text) else: diff --git a/libs/text-splitters/tests/unit_tests/test_text_splitters.py b/libs/text-splitters/tests/unit_tests/test_text_splitters.py index 3d88d786fb3..062f4d089d1 100644 --- a/libs/text-splitters/tests/unit_tests/test_text_splitters.py +++ b/libs/text-splitters/tests/unit_tests/test_text_splitters.py @@ -112,6 +112,50 @@ def test_character_text_splitter_keep_separator_regex( assert output == expected_output +@pytest.mark.parametrize( + "separator, is_separator_regex", [(re.escape("."), True), (".", False)] +) +def test_character_text_splitter_keep_separator_regex_start( + separator: str, is_separator_regex: bool +) -> None: + """Test splitting by characters while keeping the separator + that is a regex special character and placing it at the start of each chunk. 
+ """ + text = "foo.bar.baz.123" + splitter = CharacterTextSplitter( + separator=separator, + chunk_size=1, + chunk_overlap=0, + keep_separator="start", + is_separator_regex=is_separator_regex, + ) + output = splitter.split_text(text) + expected_output = ["foo", ".bar", ".baz", ".123"] + assert output == expected_output + + +@pytest.mark.parametrize( + "separator, is_separator_regex", [(re.escape("."), True), (".", False)] +) +def test_character_text_splitter_keep_separator_regex_end( + separator: str, is_separator_regex: bool +) -> None: + """Test splitting by characters while keeping the separator + that is a regex special character and placing it at the end of each chunk. + """ + text = "foo.bar.baz.123" + splitter = CharacterTextSplitter( + separator=separator, + chunk_size=1, + chunk_overlap=0, + keep_separator="end", + is_separator_regex=is_separator_regex, + ) + output = splitter.split_text(text) + expected_output = ["foo.", "bar.", "baz.", "123"] + assert output == expected_output + + @pytest.mark.parametrize( "separator, is_separator_regex", [(re.escape("."), True), (".", False)] ) diff --git a/poetry.lock b/poetry.lock index b727188febd..959bf689f6d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
[[package]] name = "aiohttp" @@ -1351,7 +1351,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, - {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -1694,7 +1693,7 @@ files = [ [[package]] name = "langchain" -version = "0.1.16" +version = "0.2.0" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -1705,10 +1704,8 @@ develop = true aiohttp = "^3.8.3" async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""} dataclasses-json = ">= 0.5.7, < 0.7" -jsonpatch = "^1.33" -langchain-community = ">=0.0.32,<0.1" -langchain-core = "^0.1.42" -langchain-text-splitters = ">=0.0.1,<0.1" +langchain-core = "^0.2.0" +langchain-text-splitters = "^0.2.0" langsmith = "^0.1.17" numpy = "^1" pydantic = ">=1,<3" @@ -1725,10 +1722,10 @@ cli = ["typer (>=0.9.0,<0.10.0)"] cohere = ["cohere (>=4,<6)"] docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai 
(>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.0.2,<0.1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", 
"gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] javascript = ["esprima (>=4.0.1,<5.0.0)"] llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] -openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"] +openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"] qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] text-helpers = ["chardet (>=5.1.0,<6.0.0)"] @@ -1738,7 +1735,7 @@ url = "libs/langchain" [[package]] name = "langchain-community" -version = "0.0.35" +version = "0.2.0" description = "Community contributed 
LangChain integrations." optional = false python-versions = ">=3.8.1,<4.0" @@ -1748,7 +1745,8 @@ develop = true [package.dependencies] aiohttp = "^3.8.3" dataclasses-json = ">= 0.5.7, < 0.7" -langchain-core = "^0.1.47" +langchain = "^0.2.0" +langchain-core = "^0.2.0" langsmith = "^0.1.0" numpy = "^1" PyYAML = ">=5.3" @@ -1758,7 +1756,7 @@ tenacity = "^8.1.0" [package.extras] cli = ["typer (>=0.9.0,<0.10.0)"] -extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpickle (>=2.0.0)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", 
"oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-search-documents (==11.4.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.6,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpathlib (>=0.18,<0.19)", "cloudpickle (>=2.0.0)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", 
"gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "httpx-sse (>=0.4.0,<0.5.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "oracledb (>=2.2.0,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pyjwt (>=2.8.0,<3.0.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] [package.source] type = "directory" @@ -1766,7 +1764,7 @@ url = "libs/community" [[package]] name = "langchain-core" -version = "0.1.47" +version = "0.2.0" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -1790,7 +1788,7 @@ url = "libs/core" [[package]] 
name = "langchain-experimental" -version = "0.0.57" +version = "0.0.59" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -1798,8 +1796,8 @@ files = [] develop = true [package.dependencies] -langchain = "^0.1.15" -langchain-core = "^0.1.41" +langchain-community = "^0.2" +langchain-core = "^0.2" [package.extras] extended-testing = ["faker (>=19.3.1,<20.0.0)", "jinja2 (>=3,<4)", "pandas (>=2.0.1,<3.0.0)", "presidio-analyzer (>=2.2.352,<3.0.0)", "presidio-anonymizer (>=2.2.352,<3.0.0)", "sentence-transformers (>=2,<3)", "tabulate (>=0.9.0,<0.10.0)", "vowpal-wabbit-next (==0.6.0)"] @@ -1810,7 +1808,7 @@ url = "libs/experimental" [[package]] name = "langchain-openai" -version = "0.1.4" +version = "0.1.7" description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = ">=3.8.1,<4.0" @@ -1818,9 +1816,9 @@ files = [] develop = true [package.dependencies] -langchain-core = "^0.1.46" -openai = "^1.10.0" -tiktoken = ">=0.5.2,<1" +langchain-core = ">=0.2.0,<0.3" +openai = "^1.24.0" +tiktoken = ">=0.7,<1" [package.source] type = "directory" @@ -1828,7 +1826,7 @@ url = "libs/partners/openai" [[package]] name = "langchain-text-splitters" -version = "0.0.1" +version = "0.2.0" description = "LangChain text splitting utilities" optional = false python-versions = ">=3.8.1,<4.0" @@ -1836,7 +1834,7 @@ files = [] develop = true [package.dependencies] -langchain-core = "^0.1.28" +langchain-core = "^0.2.0" [package.extras] extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"] @@ -2475,13 +2473,13 @@ testing = ["matplotlib", "pytest", "pytest-cov"] [[package]] name = "openai" -version = "1.16.1" +version = "1.30.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.16.1-py3-none-any.whl", hash = 
"sha256:77ef3db6110071f7154859e234250fb945a36554207a30a4491092eadb73fcb5"}, - {file = "openai-1.16.1.tar.gz", hash = "sha256:58922c785d167458b46e3c76e7b1bc2306f313ee9b71791e84cbf590abe160f2"}, + {file = "openai-1.30.1-py3-none-any.whl", hash = "sha256:c9fb3c3545c118bbce8deb824397b9433a66d0d0ede6a96f7009c95b76de4a46"}, + {file = "openai-1.30.1.tar.gz", hash = "sha256:4f85190e577cba0b066e1950b8eb9b11d25bc7ebcc43a86b326ce1bfa564ec74"}, ] [package.dependencies] @@ -3003,7 +3001,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3011,16 +3008,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ 
-3037,7 +3026,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3045,7 +3033,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = 
"PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3962,47 +3949,47 @@ typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] [[package]] name = "tiktoken" -version = "0.6.0" +version = "0.7.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" files = [ - {file = "tiktoken-0.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:277de84ccd8fa12730a6b4067456e5cf72fef6300bea61d506c09e45658d41ac"}, - {file = "tiktoken-0.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c44433f658064463650d61387623735641dcc4b6c999ca30bc0f8ba3fccaf5c"}, - {file = "tiktoken-0.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afb9a2a866ae6eef1995ab656744287a5ac95acc7e0491c33fad54d053288ad3"}, - {file = "tiktoken-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62c05b3109fefca26fedb2820452a050074ad8e5ad9803f4652977778177d9f"}, - {file = "tiktoken-0.6.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ef917fad0bccda07bfbad835525bbed5f3ab97a8a3e66526e48cdc3e7beacf7"}, - {file = "tiktoken-0.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e095131ab6092d0769a2fda85aa260c7c383072daec599ba9d8b149d2a3f4d8b"}, - {file = "tiktoken-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:05b344c61779f815038292a19a0c6eb7098b63c8f865ff205abb9ea1b656030e"}, - {file = "tiktoken-0.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cefb9870fb55dca9e450e54dbf61f904aab9180ff6fe568b61f4db9564e78871"}, - {file = "tiktoken-0.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:702950d33d8cabc039845674107d2e6dcabbbb0990ef350f640661368df481bb"}, - {file = "tiktoken-0.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8d49d076058f23254f2aff9af603863c5c5f9ab095bc896bceed04f8f0b013a"}, - {file = 
"tiktoken-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:430bc4e650a2d23a789dc2cdca3b9e5e7eb3cd3935168d97d43518cbb1f9a911"}, - {file = "tiktoken-0.6.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:293cb8669757301a3019a12d6770bd55bec38a4d3ee9978ddbe599d68976aca7"}, - {file = "tiktoken-0.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bd1a288b7903aadc054b0e16ea78e3171f70b670e7372432298c686ebf9dd47"}, - {file = "tiktoken-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac76e000183e3b749634968a45c7169b351e99936ef46f0d2353cd0d46c3118d"}, - {file = "tiktoken-0.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17cc8a4a3245ab7d935c83a2db6bb71619099d7284b884f4b2aea4c74f2f83e3"}, - {file = "tiktoken-0.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:284aebcccffe1bba0d6571651317df6a5b376ff6cfed5aeb800c55df44c78177"}, - {file = "tiktoken-0.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c1a3a5d33846f8cd9dd3b7897c1d45722f48625a587f8e6f3d3e85080559be8"}, - {file = "tiktoken-0.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6318b2bb2337f38ee954fd5efa82632c6e5ced1d52a671370fa4b2eff1355e91"}, - {file = "tiktoken-0.6.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f5f0f2ed67ba16373f9a6013b68da298096b27cd4e1cf276d2d3868b5c7efd1"}, - {file = "tiktoken-0.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:75af4c0b16609c2ad02581f3cdcd1fb698c7565091370bf6c0cf8624ffaba6dc"}, - {file = "tiktoken-0.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:45577faf9a9d383b8fd683e313cf6df88b6076c034f0a16da243bb1c139340c3"}, - {file = "tiktoken-0.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7c1492ab90c21ca4d11cef3a236ee31a3e279bb21b3fc5b0e2210588c4209e68"}, - {file = "tiktoken-0.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e2b380c5b7751272015400b26144a2bab4066ebb8daae9c3cd2a92c3b508fe5a"}, - {file = 
"tiktoken-0.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f497598b9f58c99cbc0eb764b4a92272c14d5203fc713dd650b896a03a50ad"}, - {file = "tiktoken-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e65e8bd6f3f279d80f1e1fbd5f588f036b9a5fa27690b7f0cc07021f1dfa0839"}, - {file = "tiktoken-0.6.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5f1495450a54e564d236769d25bfefbf77727e232d7a8a378f97acddee08c1ae"}, - {file = "tiktoken-0.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6c4e4857d99f6fb4670e928250835b21b68c59250520a1941618b5b4194e20c3"}, - {file = "tiktoken-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:168d718f07a39b013032741867e789971346df8e89983fe3c0ef3fbd5a0b1cb9"}, - {file = "tiktoken-0.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:47fdcfe11bd55376785a6aea8ad1db967db7f66ea81aed5c43fad497521819a4"}, - {file = "tiktoken-0.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fb7d2ccbf1a7784810aff6b80b4012fb42c6fc37eaa68cb3b553801a5cc2d1fc"}, - {file = "tiktoken-0.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ccb7a111ee76af5d876a729a347f8747d5ad548e1487eeea90eaf58894b3138"}, - {file = "tiktoken-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2048e1086b48e3c8c6e2ceeac866561374cd57a84622fa49a6b245ffecb7744"}, - {file = "tiktoken-0.6.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:07f229a5eb250b6403a61200199cecf0aac4aa23c3ecc1c11c1ca002cbb8f159"}, - {file = "tiktoken-0.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:432aa3be8436177b0db5a2b3e7cc28fd6c693f783b2f8722539ba16a867d0c6a"}, - {file = "tiktoken-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8bfe8a19c8b5c40d121ee7938cd9c6a278e5b97dc035fd61714b4f0399d2f7a1"}, - {file = "tiktoken-0.6.0.tar.gz", hash = "sha256:ace62a4ede83c75b0374a2ddfa4b76903cf483e9cb06247f566be3bf14e6beed"}, + {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, + {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, + {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, + {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, + {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, + {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, + {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, + {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, + {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, + {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, + {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, + {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, + {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, + {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, + {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, + {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, + {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, + {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, + {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, + {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, + {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, + {file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, + {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, + {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, + {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, + {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, + {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, + {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, + {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, + {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, + {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, + {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, + {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, + {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, + {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, + {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, ] [package.dependencies] diff --git a/templates/anthropic-iterative-search/main.py b/templates/anthropic-iterative-search/main.py index c04830d110a..27b7aa1aa6a 100644 --- a/templates/anthropic-iterative-search/main.py +++ b/templates/anthropic-iterative-search/main.py @@ -5,7 +5,7 @@ if __name__ == "__main__": "Which movie came out first: Oppenheimer, or " "Are You There God It's Me Margaret?" 
) - print( # noqa: T201 + print( final_chain.with_config(configurable={"chain": "retrieve"}).invoke( {"query": query} ) diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py index f54b58aa1a0..1816e8c7fdd 100644 --- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py +++ b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py @@ -42,7 +42,7 @@ retriever = vector_store.as_retriever(search_kwargs={"k": 3}) # Please remove this and/or adapt to your use case! inserted_lines = populate(vector_store) if inserted_lines: - print(f"Done ({inserted_lines} lines inserted).") # noqa: T201 + print(f"Done ({inserted_lines} lines inserted).") entomology_template = """ You are an expert entomologist, tasked with answering enthusiast biologists' questions. diff --git a/templates/csv-agent/main.py b/templates/csv-agent/main.py index b0fba50cefe..8814b924787 100644 --- a/templates/csv-agent/main.py +++ b/templates/csv-agent/main.py @@ -2,4 +2,4 @@ from csv_agent.agent import agent_executor if __name__ == "__main__": question = "who was in cabin c28?" 
- print(agent_executor.invoke({"input": question})) # noqa: T201 + print(agent_executor.invoke({"input": question})) diff --git a/templates/elastic-query-generator/main.py b/templates/elastic-query-generator/main.py index 6f1ff0b8c39..4f848b6e88a 100644 --- a/templates/elastic-query-generator/main.py +++ b/templates/elastic-query-generator/main.py @@ -1,4 +1,4 @@ from elastic_query_generator.chain import chain if __name__ == "__main__": - print(chain.invoke({"input": "how many customers named Carol"})) # noqa: T201 + print(chain.invoke({"input": "how many customers named Carol"})) diff --git a/templates/neo4j-advanced-rag/main.py b/templates/neo4j-advanced-rag/main.py index a50f7d56f54..2b339491cd6 100644 --- a/templates/neo4j-advanced-rag/main.py +++ b/templates/neo4j-advanced-rag/main.py @@ -2,7 +2,7 @@ from neo4j_advanced_rag.chain import chain if __name__ == "__main__": original_query = "What is the plot of the Dune?" - print( # noqa: T201 + print( chain.invoke( {"question": original_query}, {"configurable": {"strategy": "parent_strategy"}}, diff --git a/templates/neo4j-cypher-ft/main.py b/templates/neo4j-cypher-ft/main.py index 36b7ac41e37..490d4542536 100644 --- a/templates/neo4j-cypher-ft/main.py +++ b/templates/neo4j-cypher-ft/main.py @@ -2,4 +2,4 @@ from neo4j_cypher_ft.chain import chain if __name__ == "__main__": original_query = "Did tom cruis act in top gun?" - print(chain.invoke({"question": original_query})) # noqa: T201 + print(chain.invoke({"question": original_query})) diff --git a/templates/neo4j-cypher-memory/main.py b/templates/neo4j-cypher-memory/main.py index 0250de251bd..735fac99650 100644 --- a/templates/neo4j-cypher-memory/main.py +++ b/templates/neo4j-cypher-memory/main.py @@ -2,7 +2,7 @@ from neo4j_cypher_memory.chain import chain if __name__ == "__main__": original_query = "Who played in Top Gun?" 
- print( # noqa: T201 + print( chain.invoke( { "question": original_query, @@ -12,7 +12,7 @@ if __name__ == "__main__": ) ) follow_up_query = "Did they play in any other movies?" - print( # noqa: T201 + print( chain.invoke( { "question": follow_up_query, diff --git a/templates/neo4j-cypher/main.py b/templates/neo4j-cypher/main.py index 7962b8f0ff4..611cbe2aff9 100644 --- a/templates/neo4j-cypher/main.py +++ b/templates/neo4j-cypher/main.py @@ -2,4 +2,4 @@ from neo4j_cypher.chain import chain if __name__ == "__main__": original_query = "Who played in Top Gun?" - print(chain.invoke({"question": original_query})) # noqa: T201 + print(chain.invoke({"question": original_query})) diff --git a/templates/neo4j-generation/main.py b/templates/neo4j-generation/main.py index 1068b9fe10c..578a18013fe 100644 --- a/templates/neo4j-generation/main.py +++ b/templates/neo4j-generation/main.py @@ -4,7 +4,7 @@ if __name__ == "__main__": text = "Harrison works at LangChain, which is located in San Francisco" allowed_nodes = ["Person", "Organization", "Location"] allowed_relationships = ["WORKS_AT", "LOCATED_IN"] - print( # noqa: T201 + print( chain( text, allowed_nodes=allowed_nodes, diff --git a/templates/neo4j-parent/main.py b/templates/neo4j-parent/main.py index e109d57e5f6..ac52947e3f7 100644 --- a/templates/neo4j-parent/main.py +++ b/templates/neo4j-parent/main.py @@ -2,4 +2,4 @@ from neo4j_parent.chain import chain if __name__ == "__main__": original_query = "What is the plot of the Dune?" - print(chain.invoke(original_query)) # noqa: T201 + print(chain.invoke(original_query)) diff --git a/templates/neo4j-semantic-layer/main.py b/templates/neo4j-semantic-layer/main.py index cbe3517d5a2..681c6a20d6e 100644 --- a/templates/neo4j-semantic-layer/main.py +++ b/templates/neo4j-semantic-layer/main.py @@ -11,7 +11,7 @@ if __name__ == "__main__": "\n\n1. John Travolta\n2. 
John McDonough", ) ] - print(agent_executor.invoke({"input": original_query})) # noqa: T201 - print( # noqa: T201 + print(agent_executor.invoke({"input": original_query})) + print( agent_executor.invoke({"input": followup_query, "chat_history": chat_history}) ) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py index 6e3e0a86649..784ba8f4325 100644 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py +++ b/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py @@ -34,7 +34,7 @@ def store_movie_rating(movie: str, rating: int): try: return response[0]["response"] except Exception as e: - print(e) # noqa: T201 + print(e) return "Something went wrong" diff --git a/templates/neo4j-semantic-ollama/main.py b/templates/neo4j-semantic-ollama/main.py index 3f8fe2b2830..d6ee3a91969 100644 --- a/templates/neo4j-semantic-ollama/main.py +++ b/templates/neo4j-semantic-ollama/main.py @@ -11,7 +11,7 @@ if __name__ == "__main__": "\n\n1. John Travolta\n2. 
John McDonough", ) ] - print(agent_executor.invoke({"input": original_query})) # noqa: T201 - print( # noqa: T201 + print(agent_executor.invoke({"input": original_query})) + print( agent_executor.invoke({"input": followup_query, "chat_history": chat_history}) ) diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py index e0399af1d14..f455f70c4b7 100644 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py +++ b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py @@ -35,7 +35,7 @@ def store_movie_rating(movie: str, rating: int): try: return response[0]["response"] except Exception as e: - print(e) # noqa: T201 + print(e) return "Something went wrong" diff --git a/templates/neo4j-vector-memory/main.py b/templates/neo4j-vector-memory/main.py index ca7f16ca3ce..732f0954d59 100644 --- a/templates/neo4j-vector-memory/main.py +++ b/templates/neo4j-vector-memory/main.py @@ -4,13 +4,13 @@ if __name__ == "__main__": user_id = "user_id_1" session_id = "session_id_1" original_query = "What is the plot of the Dune?" - print( # noqa: T201 + print( chain.invoke( {"question": original_query, "user_id": user_id, "session_id": session_id} ) ) follow_up_query = "Tell me more about Leto" - print( # noqa: T201 + print( chain.invoke( {"question": follow_up_query, "user_id": user_id, "session_id": session_id} ) diff --git a/templates/nvidia-rag-canonical/ingest.py b/templates/nvidia-rag-canonical/ingest.py index e4ebd0fc2bc..d7851cfaed3 100644 --- a/templates/nvidia-rag-canonical/ingest.py +++ b/templates/nvidia-rag-canonical/ingest.py @@ -7,7 +7,7 @@ from langchain_nvidia_aiplay import NVIDIAEmbeddings from langchain_text_splitters.character import CharacterTextSplitter if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): - print("Valid NVIDIA_API_KEY already in environment. 
Delete to reset") # noqa: T201 + print("Valid NVIDIA_API_KEY already in environment. Delete to reset") else: nvapi_key = getpass.getpass("NVAPI Key (starts with nvapi-): ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" diff --git a/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py b/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py index 98f1b8fc7b3..78d5e8e28c5 100644 --- a/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py +++ b/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py @@ -23,7 +23,7 @@ INGESTION_CHUNK_SIZE = 500 INGESTION_CHUNK_OVERLAP = 0 if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): - print("Valid NVIDIA_API_KEY already in environment. Delete to reset") # noqa: T201 + print("Valid NVIDIA_API_KEY already in environment. Delete to reset") else: nvapi_key = getpass.getpass("NVAPI Key (starts with nvapi-): ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" diff --git a/templates/openai-functions-agent-gmail/main.py b/templates/openai-functions-agent-gmail/main.py index 93e23ef6d49..90fcfcc30df 100644 --- a/templates/openai-functions-agent-gmail/main.py +++ b/templates/openai-functions-agent-gmail/main.py @@ -6,4 +6,4 @@ if __name__ == "__main__": "First do background research on the sender and topics to make sure you" " understand the context, then write the draft." ) - print(agent_executor.invoke({"input": question, "chat_history": []})) # noqa: T201 + print(agent_executor.invoke({"input": question, "chat_history": []})) diff --git a/templates/openai-functions-agent/main.py b/templates/openai-functions-agent/main.py index 0e612d69f49..f0e1f5963f9 100644 --- a/templates/openai-functions-agent/main.py +++ b/templates/openai-functions-agent/main.py @@ -2,4 +2,4 @@ from openai_functions_agent.agent import agent_executor if __name__ == "__main__": question = "who won the womens world cup in 2023?" 
- print(agent_executor.invoke({"input": question, "chat_history": []})) # noqa: T201 + print(agent_executor.invoke({"input": question, "chat_history": []})) diff --git a/templates/plate-chain/plate_chain/prompts.py b/templates/plate-chain/plate_chain/prompts.py index 56f81262fda..9b8eb7380cc 100644 --- a/templates/plate-chain/plate_chain/prompts.py +++ b/templates/plate-chain/plate_chain/prompts.py @@ -18,7 +18,7 @@ FULL_PROMPT = """# Context ```json {json_format} ``` -""" # noqa: E50 +""" NUM_PLATES_PROMPT = """- There {num_plates_str} in this data.""" ROWS_PROMPT = """- Each plate has {num_rows} rows.""" diff --git a/templates/propositional-retrieval/propositional_retrieval/ingest.py b/templates/propositional-retrieval/propositional_retrieval/ingest.py index 336f8410b7d..1af11ad3f9c 100644 --- a/templates/propositional-retrieval/propositional_retrieval/ingest.py +++ b/templates/propositional-retrieval/propositional_retrieval/ingest.py @@ -68,7 +68,7 @@ if __name__ == "__main__": from langchain_text_splitters import RecursiveCharacterTextSplitter # noqa from langchain_community.document_loaders.recursive_url_loader import ( RecursiveUrlLoader, - ) # noqa + ) # The attention is all you need paper # Could add more parsing here, as it's very raw. diff --git a/templates/rag-astradb/astradb_entomology_rag/__init__.py b/templates/rag-astradb/astradb_entomology_rag/__init__.py index 3f8e807cab6..7997d5909a5 100644 --- a/templates/rag-astradb/astradb_entomology_rag/__init__.py +++ b/templates/rag-astradb/astradb_entomology_rag/__init__.py @@ -26,7 +26,7 @@ retriever = vector_store.as_retriever(search_kwargs={"k": 3}) inserted_lines = populate(vector_store) if inserted_lines: - print(f"Done ({inserted_lines} lines inserted).") # noqa: T201 + print(f"Done ({inserted_lines} lines inserted).") entomology_template = """ You are an expert entomologist, tasked with answering enthusiast biologists' questions. 
diff --git a/templates/rag-astradb/main.py b/templates/rag-astradb/main.py index e049d7f7437..f80b1b6626a 100644 --- a/templates/rag-astradb/main.py +++ b/templates/rag-astradb/main.py @@ -2,4 +2,4 @@ from astradb_entomology_rag import chain if __name__ == "__main__": response = chain.invoke("Are there more coleoptera or bugs?") - print(response) # noqa: T201 + print(response) diff --git a/templates/rag-aws-bedrock/main.py b/templates/rag-aws-bedrock/main.py index 7be600d6bc9..d0a3c2f48c6 100644 --- a/templates/rag-aws-bedrock/main.py +++ b/templates/rag-aws-bedrock/main.py @@ -3,4 +3,4 @@ from rag_aws_bedrock.chain import chain if __name__ == "__main__": query = "What is this data about?" - print(chain.invoke(query)) # noqa: T201 + print(chain.invoke(query)) diff --git a/templates/rag-aws-kendra/main.py b/templates/rag-aws-kendra/main.py index a44556cfe30..ceb1daa7afd 100644 --- a/templates/rag-aws-kendra/main.py +++ b/templates/rag-aws-kendra/main.py @@ -3,4 +3,4 @@ from rag_aws_kendra.chain import chain if __name__ == "__main__": query = "Does Kendra support table extraction?" 
- print(chain.invoke(query)) # noqa: T201 + print(chain.invoke(query)) diff --git a/templates/rag-chroma-multi-modal-multi-vector/ingest.py b/templates/rag-chroma-multi-modal-multi-vector/ingest.py index 941a8b6670f..9447ca478ec 100644 --- a/templates/rag-chroma-multi-modal-multi-vector/ingest.py +++ b/templates/rag-chroma-multi-modal-multi-vector/ingest.py @@ -66,7 +66,7 @@ def generate_img_summaries(img_base64_list): image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: - print(f"Error with image {i+1}: {e}") # noqa: T201 + print(f"Error with image {i+1}: {e}") return image_summaries, processed_images @@ -178,14 +178,14 @@ def create_multi_vector_retriever( # Load PDF doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" rel_doc_path = doc_path.relative_to(Path.cwd()) -print("Extract slides as images") # noqa: T201 +print("Extract slides as images") pil_images = get_images_from_pdf(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Image summaries -print("Generate image summaries") # noqa: T201 +print("Generate image summaries") image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # The vectorstore to use to index the images summaries diff --git a/templates/rag-chroma-multi-modal/ingest.py b/templates/rag-chroma-multi-modal/ingest.py index 98d4f701a43..67c5f070c5b 100644 --- a/templates/rag-chroma-multi-modal/ingest.py +++ b/templates/rag-chroma-multi-modal/ingest.py @@ -27,14 +27,14 @@ doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" img_dump_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -print("pdf index") # noqa: T201 +print("pdf index") pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path) -print("done") # noqa: T201 +print("done") vectorstore = Path(__file__).parent / 
"chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function -print("Loading embedding function") # noqa: T201 +print("Loading embedding function") embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") # Create chroma @@ -54,5 +54,5 @@ image_uris = sorted( ) # Add images -print("Embedding images") # noqa: T201 +print("Embedding images") vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-elasticsearch/main.py b/templates/rag-elasticsearch/main.py index 8da3a6ab067..4034ab08f26 100644 --- a/templates/rag-elasticsearch/main.py +++ b/templates/rag-elasticsearch/main.py @@ -14,7 +14,7 @@ if __name__ == "__main__": "chat_history": [], } ) - print(response) # noqa: T201 + print(response) follow_up_question = "What are their objectives?" @@ -30,4 +30,4 @@ if __name__ == "__main__": } ) - print(response) # noqa: T201 + print(response) diff --git a/templates/rag-fusion/main.py b/templates/rag-fusion/main.py index 8d6fe45edfe..ed32889561a 100644 --- a/templates/rag-fusion/main.py +++ b/templates/rag-fusion/main.py @@ -2,4 +2,4 @@ from rag_fusion.chain import chain if __name__ == "__main__": original_query = "impact of climate change" - print(chain.invoke(original_query)) # noqa: T201 + print(chain.invoke(original_query)) diff --git a/templates/rag-gemini-multi-modal/ingest.py b/templates/rag-gemini-multi-modal/ingest.py index 98d4f701a43..67c5f070c5b 100644 --- a/templates/rag-gemini-multi-modal/ingest.py +++ b/templates/rag-gemini-multi-modal/ingest.py @@ -27,14 +27,14 @@ doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" img_dump_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -print("pdf index") # noqa: T201 +print("pdf index") pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path) -print("done") # noqa: T201 +print("done") vectorstore = 
Path(__file__).parent / "chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function -print("Loading embedding function") # noqa: T201 +print("Loading embedding function") embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") # Create chroma @@ -54,5 +54,5 @@ image_uris = sorted( ) # Add images -print("Embedding images") # noqa: T201 +print("Embedding images") vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-google-cloud-sensitive-data-protection/main.py b/templates/rag-google-cloud-sensitive-data-protection/main.py index a69f0756734..30c6fa53c3f 100644 --- a/templates/rag-google-cloud-sensitive-data-protection/main.py +++ b/templates/rag-google-cloud-sensitive-data-protection/main.py @@ -6,4 +6,4 @@ if __name__ == "__main__": "is 555-555-5555. And my email is lovely.pirate@gmail.com. Have a nice day.", "chat_history": [], } - print(chain.invoke(query)) # noqa: T201 + print(chain.invoke(query)) diff --git a/templates/rag-google-cloud-vertexai-search/main.py b/templates/rag-google-cloud-vertexai-search/main.py index 6912be7e2a6..a96c83c8bb4 100644 --- a/templates/rag-google-cloud-vertexai-search/main.py +++ b/templates/rag-google-cloud-vertexai-search/main.py @@ -2,4 +2,4 @@ from rag_google_cloud_vertexai_search.chain import chain if __name__ == "__main__": query = "Who is the CEO of Google Cloud?" - print(chain.invoke(query)) # noqa: T201 + print(chain.invoke(query)) diff --git a/templates/rag-milvus/.gitignore b/templates/rag-milvus/.gitignore new file mode 100644 index 00000000000..bee8a64b79a --- /dev/null +++ b/templates/rag-milvus/.gitignore @@ -0,0 +1 @@ +__pycache__ diff --git a/templates/rag-milvus/LICENSE b/templates/rag-milvus/LICENSE new file mode 100644 index 00000000000..fc0602feecd --- /dev/null +++ b/templates/rag-milvus/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 LangChain, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/templates/rag-milvus/README.md b/templates/rag-milvus/README.md new file mode 100644 index 00000000000..c5c28981730 --- /dev/null +++ b/templates/rag-milvus/README.md @@ -0,0 +1,68 @@ +# rag-milvus + +This template performs RAG using Milvus and OpenAI. + +## Environment Setup + +Start the milvus server instance, and get the host ip and port. + +Set the `OPENAI_API_KEY` environment variable to access the OpenAI models. 
+ +## Usage + +To use this package, you should first have the LangChain CLI installed: + +```shell +pip install -U langchain-cli +``` + +To create a new LangChain project and install this as the only package, you can do: + +```shell +langchain app new my-app --package rag-milvus +``` + +If you want to add this to an existing project, you can just run: + +```shell +langchain app add rag-milvus +``` + +And add the following code to your `server.py` file: +```python +from rag_milvus import chain as rag_milvus_chain + +add_routes(app, rag_milvus_chain, path="/rag-milvus") +``` + +(Optional) Let's now configure LangSmith. +LangSmith will help us trace, monitor and debug LangChain applications. +You can sign up for LangSmith [here](https://smith.langchain.com/). +If you don't have access, you can skip this section + + +```shell +export LANGCHAIN_TRACING_V2=true +export LANGCHAIN_API_KEY= +export LANGCHAIN_PROJECT= # if not specified, defaults to "default" +``` + +If you are inside this directory, then you can spin up a LangServe instance directly by: + +```shell +langchain serve +``` + +This will start the FastAPI app with a server is running locally at +[http://localhost:8000](http://localhost:8000) + +We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs) +We can access the playground at [http://127.0.0.1:8000/rag-milvus/playground](http://127.0.0.1:8000/rag-milvus/playground) + +We can access the template from code with: + +```python +from langserve.client import RemoteRunnable + +runnable = RemoteRunnable("http://localhost:8000/rag-milvus") +``` diff --git a/templates/rag-milvus/poetry.lock b/templates/rag-milvus/poetry.lock new file mode 100644 index 00000000000..48776d16b44 --- /dev/null +++ b/templates/rag-milvus/poetry.lock @@ -0,0 +1,2621 @@ +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. 
+ +[[package]] +name = "aiohttp" +version = "3.9.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = 
"aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = 
"aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = 
"aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = 
"aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, +] + +[package.dependencies] 
+aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "3.7.1" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, + {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, +] + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme 
(>=1.2.2)", "sphinxcontrib-jquery"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (<0.22)"] + +[[package]] +name = "argon2-cffi" +version = "23.1.0" +description = "Argon2 for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, + {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, +] + +[package.dependencies] +argon2-cffi-bindings = "*" + +[package.extras] +dev = ["argon2-cffi[tests,typing]", "tox (>4)"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] +tests = ["hypothesis", "pytest"] +typing = ["mypy"] + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +description = "Low-level CFFI bindings for Argon2" +optional = false +python-versions = ">=3.6" +files = [ + {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, + {file = 
"argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, + {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, 
+ {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, +] + +[package.dependencies] +cffi = ">=1.0.1" + +[package.extras] +dev = ["cogapp", "pre-commit", "pytest", "wheel"] +tests = ["pytest"] + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", 
"zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + +[[package]] +name = "azure-core" +version = "1.30.1" +description = "Microsoft Azure Core Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-core-1.30.1.tar.gz", hash = "sha256:26273a254131f84269e8ea4464f3560c731f29c0c1f69ac99010845f239c1a8f"}, + {file = "azure_core-1.30.1-py3-none-any.whl", hash = "sha256:7c5ee397e48f281ec4dd773d67a0a47a0962ed6fa833036057f9ea067f688e74"}, +] + +[package.dependencies] +requests = ">=2.21.0" +six = ">=1.11.0" +typing-extensions = ">=4.6.0" + +[package.extras] +aio = ["aiohttp (>=3.0)"] + +[[package]] +name = "azure-storage-blob" +version = "12.19.1" +description = "Microsoft Azure Blob Storage Client Library for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "azure-storage-blob-12.19.1.tar.gz", hash = "sha256:13e16ba42fc54ac2c7e8f976062173a5c82b9ec0594728e134aac372965a11b0"}, + {file = "azure_storage_blob-12.19.1-py3-none-any.whl", hash = "sha256:c5530dc51c21c9564e4eb706cd499befca8819b10dd89716d3fc90d747556243"}, +] + +[package.dependencies] +azure-core = ">=1.28.0,<2.0.0" +cryptography = ">=2.1.4" +isodate = ">=0.6.1" +typing-extensions = ">=4.3.0" + +[package.extras] +aio = ["azure-core[aio] (>=1.28.0,<2.0.0)"] + +[[package]] +name = "certifi" +version = "2024.2.2" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = 
"cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = 
"sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} 
+ +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "cryptography" +version = "42.0.5" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"}, + {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"}, + {file = 
"cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"}, + {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"}, + {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"}, + {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"}, + {file = 
"cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"}, + {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"}, + {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] 
+sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "dataclasses-json" +version = "0.6.4" +description = "Easily serialize dataclasses to and from JSON." +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "dataclasses_json-0.6.4-py3-none-any.whl", hash = "sha256:f90578b8a3177f7552f4e1a6e535e84293cd5da421fcce0642d49c0d7bdf8df2"}, + {file = "dataclasses_json-0.6.4.tar.gz", hash = "sha256:73696ebf24936560cca79a2430cbc4f3dd23ac7bf46ed17f38e5e5e7657a6377"}, +] + +[package.dependencies] +marshmallow = ">=3.18.0,<4.0.0" +typing-inspect = ">=0.4.0,<1" + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "environs" +version = "9.5.0" +description = "simplified environment variable parsing" +optional = false +python-versions = ">=3.6" +files = [ + {file = "environs-9.5.0-py2.py3-none-any.whl", hash = "sha256:1e549569a3de49c05f856f40bce86979e7d5ffbbc4398e7f338574c220189124"}, + {file = "environs-9.5.0.tar.gz", hash = "sha256:a76307b36fbe856bdca7ee9161e6c466fd7fcffc297109a118c59b54e27e30c9"}, +] + +[package.dependencies] +marshmallow = ">=3.0.0" +python-dotenv = "*" + +[package.extras] +dev = ["dj-database-url", "dj-email-url", "django-cache-url", "flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)", "pytest", "tox"] +django = ["dj-database-url", "dj-email-url", "django-cache-url"] +lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"] +tests 
= ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"] + +[[package]] +name = "exceptiongroup" +version = "1.2.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "fastapi" +version = "0.104.1" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.104.1-py3-none-any.whl", hash = "sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241"}, + {file = "fastapi-0.104.1.tar.gz", hash = "sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae"}, +] + +[package.dependencies] +anyio = ">=3.7.1,<4.0.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.27.0,<0.28.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = 
"frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = 
"frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = 
"frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = 
"frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.43" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, + {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] + +[[package]] +name = "greenlet" +version = "3.0.3" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = 
"sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, + {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, + {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, + {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, + {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, + {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, + {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, + {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, + {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, + {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = 
"sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, + {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, + {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, + {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, + {file = 
"greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, + {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, + {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, + {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpcio" +version = "1.60.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139"}, + {file = "grpcio-1.60.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff"}, + {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491"}, + {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43"}, + {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae"}, + {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508"}, + {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b"}, + {file = "grpcio-1.60.0-cp310-cp310-win32.whl", hash = "sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d"}, + {file = "grpcio-1.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df"}, + {file = "grpcio-1.60.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd"}, + {file = "grpcio-1.60.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14"}, + {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c"}, + {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134"}, + {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253"}, + {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444"}, + {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d"}, + {file = "grpcio-1.60.0-cp311-cp311-win32.whl", hash = 
"sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320"}, + {file = "grpcio-1.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b"}, + {file = "grpcio-1.60.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18"}, + {file = "grpcio-1.60.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748"}, + {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e"}, + {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b"}, + {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55"}, + {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca"}, + {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5"}, + {file = "grpcio-1.60.0-cp312-cp312-win32.whl", hash = "sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951"}, + {file = "grpcio-1.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a"}, + {file = "grpcio-1.60.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415"}, + {file = "grpcio-1.60.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65"}, + {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324"}, + {file = 
"grpcio-1.60.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454"}, + {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6"}, + {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619"}, + {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179"}, + {file = "grpcio-1.60.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b"}, + {file = "grpcio-1.60.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e"}, + {file = "grpcio-1.60.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13"}, + {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead"}, + {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19"}, + {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0"}, + {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390"}, + {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629"}, + {file = "grpcio-1.60.0-cp38-cp38-win32.whl", hash = "sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860"}, + {file = "grpcio-1.60.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08"}, + {file = "grpcio-1.60.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968"}, + {file = "grpcio-1.60.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590"}, + {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2"}, + {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab"}, + {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328"}, + {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf"}, + {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6"}, + {file = "grpcio-1.60.0-cp39-cp39-win32.whl", hash = "sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03"}, + {file = "grpcio-1.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353"}, + {file = "grpcio-1.60.0.tar.gz", hash = "sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.60.0)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + 
+[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "isodate" +version = "0.6.1" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = false +python-versions = "*" +files = [ + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = 
"sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "jsonpatch" +version = "1.33" +description = "Apply JSON-Patches (RFC 6902)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, +] + +[package.dependencies] +jsonpointer = ">=1.9" + +[[package]] +name = "jsonpointer" +version = "2.4" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, +] + +[[package]] +name = "langchain" +version = "0.1.14" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain-0.1.14-py3-none-any.whl", hash = "sha256:94f9b5df2421faaf762d4f43b9d65c270c2f701934580d281e4c6226deef7234"}, + {file = "langchain-0.1.14.tar.gz", hash = "sha256:124c6244cf3957616b98f2df07dc2992fc40dff6ed1a62d8ee8a40f1e0260a40"}, +] + +[package.dependencies] +aiohttp = ">=3.8.3,<4.0.0" +async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} +dataclasses-json = ">=0.5.7,<0.7" +jsonpatch = ">=1.33,<2.0" +langchain-community = ">=0.0.30,<0.1" +langchain-core = ">=0.1.37,<0.2.0" +langchain-text-splitters = 
">=0.0.1,<0.1" +langsmith = ">=0.1.17,<0.2.0" +numpy = ">=1,<2" +pydantic = ">=1,<3" +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" +tenacity = ">=8.1.0,<9.0.0" + +[package.extras] +azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-textanalytics (>=5.3.0,<6.0.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b8)", "openai (<2)"] +clarifai = ["clarifai (>=9.1.0)"] +cli = ["typer (>=0.9.0,<0.10.0)"] +cohere = ["cohere (>=4,<6)"] +docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] +embeddings = ["sentence-transformers (>=2,<3)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.0.2,<0.1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six 
(>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"] +javascript = ["esprima (>=4.0.1,<5.0.0)"] +llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"] +openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"] +qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] +text-helpers = ["chardet (>=5.1.0,<6.0.0)"] + +[[package]] +name = "langchain-cli" +version = "0.0.21" +description = "CLI for interacting with LangChain" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "langchain_cli-0.0.21-py3-none-any.whl", hash = "sha256:cd5c83597ef857704db983aa1743d7c2e6da52d634f735a7610630347347583e"}, + {file = "langchain_cli-0.0.21.tar.gz", hash = "sha256:d36a40955533ce0217b9a89c11bf593b18d8b40f2abbc81c9a531eb23f54809f"}, +] + +[package.dependencies] +gitpython = ">=3.1.40,<4.0.0" +langserve = {version = ">=0.0.16", extras = ["all"]} +tomlkit = ">=0.12.2,<0.13.0" +typer = {version = ">=0.9.0,<0.10.0", extras = ["all"]} +uvicorn = ">=0.23.2,<0.24.0" + +[[package]] +name = "langchain-community" +version = "0.0.30" +description = "Community contributed LangChain integrations." 
+optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_community-0.0.30-py3-none-any.whl", hash = "sha256:4bee40b5a42a1469453e1b11038ab4a7907040f6b050dc2bf68b278be66da0b8"}, + {file = "langchain_community-0.0.30.tar.gz", hash = "sha256:96f5ef091ced7054b5e51023b61cf97be3a48e173abd10fa3e61b656728b1d6c"}, +] + +[package.dependencies] +aiohttp = ">=3.8.3,<4.0.0" +dataclasses-json = ">=0.5.7,<0.7" +langchain-core = ">=0.1.37,<0.2.0" +langsmith = ">=0.1.0,<0.2.0" +numpy = ">=1,<2" +PyYAML = ">=5.3" +requests = ">=2,<3" +SQLAlchemy = ">=1.4,<3" +tenacity = ">=8.1.0,<9.0.0" + +[package.extras] +cli = ["typer (>=0.9.0,<0.10.0)"] +extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "azure-ai-documentintelligence (>=1.0.0b1,<2.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cloudpickle (>=2.0.0)", "cohere (>=4,<5)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "elasticsearch (>=8.12.0,<9.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "friendli-client (>=1.2.4,<2.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "gradientai (>=1.4.0,<2.0.0)", "hdbcli (>=2.19.21,<3.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "httpx (>=0.24.1,<0.25.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", 
"nvidia-riva-client (>=2.14.0,<3.0.0)", "oci (>=2.119.1,<3.0.0)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "oracle-ads (>=2.9.1,<3.0.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "premai (>=0.3.25,<0.4.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tidb-vector (>=0.0.3,<1.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "tree-sitter (>=0.20.2,<0.21.0)", "tree-sitter-languages (>=1.8.0,<2.0.0)", "upstash-redis (>=0.15.0,<0.16.0)", "vdms (>=0.0.20,<0.0.21)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)", "zhipuai (>=1.0.7,<2.0.0)"] + +[[package]] +name = "langchain-core" +version = "0.1.46" +description = "Building applications with LLMs through composability" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langchain_core-0.1.46-py3-none-any.whl", hash = "sha256:1c0befcd2665dd4aa153318aa9bf729071644b4c179e491769b8e583b4bf7441"}, + {file = "langchain_core-0.1.46.tar.gz", hash = "sha256:17c416349f5c7a9808e70e3725749a3a2df5088f1ecca045c883871aa95f9c9e"}, +] + +[package.dependencies] +jsonpatch = ">=1.33,<2.0" +langsmith = ">=0.1.0,<0.2.0" +packaging = ">=23.2,<24.0" +pydantic = ">=1,<3" +PyYAML = ">=5.3" +tenacity = ">=8.1.0,<9.0.0" + +[package.extras] +extended-testing = ["jinja2 (>=3,<4)"] + +[[package]] +name = "langchain-openai" +version = "0.1.4" +description = "An integration package connecting OpenAI and LangChain" +optional = false +python-versions = 
"<4.0,>=3.8.1" +files = [ + {file = "langchain_openai-0.1.4-py3-none-any.whl", hash = "sha256:a349ada8724921e380aab03ee312568f5ca99adbc806f6878d79ff9cd1d6d353"}, + {file = "langchain_openai-0.1.4.tar.gz", hash = "sha256:1a3220464c270d73ea3987010617789adc2099d4d4740b15c7734ab07e1f054b"}, +] + +[package.dependencies] +langchain-core = ">=0.1.46,<0.2.0" +openai = ">=1.10.0,<2.0.0" +tiktoken = ">=0.5.2,<1" + +[[package]] +name = "langchain-text-splitters" +version = "0.0.1" +description = "LangChain text splitting utilities" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "langchain_text_splitters-0.0.1-py3-none-any.whl", hash = "sha256:f5b802f873f5ff6a8b9259ff34d53ed989666ef4e1582e6d1adb3b5520e3839a"}, + {file = "langchain_text_splitters-0.0.1.tar.gz", hash = "sha256:ac459fa98799f5117ad5425a9330b21961321e30bc19a2a2f9f761ddadd62aa1"}, +] + +[package.dependencies] +langchain-core = ">=0.1.28,<0.2.0" + +[package.extras] +extended-testing = ["lxml (>=5.1.0,<6.0.0)"] + +[[package]] +name = "langserve" +version = "0.1.1" +description = "" +optional = false +python-versions = "<4.0.0,>=3.8.1" +files = [ + {file = "langserve-0.1.1-py3-none-any.whl", hash = "sha256:5d6373ac87a095dd4ebf7be3f54a6c0cc7c691d9a8b9dcb24d85901058e7ddac"}, + {file = "langserve-0.1.1.tar.gz", hash = "sha256:db607c6b231c4bf93438e02c73cc8618877acefa8cb98f68e351ec10b5d52e1d"}, +] + +[package.dependencies] +fastapi = {version = ">=0.90.1,<1", optional = true, markers = "extra == \"server\" or extra == \"all\""} +httpx = ">=0.23.0" +langchain-core = ">=0.1.0,<0.2.0" +orjson = ">=2" +pydantic = ">=1" +sse-starlette = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"server\" or extra == \"all\""} + +[package.extras] +all = ["fastapi (>=0.90.1,<1)", "sse-starlette (>=1.3.0,<2.0.0)"] +server = ["fastapi (>=0.90.1,<1)", "sse-starlette (>=1.3.0,<2.0.0)"] + +[[package]] +name = "langsmith" +version = "0.1.51" +description = "Client library to connect to the LangSmith LLM 
Tracing and Evaluation Platform." +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langsmith-0.1.51-py3-none-any.whl", hash = "sha256:1e7363a3f472ecf02a1d91f6dbacde25519554b98c490be71716fcffaab0ca6b"}, + {file = "langsmith-0.1.51.tar.gz", hash = "sha256:b99b40a8c00e66174540865caa61412622fa1dc4f02602965364919c90528f97"}, +] + +[package.dependencies] +orjson = ">=3.9.14,<4.0.0" +pydantic = ">=1,<3" +requests = ">=2,<3" + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "marshmallow" +version = "3.21.1" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, + {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.2.6)", "sphinx-issues (==4.0.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "minio" +version = "7.2.6" +description = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage" +optional = false +python-versions = "*" +files = [ + {file = "minio-7.2.6-py3-none-any.whl", hash = "sha256:4972273a924f274e2d71f38f6d2afdf841a034801e60ba758e5c5aff4234b768"}, + {file = "minio-7.2.6.tar.gz", hash = "sha256:c545d0dda1ff26cefcfc754242be3d27a4e620e37ef3e51ecbe7212cf7ecc274"}, +] + +[package.dependencies] +argon2-cffi = "*" +certifi = "*" +pycryptodome = "*" +typing-extensions = "*" +urllib3 = "*" + +[[package]] +name = "multidict" +version = "6.0.5" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"}, + {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"}, + {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"}, + {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"}, + {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"}, + {file = 
"multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"}, + {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"}, + {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"}, + {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"}, + {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"}, + {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, + {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, + {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, + {file = 
"multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, + {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, + {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, + {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, + {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"}, + {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"}, + {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"}, + {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"}, + {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"}, + {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"}, + {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"}, + {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"}, + {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"}, + {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"}, + {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"}, + {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"}, + {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"}, + {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"}, + {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"}, + {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, + {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy 
type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "numpy" +version = "1.24.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = 
"numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +] + +[[package]] +name = "openai" +version = "1.23.6" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.7.1" +files = [ + {file = "openai-1.23.6-py3-none-any.whl", hash = "sha256:f406c76ba279d16b9aca5a89cee0d968488e39f671f4dc6f0d690ac3c6f6fca1"}, + {file = "openai-1.23.6.tar.gz", hash = "sha256:612de2d54cf580920a1156273f84aada6b3dca26d048f62eb5364a4314d7f449"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.7,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + +[[package]] +name = "orjson" +version = "3.10.1" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8ec2fc456d53ea4a47768f622bb709be68acd455b0c6be57e91462259741c4f3"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e900863691d327758be14e2a491931605bd0aded3a21beb6ce133889830b659"}, + {file = 
"orjson-3.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab6ecbd6fe57785ebc86ee49e183f37d45f91b46fc601380c67c5c5e9c0014a2"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af7c68b01b876335cccfb4eee0beef2b5b6eae1945d46a09a7c24c9faac7a77"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:915abfb2e528677b488a06eba173e9d7706a20fdfe9cdb15890b74ef9791b85e"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3fd4a36eff9c63d25503b439531d21828da9def0059c4f472e3845a081aa0b"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d229564e72cfc062e6481a91977a5165c5a0fdce11ddc19ced8471847a67c517"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9e00495b18304173ac843b5c5fbea7b6f7968564d0d49bef06bfaeca4b656f4e"}, + {file = "orjson-3.10.1-cp310-none-win32.whl", hash = "sha256:fd78ec55179545c108174ba19c1795ced548d6cac4d80d014163033c047ca4ea"}, + {file = "orjson-3.10.1-cp310-none-win_amd64.whl", hash = "sha256:50ca42b40d5a442a9e22eece8cf42ba3d7cd4cd0f2f20184b4d7682894f05eec"}, + {file = "orjson-3.10.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b345a3d6953628df2f42502297f6c1e1b475cfbf6268013c94c5ac80e8abc04c"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caa7395ef51af4190d2c70a364e2f42138e0e5fcb4bc08bc9b76997659b27dab"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b01d701decd75ae092e5f36f7b88a1e7a1d3bb7c9b9d7694de850fb155578d5a"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5028981ba393f443d8fed9049211b979cadc9d0afecf162832f5a5b152c6297"}, + {file = 
"orjson-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31ff6a222ea362b87bf21ff619598a4dc1106aaafaea32b1c4876d692891ec27"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e852a83d7803d3406135fb7a57cf0c1e4a3e73bac80ec621bd32f01c653849c5"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2567bc928ed3c3fcd90998009e8835de7c7dc59aabcf764b8374d36044864f3b"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4ce98cac60b7bb56457bdd2ed7f0d5d7f242d291fdc0ca566c83fa721b52e92d"}, + {file = "orjson-3.10.1-cp311-none-win32.whl", hash = "sha256:813905e111318acb356bb8029014c77b4c647f8b03f314e7b475bd9ce6d1a8ce"}, + {file = "orjson-3.10.1-cp311-none-win_amd64.whl", hash = "sha256:03a3ca0b3ed52bed1a869163a4284e8a7b0be6a0359d521e467cdef7e8e8a3ee"}, + {file = "orjson-3.10.1-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f02c06cee680b1b3a8727ec26c36f4b3c0c9e2b26339d64471034d16f74f4ef5"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1aa2f127ac546e123283e437cc90b5ecce754a22306c7700b11035dad4ccf85"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2cf29b4b74f585225196944dffdebd549ad2af6da9e80db7115984103fb18a96"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1b130c20b116f413caf6059c651ad32215c28500dce9cd029a334a2d84aa66f"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d31f9a709e6114492136e87c7c6da5e21dfedebefa03af85f3ad72656c493ae9"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1d169461726f271ab31633cf0e7e7353417e16fb69256a4f8ecb3246a78d6e"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:57c294d73825c6b7f30d11c9e5900cfec9a814893af7f14efbe06b8d0f25fba9"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d7f11dbacfa9265ec76b4019efffabaabba7a7ebf14078f6b4df9b51c3c9a8ea"}, + {file = "orjson-3.10.1-cp312-none-win32.whl", hash = "sha256:d89e5ed68593226c31c76ab4de3e0d35c760bfd3fbf0a74c4b2be1383a1bf123"}, + {file = "orjson-3.10.1-cp312-none-win_amd64.whl", hash = "sha256:aa76c4fe147fd162107ce1692c39f7189180cfd3a27cfbc2ab5643422812da8e"}, + {file = "orjson-3.10.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a2c6a85c92d0e494c1ae117befc93cf8e7bca2075f7fe52e32698da650b2c6d1"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9813f43da955197d36a7365eb99bed42b83680801729ab2487fef305b9ced866"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec917b768e2b34b7084cb6c68941f6de5812cc26c6f1a9fecb728e36a3deb9e8"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5252146b3172d75c8a6d27ebca59c9ee066ffc5a277050ccec24821e68742fdf"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:536429bb02791a199d976118b95014ad66f74c58b7644d21061c54ad284e00f4"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dfed3c3e9b9199fb9c3355b9c7e4649b65f639e50ddf50efdf86b45c6de04b5"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2b230ec35f188f003f5b543644ae486b2998f6afa74ee3a98fc8ed2e45960afc"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:01234249ba19c6ab1eb0b8be89f13ea21218b2d72d496ef085cfd37e1bae9dd8"}, + {file = "orjson-3.10.1-cp38-none-win32.whl", hash = "sha256:8a884fbf81a3cc22d264ba780920d4885442144e6acaa1411921260416ac9a54"}, + {file = "orjson-3.10.1-cp38-none-win_amd64.whl", hash = 
"sha256:dab5f802d52b182163f307d2b1f727d30b1762e1923c64c9c56dd853f9671a49"}, + {file = "orjson-3.10.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a51fd55d4486bc5293b7a400f9acd55a2dc3b5fc8420d5ffe9b1d6bb1a056a5e"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53521542a6db1411b3bfa1b24ddce18605a3abdc95a28a67b33f9145f26aa8f2"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27d610df96ac18ace4931411d489637d20ab3b8f63562b0531bba16011998db0"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79244b1456e5846d44e9846534bd9e3206712936d026ea8e6a55a7374d2c0694"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d751efaa8a49ae15cbebdda747a62a9ae521126e396fda8143858419f3b03610"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27ff69c620a4fff33267df70cfd21e0097c2a14216e72943bd5414943e376d77"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ebc58693464146506fde0c4eb1216ff6d4e40213e61f7d40e2f0dde9b2f21650"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5be608c3972ed902e0143a5b8776d81ac1059436915d42defe5c6ae97b3137a4"}, + {file = "orjson-3.10.1-cp39-none-win32.whl", hash = "sha256:4ae10753e7511d359405aadcbf96556c86e9dbf3a948d26c2c9f9a150c52b091"}, + {file = "orjson-3.10.1-cp39-none-win_amd64.whl", hash = "sha256:fb5bc4caa2c192077fdb02dce4e5ef8639e7f20bec4e3a834346693907362932"}, + {file = "orjson-3.10.1.tar.gz", hash = "sha256:a883b28d73370df23ed995c466b4f6c708c1f7a9bdc400fe89165c96c7603204"}, +] + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = 
"sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = 
"sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = 
"pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] 
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "protobuf" +version = "5.26.1" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-5.26.1-cp310-abi3-win32.whl", hash = "sha256:3c388ea6ddfe735f8cf69e3f7dc7611e73107b60bdfcf5d0f024c3ccd3794e23"}, + {file = "protobuf-5.26.1-cp310-abi3-win_amd64.whl", hash = "sha256:e6039957449cb918f331d32ffafa8eb9255769c96aa0560d9a5bf0b4e00a2a33"}, + {file = "protobuf-5.26.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:38aa5f535721d5bb99861166c445c4105c4e285c765fbb2ac10f116e32dcd46d"}, + {file = "protobuf-5.26.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:fbfe61e7ee8c1860855696e3ac6cfd1b01af5498facc6834fcc345c9684fb2ca"}, + {file = "protobuf-5.26.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:f7417703f841167e5a27d48be13389d52ad705ec09eade63dfc3180a959215d7"}, + {file = "protobuf-5.26.1-cp38-cp38-win32.whl", hash = "sha256:d693d2504ca96750d92d9de8a103102dd648fda04540495535f0fec7577ed8fc"}, + {file = "protobuf-5.26.1-cp38-cp38-win_amd64.whl", hash = "sha256:9b557c317ebe6836835ec4ef74ec3e994ad0894ea424314ad3552bc6e8835b4e"}, + {file = "protobuf-5.26.1-cp39-cp39-win32.whl", hash = "sha256:b9ba3ca83c2e31219ffbeb9d76b63aad35a3eb1544170c55336993d7a18ae72c"}, + {file = "protobuf-5.26.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ee014c2c87582e101d6b54260af03b6596728505c79f17c8586e7523aaa8f8c"}, + {file = "protobuf-5.26.1-py3-none-any.whl", hash = "sha256:da612f2720c0183417194eeaa2523215c4fcc1a1949772dc65f05047e08d5932"}, + {file = "protobuf-5.26.1.tar.gz", hash = 
"sha256:8ca2a1d97c290ec7b16e4e5dff2e5ae150cc1582f55b5ab300d45cb0dfa90e51"}, +] + +[[package]] +name = "pyarrow" +version = "16.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyarrow-16.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:22a1fdb1254e5095d629e29cd1ea98ed04b4bbfd8e42cc670a6b639ccc208b60"}, + {file = "pyarrow-16.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:574a00260a4ed9d118a14770edbd440b848fcae5a3024128be9d0274dbcaf858"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0815d0ddb733b8c1b53a05827a91f1b8bde6240f3b20bf9ba5d650eb9b89cdf"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df0080339387b5d30de31e0a149c0c11a827a10c82f0c67d9afae3981d1aabb7"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:edf38cce0bf0dcf726e074159c60516447e4474904c0033f018c1f33d7dac6c5"}, + {file = "pyarrow-16.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91d28f9a40f1264eab2af7905a4d95320ac2f287891e9c8b0035f264fe3c3a4b"}, + {file = "pyarrow-16.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:99af421ee451a78884d7faea23816c429e263bd3618b22d38e7992c9ce2a7ad9"}, + {file = "pyarrow-16.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d22d0941e6c7bafddf5f4c0662e46f2075850f1c044bf1a03150dd9e189427ce"}, + {file = "pyarrow-16.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:266ddb7e823f03733c15adc8b5078db2df6980f9aa93d6bb57ece615df4e0ba7"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cc23090224b6594f5a92d26ad47465af47c1d9c079dd4a0061ae39551889efe"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56850a0afe9ef37249d5387355449c0f94d12ff7994af88f16803a26d38f2016"}, + {file = 
"pyarrow-16.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:705db70d3e2293c2f6f8e84874b5b775f690465798f66e94bb2c07bab0a6bb55"}, + {file = "pyarrow-16.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:5448564754c154997bc09e95a44b81b9e31ae918a86c0fcb35c4aa4922756f55"}, + {file = "pyarrow-16.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:729f7b262aa620c9df8b9967db96c1575e4cfc8c25d078a06968e527b8d6ec05"}, + {file = "pyarrow-16.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fb8065dbc0d051bf2ae2453af0484d99a43135cadabacf0af588a3be81fbbb9b"}, + {file = "pyarrow-16.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:20ce707d9aa390593ea93218b19d0eadab56390311cb87aad32c9a869b0e958c"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5823275c8addbbb50cd4e6a6839952682a33255b447277e37a6f518d6972f4e1"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ab8b9050752b16a8b53fcd9853bf07d8daf19093533e990085168f40c64d978"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:42e56557bc7c5c10d3e42c3b32f6cff649a29d637e8f4e8b311d334cc4326730"}, + {file = "pyarrow-16.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a7abdee4a4a7cfa239e2e8d721224c4b34ffe69a0ca7981354fe03c1328789b"}, + {file = "pyarrow-16.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:ef2f309b68396bcc5a354106741d333494d6a0d3e1951271849787109f0229a6"}, + {file = "pyarrow-16.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ed66e5217b4526fa3585b5e39b0b82f501b88a10d36bd0d2a4d8aa7b5a48e2df"}, + {file = "pyarrow-16.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc8814310486f2a73c661ba8354540f17eef51e1b6dd090b93e3419d3a097b3a"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c2f5e239db7ed43e0ad2baf46a6465f89c824cc703f38ef0fde927d8e0955f7"}, + {file = 
"pyarrow-16.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f293e92d1db251447cb028ae12f7bc47526e4649c3a9924c8376cab4ad6b98bd"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:dd9334a07b6dc21afe0857aa31842365a62eca664e415a3f9536e3a8bb832c07"}, + {file = "pyarrow-16.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d91073d1e2fef2c121154680e2ba7e35ecf8d4969cc0af1fa6f14a8675858159"}, + {file = "pyarrow-16.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:71d52561cd7aefd22cf52538f262850b0cc9e4ec50af2aaa601da3a16ef48877"}, + {file = "pyarrow-16.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b93c9a50b965ee0bf4fef65e53b758a7e8dcc0c2d86cebcc037aaaf1b306ecc0"}, + {file = "pyarrow-16.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d831690844706e374c455fba2fb8cfcb7b797bfe53ceda4b54334316e1ac4fa4"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35692ce8ad0b8c666aa60f83950957096d92f2a9d8d7deda93fb835e6053307e"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dd3151d098e56f16a8389c1247137f9e4c22720b01c6f3aa6dec29a99b74d80"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bd40467bdb3cbaf2044ed7a6f7f251c8f941c8b31275aaaf88e746c4f3ca4a7a"}, + {file = "pyarrow-16.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:00a1dcb22ad4ceb8af87f7bd30cc3354788776c417f493089e0a0af981bc8d80"}, + {file = "pyarrow-16.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fda9a7cebd1b1d46c97b511f60f73a5b766a6de4c5236f144f41a5d5afec1f35"}, + {file = "pyarrow-16.0.0.tar.gz", hash = "sha256:59bb1f1edbbf4114c72415f039f1359f1a57d166a331c3229788ccbfbb31689a"}, +] + +[package.dependencies] +numpy = ">=1.16.6" + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pycryptodome" +version = "3.20.0" +description = "Cryptographic library for Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "pycryptodome-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a"}, + {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f"}, + {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d"}, + {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd"}, + {file = "pycryptodome-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33"}, + {file = "pycryptodome-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690"}, + {file = "pycryptodome-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f"}, + {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091"}, + {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4"}, + {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc"}, + {file = 
"pycryptodome-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818"}, + {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044"}, + {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a"}, + {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2"}, + {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c"}, + {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25"}, + {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128"}, + {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c"}, + {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4"}, + {file = "pycryptodome-3.20.0-cp35-abi3-win32.whl", hash = "sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72"}, + {file = "pycryptodome-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9"}, + {file = "pycryptodome-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a"}, + {file = "pycryptodome-3.20.0-pp27-pypy_73-win32.whl", hash = 
"sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e"}, + {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04"}, + {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3"}, + {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea"}, + {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b"}, + {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6"}, + {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab"}, + {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5"}, + {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e"}, + {file = "pycryptodome-3.20.0.tar.gz", hash = "sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7"}, +] + +[[package]] +name = "pydantic" +version = "2.7.1" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = 
"sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.18.2" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.18.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = 
"pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = 
"pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = 
"pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + 
{file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymilvus" +version = "2.4.0" +description = "Python Sdk for Milvus" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pymilvus-2.4.0-1-py3-none-any.whl", hash = "sha256:9f8212af51bc235a4c1230c344a852b6f75a4be01cac38f21dab5b65695a9598"}, + {file = "pymilvus-2.4.0.tar.gz", hash = "sha256:f1d1a2e9d5172fea3e0a5d396bed9de561a2b93a4d3e1945fdf68b74d771fb05"}, +] + +[package.dependencies] +azure-storage-blob = "*" +environs = "<=9.5.0" +grpcio = ">=1.49.1,<=1.60.0" +minio = ">=7.0.0" +numpy = {version = "<1.25.0", markers = "python_version <= \"3.8\""} +pandas = ">=1.2.4" +protobuf = ">=3.20.0" +pyarrow = ">=12.0.0" +requests = "*" +setuptools = ">=67" +ujson = ">=2.0.0" + +[package.extras] +model = ["milvus-model (>=0.1.0)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = 
"sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = 
"sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "regex" +version = "2024.4.28" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.8" +files = [ + {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd196d056b40af073d95a2879678585f0b74ad35190fac04ca67954c582c6b61"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bb381f777351bd534462f63e1c6afb10a7caa9fa2a421ae22c26e796fe31b1f"}, + {file = "regex-2024.4.28-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:47af45b6153522733aa6e92543938e97a70ce0900649ba626cf5aad290b737b6"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99d6a550425cc51c656331af0e2b1651e90eaaa23fb4acde577cf15068e2e20f"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bf29304a8011feb58913c382902fde3395957a47645bf848eea695839aa101b7"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:92da587eee39a52c91aebea8b850e4e4f095fe5928d415cb7ed656b3460ae79a"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6277d426e2f31bdbacb377d17a7475e32b2d7d1f02faaecc48d8e370c6a3ff31"}, + {file = 
"regex-2024.4.28-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28e1f28d07220c0f3da0e8fcd5a115bbb53f8b55cecf9bec0c946eb9a059a94c"}, + {file = "regex-2024.4.28-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aaa179975a64790c1f2701ac562b5eeb733946eeb036b5bcca05c8d928a62f10"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6f435946b7bf7a1b438b4e6b149b947c837cb23c704e780c19ba3e6855dbbdd3"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:19d6c11bf35a6ad077eb23852827f91c804eeb71ecb85db4ee1386825b9dc4db"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:fdae0120cddc839eb8e3c15faa8ad541cc6d906d3eb24d82fb041cfe2807bc1e"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e672cf9caaf669053121f1766d659a8813bd547edef6e009205378faf45c67b8"}, + {file = "regex-2024.4.28-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f57515750d07e14743db55d59759893fdb21d2668f39e549a7d6cad5d70f9fea"}, + {file = "regex-2024.4.28-cp310-cp310-win32.whl", hash = "sha256:a1409c4eccb6981c7baabc8888d3550df518add6e06fe74fa1d9312c1838652d"}, + {file = "regex-2024.4.28-cp310-cp310-win_amd64.whl", hash = "sha256:1f687a28640f763f23f8a9801fe9e1b37338bb1ca5d564ddd41619458f1f22d1"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:84077821c85f222362b72fdc44f7a3a13587a013a45cf14534df1cbbdc9a6796"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b45d4503de8f4f3dc02f1d28a9b039e5504a02cc18906cfe744c11def942e9eb"}, + {file = "regex-2024.4.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:457c2cd5a646dd4ed536c92b535d73548fb8e216ebee602aa9f48e068fc393f3"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2b51739ddfd013c6f657b55a508de8b9ea78b56d22b236052c3a85a675102dc6"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:459226445c7d7454981c4c0ce0ad1a72e1e751c3e417f305722bbcee6697e06a"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:670fa596984b08a4a769491cbdf22350431970d0112e03d7e4eeaecaafcd0fec"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe00f4fe11c8a521b173e6324d862ee7ee3412bf7107570c9b564fe1119b56fb"}, + {file = "regex-2024.4.28-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:36f392dc7763fe7924575475736bddf9ab9f7a66b920932d0ea50c2ded2f5636"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:23a412b7b1a7063f81a742463f38821097b6a37ce1e5b89dd8e871d14dbfd86b"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f1d6e4b7b2ae3a6a9df53efbf199e4bfcff0959dbdb5fd9ced34d4407348e39a"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:499334ad139557de97cbc4347ee921c0e2b5e9c0f009859e74f3f77918339257"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:0940038bec2fe9e26b203d636c44d31dd8766abc1fe66262da6484bd82461ccf"}, + {file = "regex-2024.4.28-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:66372c2a01782c5fe8e04bff4a2a0121a9897e19223d9eab30c54c50b2ebeb7f"}, + {file = "regex-2024.4.28-cp311-cp311-win32.whl", hash = "sha256:c77d10ec3c1cf328b2f501ca32583625987ea0f23a0c2a49b37a39ee5c4c4630"}, + {file = "regex-2024.4.28-cp311-cp311-win_amd64.whl", hash = "sha256:fc0916c4295c64d6890a46e02d4482bb5ccf33bf1a824c0eaa9e83b148291f90"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:08a1749f04fee2811c7617fdd46d2e46d09106fa8f475c884b65c01326eb15c5"}, + {file = 
"regex-2024.4.28-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b8eb28995771c087a73338f695a08c9abfdf723d185e57b97f6175c5051ff1ae"}, + {file = "regex-2024.4.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dd7ef715ccb8040954d44cfeff17e6b8e9f79c8019daae2fd30a8806ef5435c0"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb0315a2b26fde4005a7c401707c5352df274460f2f85b209cf6024271373013"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2fc053228a6bd3a17a9b0a3f15c3ab3cf95727b00557e92e1cfe094b88cc662"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fe9739a686dc44733d52d6e4f7b9c77b285e49edf8570754b322bca6b85b4cc"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74fcf77d979364f9b69fcf8200849ca29a374973dc193a7317698aa37d8b01c"}, + {file = "regex-2024.4.28-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:965fd0cf4694d76f6564896b422724ec7b959ef927a7cb187fc6b3f4e4f59833"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2fef0b38c34ae675fcbb1b5db760d40c3fc3612cfa186e9e50df5782cac02bcd"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bc365ce25f6c7c5ed70e4bc674f9137f52b7dd6a125037f9132a7be52b8a252f"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ac69b394764bb857429b031d29d9604842bc4cbfd964d764b1af1868eeebc4f0"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:144a1fc54765f5c5c36d6d4b073299832aa1ec6a746a6452c3ee7b46b3d3b11d"}, + {file = "regex-2024.4.28-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2630ca4e152c221072fd4a56d4622b5ada876f668ecd24d5ab62544ae6793ed6"}, + {file = "regex-2024.4.28-cp312-cp312-win32.whl", hash = 
"sha256:7f3502f03b4da52bbe8ba962621daa846f38489cae5c4a7b5d738f15f6443d17"}, + {file = "regex-2024.4.28-cp312-cp312-win_amd64.whl", hash = "sha256:0dd3f69098511e71880fb00f5815db9ed0ef62c05775395968299cb400aeab82"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:374f690e1dd0dbdcddea4a5c9bdd97632cf656c69113f7cd6a361f2a67221cb6"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f87ae6b96374db20f180eab083aafe419b194e96e4f282c40191e71980c666"}, + {file = "regex-2024.4.28-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5dbc1bcc7413eebe5f18196e22804a3be1bfdfc7e2afd415e12c068624d48247"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f85151ec5a232335f1be022b09fbbe459042ea1951d8a48fef251223fc67eee1"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57ba112e5530530fd175ed550373eb263db4ca98b5f00694d73b18b9a02e7185"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:224803b74aab56aa7be313f92a8d9911dcade37e5f167db62a738d0c85fdac4b"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54a047b607fd2d2d52a05e6ad294602f1e0dec2291152b745870afc47c1397"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a2a512d623f1f2d01d881513af9fc6a7c46e5cfffb7dc50c38ce959f9246c94"}, + {file = "regex-2024.4.28-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c06bf3f38f0707592898428636cbb75d0a846651b053a1cf748763e3063a6925"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1031a5e7b048ee371ab3653aad3030ecfad6ee9ecdc85f0242c57751a05b0ac4"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:d7a353ebfa7154c871a35caca7bfd8f9e18666829a1dc187115b80e35a29393e"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:7e76b9cfbf5ced1aca15a0e5b6f229344d9b3123439ffce552b11faab0114a02"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5ce479ecc068bc2a74cb98dd8dba99e070d1b2f4a8371a7dfe631f85db70fe6e"}, + {file = "regex-2024.4.28-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d77b6f63f806578c604dca209280e4c54f0fa9a8128bb8d2cc5fb6f99da4150"}, + {file = "regex-2024.4.28-cp38-cp38-win32.whl", hash = "sha256:d84308f097d7a513359757c69707ad339da799e53b7393819ec2ea36bc4beb58"}, + {file = "regex-2024.4.28-cp38-cp38-win_amd64.whl", hash = "sha256:2cc1b87bba1dd1a898e664a31012725e48af826bf3971e786c53e32e02adae6c"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7413167c507a768eafb5424413c5b2f515c606be5bb4ef8c5dee43925aa5718b"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:108e2dcf0b53a7c4ab8986842a8edcb8ab2e59919a74ff51c296772e8e74d0ae"}, + {file = "regex-2024.4.28-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1c5742c31ba7d72f2dedf7968998730664b45e38827637e0f04a2ac7de2f5f1"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecc6148228c9ae25ce403eade13a0961de1cb016bdb35c6eafd8e7b87ad028b1"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7d893c8cf0e2429b823ef1a1d360a25950ed11f0e2a9df2b5198821832e1947"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4290035b169578ffbbfa50d904d26bec16a94526071ebec3dadbebf67a26b25e"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a22ae1cfd82e4ffa2066eb3390777dc79468f866f0625261a93e44cdf6482b"}, + {file = 
"regex-2024.4.28-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd24fd140b69f0b0bcc9165c397e9b2e89ecbeda83303abf2a072609f60239e2"}, + {file = "regex-2024.4.28-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:39fb166d2196413bead229cd64a2ffd6ec78ebab83fff7d2701103cf9f4dfd26"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9301cc6db4d83d2c0719f7fcda37229691745168bf6ae849bea2e85fc769175d"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7c3d389e8d76a49923683123730c33e9553063d9041658f23897f0b396b2386f"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:99ef6289b62042500d581170d06e17f5353b111a15aa6b25b05b91c6886df8fc"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b91d529b47798c016d4b4c1d06cc826ac40d196da54f0de3c519f5a297c5076a"}, + {file = "regex-2024.4.28-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:43548ad74ea50456e1c68d3c67fff3de64c6edb85bcd511d1136f9b5376fc9d1"}, + {file = "regex-2024.4.28-cp39-cp39-win32.whl", hash = "sha256:05d9b6578a22db7dedb4df81451f360395828b04f4513980b6bd7a1412c679cc"}, + {file = "regex-2024.4.28-cp39-cp39-win_amd64.whl", hash = "sha256:3986217ec830c2109875be740531feb8ddafe0dfa49767cdcd072ed7e8927962"}, + {file = "regex-2024.4.28.tar.gz", hash = "sha256:83ab366777ea45d58f72593adf35d36ca911ea8bd838483c1823b883a121b0e4"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "scipy" +version = "1.9.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {file 
= "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[package.dependencies] +numpy = ">=1.18.5,<1.26.0" + +[package.extras] +dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] +test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "setuptools" +version = "69.5.1" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, + {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", 
"pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = 
"sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.29" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.29-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4c142852ae192e9fe5aad5c350ea6befe9db14370b34047e1f0f7cf99e63c63b"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:99a1e69d4e26f71e750e9ad6fdc8614fbddb67cfe2173a3628a2566034e223c7"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ef3fbccb4058355053c51b82fd3501a6e13dd808c8d8cd2561e610c5456013c"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d6753305936eddc8ed190e006b7bb33a8f50b9854823485eed3a886857ab8d1"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0f3ca96af060a5250a8ad5a63699180bc780c2edf8abf96c58af175921df847a"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c4520047006b1d3f0d89e0532978c0688219857eb2fee7c48052560ae76aca1e"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-win32.whl", hash = "sha256:b2a0e3cf0caac2085ff172c3faacd1e00c376e6884b5bc4dd5b6b84623e29e4f"}, + {file = "SQLAlchemy-2.0.29-cp310-cp310-win_amd64.whl", hash = "sha256:01d10638a37460616708062a40c7b55f73e4d35eaa146781c683e0fa7f6c43fb"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:308ef9cb41d099099fffc9d35781638986870b29f744382904bf9c7dadd08513"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:296195df68326a48385e7a96e877bc19aa210e485fa381c5246bc0234c36c78e"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a13b917b4ffe5a0a31b83d051d60477819ddf18276852ea68037a144a506efb9"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f6d971255d9ddbd3189e2e79d743ff4845c07f0633adfd1de3f63d930dbe673"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:61405ea2d563407d316c63a7b5271ae5d274a2a9fbcd01b0aa5503635699fa1e"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:de7202ffe4d4a8c1e3cde1c03e01c1a3772c92858837e8f3879b497158e4cb44"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-win32.whl", hash = "sha256:b5d7ed79df55a731749ce65ec20d666d82b185fa4898430b17cb90c892741520"}, + {file = "SQLAlchemy-2.0.29-cp311-cp311-win_amd64.whl", hash = "sha256:205f5a2b39d7c380cbc3b5dcc8f2762fb5bcb716838e2d26ccbc54330775b003"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d96710d834a6fb31e21381c6d7b76ec729bd08c75a25a5184b1089141356171f"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:52de4736404e53c5c6a91ef2698c01e52333988ebdc218f14c833237a0804f1b"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c7b02525ede2a164c5fa5014915ba3591730f2cc831f5be9ff3b7fd3e30958e"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dfefdb3e54cd15f5d56fd5ae32f1da2d95d78319c1f6dfb9bcd0eb15d603d5d"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a88913000da9205b13f6f195f0813b6ffd8a0c0c2bd58d499e00a30eb508870c"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fecd5089c4be1bcc37c35e9aa678938d2888845a134dd016de457b942cf5a758"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-win32.whl", hash = "sha256:8197d6f7a3d2b468861ebb4c9f998b9df9e358d6e1cf9c2a01061cb9b6cf4e41"}, + {file = "SQLAlchemy-2.0.29-cp312-cp312-win_amd64.whl", hash = 
"sha256:9b19836ccca0d321e237560e475fd99c3d8655d03da80c845c4da20dda31b6e1"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:87a1d53a5382cdbbf4b7619f107cc862c1b0a4feb29000922db72e5a66a5ffc0"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a0732dffe32333211801b28339d2a0babc1971bc90a983e3035e7b0d6f06b93"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90453597a753322d6aa770c5935887ab1fc49cc4c4fdd436901308383d698b4b"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ea311d4ee9a8fa67f139c088ae9f905fcf0277d6cd75c310a21a88bf85e130f5"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5f20cb0a63a3e0ec4e169aa8890e32b949c8145983afa13a708bc4b0a1f30e03"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-win32.whl", hash = "sha256:e5bbe55e8552019c6463709b39634a5fc55e080d0827e2a3a11e18eb73f5cdbd"}, + {file = "SQLAlchemy-2.0.29-cp37-cp37m-win_amd64.whl", hash = "sha256:c2f9c762a2735600654c654bf48dad388b888f8ce387b095806480e6e4ff6907"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e614d7a25a43a9f54fcce4675c12761b248547f3d41b195e8010ca7297c369c"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:471fcb39c6adf37f820350c28aac4a7df9d3940c6548b624a642852e727ea586"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:988569c8732f54ad3234cf9c561364221a9e943b78dc7a4aaf35ccc2265f1930"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dddaae9b81c88083e6437de95c41e86823d150f4ee94bf24e158a4526cbead01"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:334184d1ab8f4c87f9652b048af3f7abea1c809dfe526fb0435348a6fef3d380"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-musllinux_1_1_x86_64.whl", 
hash = "sha256:38b624e5cf02a69b113c8047cf7f66b5dfe4a2ca07ff8b8716da4f1b3ae81567"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-win32.whl", hash = "sha256:bab41acf151cd68bc2b466deae5deeb9e8ae9c50ad113444151ad965d5bf685b"}, + {file = "SQLAlchemy-2.0.29-cp38-cp38-win_amd64.whl", hash = "sha256:52c8011088305476691b8750c60e03b87910a123cfd9ad48576d6414b6ec2a1d"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3071ad498896907a5ef756206b9dc750f8e57352113c19272bdfdc429c7bd7de"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dba622396a3170974f81bad49aacebd243455ec3cc70615aeaef9e9613b5bca5"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b184e3de58009cc0bf32e20f137f1ec75a32470f5fede06c58f6c355ed42a72"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c37f1050feb91f3d6c32f864d8e114ff5545a4a7afe56778d76a9aec62638ba"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bda7ce59b06d0f09afe22c56714c65c957b1068dee3d5e74d743edec7daba552"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:25664e18bef6dc45015b08f99c63952a53a0a61f61f2e48a9e70cec27e55f699"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-win32.whl", hash = "sha256:77d29cb6c34b14af8a484e831ab530c0f7188f8efed1c6a833a2c674bf3c26ec"}, + {file = "SQLAlchemy-2.0.29-cp39-cp39-win_amd64.whl", hash = "sha256:04c487305ab035a9548f573763915189fc0fe0824d9ba28433196f8436f1449c"}, + {file = "SQLAlchemy-2.0.29-py3-none-any.whl", hash = "sha256:dc4ee2d4ee43251905f88637d5281a8d52e916a021384ec10758826f5cbae305"}, + {file = "SQLAlchemy-2.0.29.tar.gz", hash = "sha256:bd9566b8e58cabd700bc367b60e90d9349cd16f0984973f98a9a09f9c64e86f0"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or 
platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "sse-starlette" +version = "1.8.2" +description = "SSE plugin for Starlette" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sse_starlette-1.8.2-py3-none-any.whl", hash = "sha256:70cc7ef5aca4abe8a25dec1284cce4fe644dd7bf0c406d3e852e516092b7f849"}, + {file = "sse_starlette-1.8.2.tar.gz", hash = "sha256:e0f9b8dec41adc092a0a6e0694334bd3cfd3084c44c497a6ebc1fb4bdd919acd"}, +] + +[package.dependencies] +anyio = "*" +fastapi = "*" +starlette = "*" +uvicorn = "*" + +[[package]] +name = "starlette" +version = "0.27.0" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, + {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] + +[[package]] +name = "tenacity" +version = "8.2.3" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, + {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, +] + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + +[[package]] +name = "tiktoken" +version = "0.6.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:277de84ccd8fa12730a6b4067456e5cf72fef6300bea61d506c09e45658d41ac"}, + {file = "tiktoken-0.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c44433f658064463650d61387623735641dcc4b6c999ca30bc0f8ba3fccaf5c"}, + {file = "tiktoken-0.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afb9a2a866ae6eef1995ab656744287a5ac95acc7e0491c33fad54d053288ad3"}, + {file = "tiktoken-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62c05b3109fefca26fedb2820452a050074ad8e5ad9803f4652977778177d9f"}, + {file = "tiktoken-0.6.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ef917fad0bccda07bfbad835525bbed5f3ab97a8a3e66526e48cdc3e7beacf7"}, + {file = 
"tiktoken-0.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e095131ab6092d0769a2fda85aa260c7c383072daec599ba9d8b149d2a3f4d8b"}, + {file = "tiktoken-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:05b344c61779f815038292a19a0c6eb7098b63c8f865ff205abb9ea1b656030e"}, + {file = "tiktoken-0.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cefb9870fb55dca9e450e54dbf61f904aab9180ff6fe568b61f4db9564e78871"}, + {file = "tiktoken-0.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:702950d33d8cabc039845674107d2e6dcabbbb0990ef350f640661368df481bb"}, + {file = "tiktoken-0.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8d49d076058f23254f2aff9af603863c5c5f9ab095bc896bceed04f8f0b013a"}, + {file = "tiktoken-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:430bc4e650a2d23a789dc2cdca3b9e5e7eb3cd3935168d97d43518cbb1f9a911"}, + {file = "tiktoken-0.6.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:293cb8669757301a3019a12d6770bd55bec38a4d3ee9978ddbe599d68976aca7"}, + {file = "tiktoken-0.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bd1a288b7903aadc054b0e16ea78e3171f70b670e7372432298c686ebf9dd47"}, + {file = "tiktoken-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac76e000183e3b749634968a45c7169b351e99936ef46f0d2353cd0d46c3118d"}, + {file = "tiktoken-0.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17cc8a4a3245ab7d935c83a2db6bb71619099d7284b884f4b2aea4c74f2f83e3"}, + {file = "tiktoken-0.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:284aebcccffe1bba0d6571651317df6a5b376ff6cfed5aeb800c55df44c78177"}, + {file = "tiktoken-0.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c1a3a5d33846f8cd9dd3b7897c1d45722f48625a587f8e6f3d3e85080559be8"}, + {file = "tiktoken-0.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6318b2bb2337f38ee954fd5efa82632c6e5ced1d52a671370fa4b2eff1355e91"}, + {file = 
"tiktoken-0.6.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f5f0f2ed67ba16373f9a6013b68da298096b27cd4e1cf276d2d3868b5c7efd1"}, + {file = "tiktoken-0.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:75af4c0b16609c2ad02581f3cdcd1fb698c7565091370bf6c0cf8624ffaba6dc"}, + {file = "tiktoken-0.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:45577faf9a9d383b8fd683e313cf6df88b6076c034f0a16da243bb1c139340c3"}, + {file = "tiktoken-0.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7c1492ab90c21ca4d11cef3a236ee31a3e279bb21b3fc5b0e2210588c4209e68"}, + {file = "tiktoken-0.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e2b380c5b7751272015400b26144a2bab4066ebb8daae9c3cd2a92c3b508fe5a"}, + {file = "tiktoken-0.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f497598b9f58c99cbc0eb764b4a92272c14d5203fc713dd650b896a03a50ad"}, + {file = "tiktoken-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e65e8bd6f3f279d80f1e1fbd5f588f036b9a5fa27690b7f0cc07021f1dfa0839"}, + {file = "tiktoken-0.6.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5f1495450a54e564d236769d25bfefbf77727e232d7a8a378f97acddee08c1ae"}, + {file = "tiktoken-0.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6c4e4857d99f6fb4670e928250835b21b68c59250520a1941618b5b4194e20c3"}, + {file = "tiktoken-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:168d718f07a39b013032741867e789971346df8e89983fe3c0ef3fbd5a0b1cb9"}, + {file = "tiktoken-0.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:47fdcfe11bd55376785a6aea8ad1db967db7f66ea81aed5c43fad497521819a4"}, + {file = "tiktoken-0.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fb7d2ccbf1a7784810aff6b80b4012fb42c6fc37eaa68cb3b553801a5cc2d1fc"}, + {file = "tiktoken-0.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ccb7a111ee76af5d876a729a347f8747d5ad548e1487eeea90eaf58894b3138"}, + {file = 
"tiktoken-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2048e1086b48e3c8c6e2ceeac866561374cd57a84622fa49a6b245ffecb7744"}, + {file = "tiktoken-0.6.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:07f229a5eb250b6403a61200199cecf0aac4aa23c3ecc1c11c1ca002cbb8f159"}, + {file = "tiktoken-0.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:432aa3be8436177b0db5a2b3e7cc28fd6c693f783b2f8722539ba16a867d0c6a"}, + {file = "tiktoken-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8bfe8a19c8b5c40d121ee7938cd9c6a278e5b97dc035fd61714b4f0399d2f7a1"}, + {file = "tiktoken-0.6.0.tar.gz", hash = "sha256:ace62a4ede83c75b0374a2ddfa4b76903cf483e9cb06247f566be3bf14e6beed"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tomlkit" +version = "0.12.4" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomlkit-0.12.4-py3-none-any.whl", hash = "sha256:5cd82d48a3dd89dee1f9d64420aa20ae65cfbd00668d6f094d7578a78efbb77b"}, + {file = "tomlkit-0.12.4.tar.gz", hash = "sha256:7ca1cfc12232806517a8515047ba66a19369e71edf2439d0f5824f91032b6cc3"}, +] + +[[package]] +name = "tqdm" +version = "4.66.2" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"}, + {file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "typer" +version = "0.9.4" +description = "Typer, build great CLIs. 
Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.6" +files = [ + {file = "typer-0.9.4-py3-none-any.whl", hash = "sha256:aa6c4a4e2329d868b80ecbaf16f807f2b54e192209d7ac9dd42691d63f7a54eb"}, + {file = "typer-0.9.4.tar.gz", hash = "sha256:f714c2d90afae3a7929fcd72a3abb08df305e1ff61719381384211c4070af57f"}, +] + +[package.dependencies] +click = ">=7.1.1,<9.0.0" +colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"all\""} +rich = {version = ">=10.11.0,<14.0.0", optional = true, markers = "extra == \"all\""} +shellingham = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"all\""} +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.971)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] + +[[package]] +name = "typing-extensions" +version = "4.11.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, + {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "tzdata" +version = "2024.1" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, +] + +[[package]] +name = "ujson" +version = "5.9.0" +description = "Ultra fast JSON encoder and decoder for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ujson-5.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab71bf27b002eaf7d047c54a68e60230fbd5cd9da60de7ca0aa87d0bccead8fa"}, + {file = "ujson-5.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a365eac66f5aa7a7fdf57e5066ada6226700884fc7dce2ba5483538bc16c8c5"}, + {file = "ujson-5.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e015122b337858dba5a3dc3533af2a8fc0410ee9e2374092f6a5b88b182e9fcc"}, + {file = "ujson-5.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:779a2a88c53039bebfbccca934430dabb5c62cc179e09a9c27a322023f363e0d"}, + {file = "ujson-5.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10ca3c41e80509fd9805f7c149068fa8dbee18872bbdc03d7cca928926a358d5"}, + {file = "ujson-5.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a566e465cb2fcfdf040c2447b7dd9718799d0d90134b37a20dff1e27c0e9096"}, + {file = "ujson-5.9.0-cp310-cp310-musllinux_1_1_i686.whl", 
hash = "sha256:f833c529e922577226a05bc25b6a8b3eb6c4fb155b72dd88d33de99d53113124"}, + {file = "ujson-5.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b68a0caab33f359b4cbbc10065c88e3758c9f73a11a65a91f024b2e7a1257106"}, + {file = "ujson-5.9.0-cp310-cp310-win32.whl", hash = "sha256:7cc7e605d2aa6ae6b7321c3ae250d2e050f06082e71ab1a4200b4ae64d25863c"}, + {file = "ujson-5.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:a6d3f10eb8ccba4316a6b5465b705ed70a06011c6f82418b59278fbc919bef6f"}, + {file = "ujson-5.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b23bbb46334ce51ddb5dded60c662fbf7bb74a37b8f87221c5b0fec1ec6454b"}, + {file = "ujson-5.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6974b3a7c17bbf829e6c3bfdc5823c67922e44ff169851a755eab79a3dd31ec0"}, + {file = "ujson-5.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5964ea916edfe24af1f4cc68488448fbb1ec27a3ddcddc2b236da575c12c8ae"}, + {file = "ujson-5.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ba7cac47dd65ff88571eceeff48bf30ed5eb9c67b34b88cb22869b7aa19600d"}, + {file = "ujson-5.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bbd91a151a8f3358c29355a491e915eb203f607267a25e6ab10531b3b157c5e"}, + {file = "ujson-5.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:829a69d451a49c0de14a9fecb2a2d544a9b2c884c2b542adb243b683a6f15908"}, + {file = "ujson-5.9.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a807ae73c46ad5db161a7e883eec0fbe1bebc6a54890152ccc63072c4884823b"}, + {file = "ujson-5.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8fc2aa18b13d97b3c8ccecdf1a3c405f411a6e96adeee94233058c44ff92617d"}, + {file = "ujson-5.9.0-cp311-cp311-win32.whl", hash = "sha256:70e06849dfeb2548be48fdd3ceb53300640bc8100c379d6e19d78045e9c26120"}, + {file = "ujson-5.9.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:7309d063cd392811acc49b5016728a5e1b46ab9907d321ebbe1c2156bc3c0b99"}, + {file = "ujson-5.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:20509a8c9f775b3a511e308bbe0b72897ba6b800767a7c90c5cca59d20d7c42c"}, + {file = "ujson-5.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b28407cfe315bd1b34f1ebe65d3bd735d6b36d409b334100be8cdffae2177b2f"}, + {file = "ujson-5.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d302bd17989b6bd90d49bade66943c78f9e3670407dbc53ebcf61271cadc399"}, + {file = "ujson-5.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f21315f51e0db8ee245e33a649dd2d9dce0594522de6f278d62f15f998e050e"}, + {file = "ujson-5.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5635b78b636a54a86fdbf6f027e461aa6c6b948363bdf8d4fbb56a42b7388320"}, + {file = "ujson-5.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:82b5a56609f1235d72835ee109163c7041b30920d70fe7dac9176c64df87c164"}, + {file = "ujson-5.9.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5ca35f484622fd208f55041b042d9d94f3b2c9c5add4e9af5ee9946d2d30db01"}, + {file = "ujson-5.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:829b824953ebad76d46e4ae709e940bb229e8999e40881338b3cc94c771b876c"}, + {file = "ujson-5.9.0-cp312-cp312-win32.whl", hash = "sha256:25fa46e4ff0a2deecbcf7100af3a5d70090b461906f2299506485ff31d9ec437"}, + {file = "ujson-5.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:60718f1720a61560618eff3b56fd517d107518d3c0160ca7a5a66ac949c6cf1c"}, + {file = "ujson-5.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d581db9db9e41d8ea0b2705c90518ba623cbdc74f8d644d7eb0d107be0d85d9c"}, + {file = "ujson-5.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ff741a5b4be2d08fceaab681c9d4bc89abf3c9db600ab435e20b9b6d4dfef12e"}, + {file = "ujson-5.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:cdcb02cabcb1e44381221840a7af04433c1dc3297af76fde924a50c3054c708c"}, + {file = "ujson-5.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e208d3bf02c6963e6ef7324dadf1d73239fb7008491fdf523208f60be6437402"}, + {file = "ujson-5.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4b3917296630a075e04d3d07601ce2a176479c23af838b6cf90a2d6b39b0d95"}, + {file = "ujson-5.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0c4d6adb2c7bb9eb7c71ad6f6f612e13b264942e841f8cc3314a21a289a76c4e"}, + {file = "ujson-5.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0b159efece9ab5c01f70b9d10bbb77241ce111a45bc8d21a44c219a2aec8ddfd"}, + {file = "ujson-5.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0cb4a7814940ddd6619bdce6be637a4b37a8c4760de9373bac54bb7b229698b"}, + {file = "ujson-5.9.0-cp38-cp38-win32.whl", hash = "sha256:dc80f0f5abf33bd7099f7ac94ab1206730a3c0a2d17549911ed2cb6b7aa36d2d"}, + {file = "ujson-5.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:506a45e5fcbb2d46f1a51fead991c39529fc3737c0f5d47c9b4a1d762578fc30"}, + {file = "ujson-5.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0fd2eba664a22447102062814bd13e63c6130540222c0aa620701dd01f4be81"}, + {file = "ujson-5.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bdf7fc21a03bafe4ba208dafa84ae38e04e5d36c0e1c746726edf5392e9f9f36"}, + {file = "ujson-5.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2f909bc08ce01f122fd9c24bc6f9876aa087188dfaf3c4116fe6e4daf7e194f"}, + {file = "ujson-5.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd4ea86c2afd41429751d22a3ccd03311c067bd6aeee2d054f83f97e41e11d8f"}, + {file = "ujson-5.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63fb2e6599d96fdffdb553af0ed3f76b85fda63281063f1cb5b1141a6fcd0617"}, + {file = "ujson-5.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash 
= "sha256:32bba5870c8fa2a97f4a68f6401038d3f1922e66c34280d710af00b14a3ca562"}, + {file = "ujson-5.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:37ef92e42535a81bf72179d0e252c9af42a4ed966dc6be6967ebfb929a87bc60"}, + {file = "ujson-5.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f69f16b8f1c69da00e38dc5f2d08a86b0e781d0ad3e4cc6a13ea033a439c4844"}, + {file = "ujson-5.9.0-cp39-cp39-win32.whl", hash = "sha256:3382a3ce0ccc0558b1c1668950008cece9bf463ebb17463ebf6a8bfc060dae34"}, + {file = "ujson-5.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:6adef377ed583477cf005b58c3025051b5faa6b8cc25876e594afbb772578f21"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ffdfebd819f492e48e4f31c97cb593b9c1a8251933d8f8972e81697f00326ff1"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4eec2ddc046360d087cf35659c7ba0cbd101f32035e19047013162274e71fcf"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbb90aa5c23cb3d4b803c12aa220d26778c31b6e4b7a13a1f49971f6c7d088e"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0823cb70866f0d6a4ad48d998dd338dce7314598721bc1b7986d054d782dfd"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4e35d7885ed612feb6b3dd1b7de28e89baaba4011ecdf995e88be9ac614765e9"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b048aa93eace8571eedbd67b3766623e7f0acbf08ee291bef7d8106210432427"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:323279e68c195110ef85cbe5edce885219e3d4a48705448720ad925d88c9f851"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ac92d86ff34296f881e12aa955f7014d276895e0e4e868ba7fddebbde38e378"}, + {file 
= "ujson-5.9.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6eecbd09b316cea1fd929b1e25f70382917542ab11b692cb46ec9b0a26c7427f"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:473fb8dff1d58f49912323d7cb0859df5585cfc932e4b9c053bf8cf7f2d7c5c4"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f91719c6abafe429c1a144cfe27883eace9fb1c09a9c5ef1bcb3ae80a3076a4e"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1c0991c4fe256f5fdb19758f7eac7f47caac29a6c57d0de16a19048eb86bad"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a8ea0f55a1396708e564595aaa6696c0d8af532340f477162ff6927ecc46e21"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:07e0cfdde5fd91f54cd2d7ffb3482c8ff1bf558abf32a8b953a5d169575ae1cd"}, + {file = "ujson-5.9.0.tar.gz", hash = "sha256:89cc92e73d5501b8a7f48575eeb14ad27156ad092c2e9fc7e3cf949f07e75532"}, +] + +[[package]] +name = "urllib3" +version = "2.2.1" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.23.2" +description = "The lightning-fast ASGI server." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.23.2-py3-none-any.whl", hash = "sha256:1f9be6558f01239d4fdf22ef8126c39cb1ad0addf76c40e760549d2c2f43ab53"}, + {file = "uvicorn-0.23.2.tar.gz", hash = "sha256:4d3cc12d7727ba72b64d12d3cc7743124074c0a69f7b201512fc50c3e3f1569a"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "yarl" +version = "1.9.4" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = 
"yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, + {file = 
"yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, + {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, + {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, + {file = 
"yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, + {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, + {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, + {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, + {file = 
"yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, + {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, + {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = 
"yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = 
"yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = 
"yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[metadata] +lock-version = "2.0" +python-versions = ">=3.8.1,<4.0" +content-hash = "e184bab7c13245b660890fcac2dfe03a38296251a832daf1ace8478ccb95c694" diff --git a/templates/rag-milvus/pyproject.toml b/templates/rag-milvus/pyproject.toml new file mode 100644 index 00000000000..2adf18cd7de --- /dev/null +++ b/templates/rag-milvus/pyproject.toml @@ -0,0 +1,34 @@ +[tool.poetry] +name = "rag-milvus" +version = "0.1.0" +description = "RAG using Milvus" +authors = [] +readme = "README.md" + +[tool.poetry.dependencies] +python = ">=3.8.1,<4.0" +langchain = "^0.1" +langchain-core = "^0.1" +langchain-openai = "^0.1" +langchain-community = "^0.0.30" +pymilvus = "^2.4" +scipy = "^1.9" + +[tool.poetry.group.dev.dependencies] +langchain-cli = ">=0.0.4" +fastapi = "^0.104.0" +sse-starlette = "^1.6.5" + +[tool.langserve] +export_module = "rag_milvus" +export_attr = "chain" + +[tool.templates-hub] +use-case = "rag" +author = "LangChain" +integrations = ["OpenAI", "Milvus"] +tags = ["vectordbs"] + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/templates/rag-milvus/rag_milvus/__init__.py b/templates/rag-milvus/rag_milvus/__init__.py new file mode 100644 index 00000000000..cf9e1eac267 --- /dev/null +++ b/templates/rag-milvus/rag_milvus/__init__.py 
@@ -0,0 +1,3 @@ +from rag_milvus.chain import chain + +__all__ = ["chain"] diff --git a/templates/rag-milvus/rag_milvus/chain.py b/templates/rag-milvus/rag_milvus/chain.py new file mode 100644 index 00000000000..b48edd6b2cf --- /dev/null +++ b/templates/rag-milvus/rag_milvus/chain.py @@ -0,0 +1,69 @@ +from langchain_community.vectorstores import Milvus +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.pydantic_v1 import BaseModel +from langchain_core.runnables import RunnableParallel, RunnablePassthrough +from langchain_openai import ChatOpenAI, OpenAIEmbeddings + +# Example for document loading (from url), splitting, and creating vectorstore + +""" +# Load +from langchain_community.document_loaders import WebBaseLoader + +loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") +data = loader.load() + +# Split +from langchain_text_splitters import RecursiveCharacterTextSplitter + +text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) +all_splits = text_splitter.split_documents(data) + +# Add to vectorDB +vectorstore = Milvus.from_documents(documents=all_splits, + collection_name="rag_milvus", + embedding=OpenAIEmbeddings(), + drop_old=True, + ) +retriever = vectorstore.as_retriever() +""" + +# Embed a single document as a test +vectorstore = Milvus.from_texts( + ["harrison worked at kensho"], + collection_name="rag_milvus", + embedding=OpenAIEmbeddings(), + drop_old=True, + connection_args={ + "uri": "http://127.0.0.1:19530", + }, +) +retriever = vectorstore.as_retriever() + +# RAG prompt +template = """Answer the question based only on the following context: +{context} + +Question: {question} +""" +prompt = ChatPromptTemplate.from_template(template) + +# LLM +model = ChatOpenAI() + +# RAG chain +chain = ( + RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) + | prompt + | model + | StrOutputParser() +) + + 
+# Add typing for input +class Question(BaseModel): + __root__: str + + +chain = chain.with_types(input_type=Question) diff --git a/templates/rag-milvus/tests/__init__.py b/templates/rag-milvus/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/templates/rag-multi-modal-local/ingest.py b/templates/rag-multi-modal-local/ingest.py index e1ad090af00..9aad0cf6568 100644 --- a/templates/rag-multi-modal-local/ingest.py +++ b/templates/rag-multi-modal-local/ingest.py @@ -20,7 +20,7 @@ vectorstore = Path(__file__).parent / "chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function -print("Loading embedding function") # noqa: T201 +print("Loading embedding function") embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") # Create chroma @@ -31,5 +31,5 @@ vectorstore_mmembd = Chroma( ) # Add images -print("Embedding images") # noqa: T201 +print("Embedding images") vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-multi-modal-mv-local/ingest.py b/templates/rag-multi-modal-mv-local/ingest.py index 72b9dbf7693..4e3f711bd2e 100644 --- a/templates/rag-multi-modal-mv-local/ingest.py +++ b/templates/rag-multi-modal-mv-local/ingest.py @@ -63,7 +63,7 @@ def generate_img_summaries(img_base64_list): image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: - print(f"Error with image {i+1}: {e}") # noqa: T201 + print(f"Error with image {i+1}: {e}") return image_summaries, processed_images @@ -162,14 +162,14 @@ def create_multi_vector_retriever(vectorstore, image_summaries, images): # Load images doc_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) -print("Read images") # noqa: T201 +print("Read images") pil_images = get_images(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Image summaries -print("Generate 
image summaries") # noqa: T201 +print("Generate image summaries") image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # The vectorstore to use to index the images summaries diff --git a/templates/rag-opensearch/dummy_index_setup.py b/templates/rag-opensearch/dummy_index_setup.py index e3bc286444a..bb658adf51e 100644 --- a/templates/rag-opensearch/dummy_index_setup.py +++ b/templates/rag-opensearch/dummy_index_setup.py @@ -41,7 +41,7 @@ index_settings = { response = client.indices.create(index=OPENSEARCH_INDEX_NAME, body=index_settings) -print(response) # noqa: T201 +print(response) # Insert docs @@ -57,4 +57,4 @@ for each in docs: response = client.index(index=OPENSEARCH_INDEX_NAME, body=document, refresh=True) - print(response) # noqa: T201 + print(response) diff --git a/templates/rag-redis-multi-modal-multi-vector/ingest.py b/templates/rag-redis-multi-modal-multi-vector/ingest.py index 00d6d029875..e916dd52fa7 100644 --- a/templates/rag-redis-multi-modal-multi-vector/ingest.py +++ b/templates/rag-redis-multi-modal-multi-vector/ingest.py @@ -62,7 +62,7 @@ def generate_img_summaries(img_base64_list): image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: - print(f"Error with image {i+1}: {e}") # noqa: T201 + print(f"Error with image {i+1}: {e}") return image_summaries, processed_images @@ -151,14 +151,14 @@ if __name__ == "__main__": doc_path = Path(__file__).parent / "docs/nvda-f3q24-investor-presentation-final.pdf" rel_doc_path = doc_path.relative_to(Path.cwd()) - print("Extract slides as images") # noqa: T201 + print("Extract slides as images") pil_images = get_images_from_pdf(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Generate image summaries - print("Generate image summaries") # noqa: T201 + print("Generate image summaries") image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # 
Create documents diff --git a/templates/rag-redis/ingest.py b/templates/rag-redis/ingest.py index b4963f6dc5d..db6413daa1a 100644 --- a/templates/rag-redis/ingest.py +++ b/templates/rag-redis/ingest.py @@ -17,7 +17,7 @@ def ingest_documents(): data_path = "data/" doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0] - print("Parsing 10k filing doc for NIKE", doc) # noqa: T201 + print("Parsing 10k filing doc for NIKE", doc) text_splitter = RecursiveCharacterTextSplitter( chunk_size=1500, chunk_overlap=100, add_start_index=True @@ -25,7 +25,7 @@ def ingest_documents(): loader = UnstructuredFileLoader(doc, mode="single", strategy="fast") chunks = loader.load_and_split(text_splitter) - print("Done preprocessing. Created", len(chunks), "chunks of the original pdf") # noqa: T201 + print("Done preprocessing. Created", len(chunks), "chunks of the original pdf") # Create vectorstore embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL) diff --git a/templates/rag-self-query/main.py b/templates/rag-self-query/main.py index 8385dfaa4f2..83a1dc4c6cb 100644 --- a/templates/rag-self-query/main.py +++ b/templates/rag-self-query/main.py @@ -14,7 +14,7 @@ if __name__ == "__main__": "chat_history": [], } ) - print(response) # noqa: T201 + print(response) follow_up_question = "What are their objectives?" 
@@ -30,4 +30,4 @@ if __name__ == "__main__": } ) - print(response) # noqa: T201 + print(response) diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py index 223e76194b3..111d08b59d7 100644 --- a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py +++ b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py @@ -47,7 +47,7 @@ def load_ts_git_dataset( with open(json_file_path, "w") as json_file: json_file.write(response.text) else: - print(f"Failed to download JSON file. Status code: {response.status_code}") # noqa: T201 + print(f"Failed to download JSON file. Status code: {response.status_code}") loader = JSONLoader( file_path=json_file_path, diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py index 223e76194b3..111d08b59d7 100644 --- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py +++ b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py @@ -47,7 +47,7 @@ def load_ts_git_dataset( with open(json_file_path, "w") as json_file: json_file.write(response.text) else: - print(f"Failed to download JSON file. Status code: {response.status_code}") # noqa: T201 + print(f"Failed to download JSON file. 
Status code: {response.status_code}") loader = JSONLoader( file_path=json_file_path, diff --git a/templates/research-assistant/research_assistant/search/web.py b/templates/research-assistant/research_assistant/search/web.py index 5f647904ab8..52ea08542d1 100644 --- a/templates/research-assistant/research_assistant/search/web.py +++ b/templates/research-assistant/research_assistant/search/web.py @@ -40,7 +40,7 @@ def scrape_text(url: str): else: return f"Failed to retrieve the webpage: Status code {response.status_code}" except Exception as e: - print(e) # noqa: T201 + print(e) return f"Failed to retrieve the webpage: {e}" diff --git a/templates/sql-llamacpp/sql_llamacpp/chain.py b/templates/sql-llamacpp/sql_llamacpp/chain.py index dd749681c66..1dd53f69a5e 100644 --- a/templates/sql-llamacpp/sql_llamacpp/chain.py +++ b/templates/sql-llamacpp/sql_llamacpp/chain.py @@ -19,15 +19,15 @@ url = ( ) # Check if file is present in the current directory if not os.path.exists(file_name): - print(f"'{file_name}' not found. Downloading...") # noqa: T201 + print(f"'{file_name}' not found. 
Downloading...") # Download the file response = requests.get(url) response.raise_for_status() # Raise an exception for HTTP errors with open(file_name, "wb") as f: f.write(response.content) - print(f"'{file_name}' has been downloaded.") # noqa: T201 + print(f"'{file_name}' has been downloaded.") else: - print(f"'{file_name}' already exists in the current directory.") # noqa: T201 + print(f"'{file_name}' already exists in the current directory.") # Add the LLM downloaded from HF model_path = file_name diff --git a/templates/sql-research-assistant/sql_research_assistant/chain.py b/templates/sql-research-assistant/sql_research_assistant/chain.py index 56f025df927..d04e14efd8b 100644 --- a/templates/sql-research-assistant/sql_research_assistant/chain.py +++ b/templates/sql-research-assistant/sql_research_assistant/chain.py @@ -17,6 +17,6 @@ chain = chain_notypes.with_types(input_type=InputType) if __name__ == "__main__": - print( # noqa: T201 + print( chain.invoke({"question": "who is typically older: point guards or centers?"}) ) diff --git a/templates/sql-research-assistant/sql_research_assistant/search/web.py b/templates/sql-research-assistant/sql_research_assistant/search/web.py index d1ac84c941c..cb8b8aa41aa 100644 --- a/templates/sql-research-assistant/sql_research_assistant/search/web.py +++ b/templates/sql-research-assistant/sql_research_assistant/search/web.py @@ -40,7 +40,7 @@ def scrape_text(url: str): else: return f"Failed to retrieve the webpage: Status code {response.status_code}" except Exception as e: - print(e) # noqa: T201 + print(e) return f"Failed to retrieve the webpage: {e}" diff --git a/templates/xml-agent/main.py b/templates/xml-agent/main.py index 55786914da3..02647eb8451 100644 --- a/templates/xml-agent/main.py +++ b/templates/xml-agent/main.py @@ -2,4 +2,4 @@ from xml_agent.agent import agent_executor if __name__ == "__main__": question = "who won the womens world cup in 2023?" 
- print(agent_executor.invoke({"question": question, "chat_history": []})) # noqa: T201 + print(agent_executor.invoke({"question": question, "chat_history": []}))