Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-09 10:41:52 +00:00)

Compare commits: langchain- ... cc/extende (14 commits)
Commits (SHA1):
- fdbd9f6eba
- 9786f01106
- 0dfe63af2a
- 5dfc2a89f7
- 22bee8147f
- 4706541307
- d7667da26d
- 58e7175262
- 9ef23df3f8
- 8f78650181
- 48ca84dcfa
- fe832bef26
- 74a6079992
- 32e90ce912
.github/DISCUSSION_TEMPLATE/q-a.yml (vendored, 27 changes)
@@ -96,27 +96,22 @@ body:
  - type: textarea
    id: system-info
    attributes:
      label: System Info
      description: |
        Please share your system info with us.
        Please share your system info with us. Do NOT skip this step and please don't trim
        the output. Most users don't include enough information here and it makes it harder
        for us to help you.

        "pip freeze | grep langchain"
        platform (windows / linux / mac)
        python version

        OR if you're on a recent version of langchain-core you can paste the output of:
        Run the following command in your terminal and paste the output here:

        python -m langchain_core.sys_info

        or if you have an existing python interpreter running:

        from langchain_core import sys_info
        sys_info.print_sys_info()

        alternatively, put the entire output of `pip freeze` here.
      placeholder: |
        "pip freeze | grep langchain"
        platform
        python version

        Alternatively, if you're on a recent version of langchain-core you can paste the output of:

        python -m langchain_core.sys_info

        These will only surface LangChain packages, don't forget to include any other relevant
        packages you're using (if you're not sure what's relevant, you can paste the entire output of `pip freeze`).
    validations:
      required: true
.github/scripts/check_diff.py (vendored, 4 changes)
@@ -68,7 +68,9 @@ def dependents_graph() -> dict:

        # load extended deps from extended_testing_deps.txt
        package_path = Path(path).parent
        extended_requirement_path = package_path / "extended_testing_deps.txt"
        extended_requirement_path = (
            package_path / "extended_dependencies" / "extended_testing_deps.txt"
        )
        if extended_requirement_path.exists():
            with open(extended_requirement_path, "r") as f:
                extended_deps = f.read().splitlines()
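The hunk above relocates the extended-testing requirements into an `extended_dependencies/` subdirectory. A minimal sketch of how the old and new lookups resolve; the `libs/community` example path is an illustrative assumption, not taken from the diff:

```python
from pathlib import Path

# Illustrative package file; any package path works the same way.
path = "libs/community/pyproject.toml"
package_path = Path(path).parent

# Old lookup: libs/community/extended_testing_deps.txt
old_location = package_path / "extended_testing_deps.txt"
# New lookup: libs/community/extended_dependencies/extended_testing_deps.txt
new_location = package_path / "extended_dependencies" / "extended_testing_deps.txt"

print(old_location)
print(new_location)
```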
.github/workflows/_extended_test.yml (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
name: Extended tests

on:
  workflow_dispatch:
    inputs:
      working-directory:
        required: true
        type: string
        default: "libs/community"
      python-version:
        required: true
        type: string
        description: "Python version to use"
        default: "3.11"
      extended-deps-file:
        required: true
        type: choice
        description: "File to install extended dependencies from"
        options:
          - extended_testing_deps.txt
          - pdf_loader_deps.txt
          - other_deps.txt

env:
  POETRY_VERSION: "1.7.1"

jobs:
  build:
    defaults:
      run:
        working-directory: ${{ inputs.working-directory }}
    runs-on: ubuntu-latest
    name: Python ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ inputs.python-version }} + Poetry ${{ env.POETRY_VERSION }}
        uses: "./.github/actions/poetry_setup"
        with:
          python-version: ${{ inputs.python-version }}
          poetry-version: ${{ env.POETRY_VERSION }}
          working-directory: ${{ inputs.working-directory }}
          cache-key: core

      - name: Install extended dependencies
        shell: bash
        run: |
          poetry install --with test,test_integration
          poetry run pip install uv
          poetry run uv pip install -r extended_dependencies/${{ inputs.extended-deps-file }}

      - name: Install deps outside pyproject
        if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
        shell: bash
        run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"

      - name: Run extended tests
        shell: bash
        run: |
          make test
          make integration_tests

      - name: Ensure the tests did not create any additional files
        shell: bash
        run: |
          set -eu

          STATUS="$(git status)"
          echo "$STATUS"

          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'
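Because this workflow only runs on `workflow_dispatch`, it has to be triggered by hand. As an illustrative sketch (not part of the commit), one way to trigger it is the standard GitHub Actions dispatch endpoint; `OWNER/REPO`, the token, and the `ref` are placeholder assumptions, and the inputs mirror the defaults above:

```python
# Sketch: trigger the workflow_dispatch event via the GitHub REST API.
import os

import requests

resp = requests.post(
    "https://api.github.com/repos/OWNER/REPO/actions/workflows/_extended_test.yml/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",  # placeholder token
    },
    json={
        "ref": "master",  # branch containing the workflow file
        "inputs": {
            "working-directory": "libs/community",
            "python-version": "3.11",
            "extended-deps-file": "extended_testing_deps.txt",
        },
    },
)
resp.raise_for_status()  # the API returns 204 No Content on success
```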
.github/workflows/_integration_test.yml (vendored, 65 changes)
@@ -1,3 +1,6 @@
# Ignore changes to this file. Hijacking just to allow
# testing of workflow dispatch on new workflow off of branch.

name: Integration tests

on:

@@ -6,10 +9,20 @@ on:
      working-directory:
        required: true
        type: string
        default: "libs/community"
      python-version:
        required: true
        type: string
        description: "Python version to use"
        default: "3.11"
      extended-deps-file:
        required: true
        type: choice
        description: "File to install extended dependencies from"
        options:
          - extended_testing_deps.txt
          - pdf_loader_deps.txt
          - other_deps.txt

env:
  POETRY_VERSION: "1.7.1"

@@ -32,60 +45,22 @@ jobs:
          working-directory: ${{ inputs.working-directory }}
          cache-key: core

      - name: Install dependencies
      - name: Install extended dependencies
        shell: bash
        run: poetry install --with test,test_integration
        run: |
          poetry install --with test,test_integration
          poetry run pip install uv
          poetry run uv pip install -r extended_dependencies/${{ inputs.extended-deps-file }}

      - name: Install deps outside pyproject
        if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
        shell: bash
        run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"

      - name: 'Authenticate to Google Cloud'
        id: 'auth'
        uses: google-github-actions/auth@v2
        with:
          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'

      - name: Run integration tests
      - name: Run extended tests
        shell: bash
        env:
          AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
          AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
          AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
          AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
          TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
          NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
          GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
          GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
          EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
          NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
          WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
          WATSONX_PROJECT_ID: ${{ secrets.WATSONX_PROJECT_ID }}
          PINECONE_API_KEY: ${{ secrets.PINECONE_API_KEY }}
          PINECONE_ENVIRONMENT: ${{ secrets.PINECONE_ENVIRONMENT }}
          ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
          ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
          ASTRA_DB_KEYSPACE: ${{ secrets.ASTRA_DB_KEYSPACE }}
          ES_URL: ${{ secrets.ES_URL }}
          ES_CLOUD_ID: ${{ secrets.ES_CLOUD_ID }}
          ES_API_KEY: ${{ secrets.ES_API_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
          MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
          VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
          UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
        run: |
          make test
          make integration_tests

      - name: Ensure the tests did not create any additional files
.github/workflows/_release.yml (vendored, 2 changes)
@@ -85,7 +85,7 @@ jobs:
          path: langchain
          sparse-checkout: | # this only grabs files for relevant dir
            ${{ inputs.working-directory }}
          ref: ${{ github.ref }} # this scopes to just ref'd branch
          ref: master # this scopes to just master branch
          fetch-depth: 0 # this fetches entire commit history
      - name: Check Tags
        id: check-tags
.github/workflows/check_diffs.yml (vendored, 2 changes)
@@ -139,7 +139,7 @@ jobs:
            echo "Running extended tests, installing dependencies with poetry..."
            poetry install --with test
            poetry run pip install uv
            poetry run uv pip install -r extended_testing_deps.txt
            poetry run uv pip install -r extended_dependencies/extended_testing_deps.txt

      - name: Run extended tests
        run: make extended_tests
File diff suppressed because one or more lines are too long
@@ -179,7 +179,7 @@
"    b: Annotated[int, ..., \"Second integer\"]\n",
"\n",
"\n",
"class multiply(TypedDict):\n",
"class multiply(BaseModel):\n",
"    \"\"\"Multiply two integers.\"\"\"\n",
"\n",
"    a: Annotated[int, ..., \"First integer\"]\n",
@@ -13,7 +13,7 @@
"\n",
"This sample demonstrates the use of `Amazon Textract` in combination with LangChain as a DocumentLoader.\n",
"\n",
"`Textract` supports`PDF`, `TIFF`, `PNG` and `JPEG` format.\n",
"`Textract` supports`PDF`, `TIF`F, `PNG` and `JPEG` format.\n",
"\n",
"`Textract` supports these [document sizes, languages and characters](https://docs.aws.amazon.com/textract/latest/dg/limits-document.html)."
]
@@ -6,7 +6,7 @@
"source": [
"# Google Speech-to-Text Audio Transcripts\n",
"\n",
"The `SpeechToTextLoader` allows to transcribe audio files with the [Google Cloud Speech-to-Text API](https://cloud.google.com/speech-to-text) and loads the transcribed text into documents.\n",
"The `GoogleSpeechToTextLoader` allows to transcribe audio files with the [Google Cloud Speech-to-Text API](https://cloud.google.com/speech-to-text) and loads the transcribed text into documents.\n",
"\n",
"To use it, you should have the `google-cloud-speech` python package installed, and a Google Cloud project with the [Speech-to-Text API enabled](https://cloud.google.com/speech-to-text/v2/docs/transcribe-client-libraries#before_you_begin).\n",
"\n",

@@ -41,7 +41,7 @@
"source": [
"## Example\n",
"\n",
"The `SpeechToTextLoader` must include the `project_id` and `file_path` arguments. Audio files can be specified as a Google Cloud Storage URI (`gs://...`) or a local file path.\n",
"The `GoogleSpeechToTextLoader` must include the `project_id` and `file_path` arguments. Audio files can be specified as a Google Cloud Storage URI (`gs://...`) or a local file path.\n",
"\n",
"Only synchronous requests are supported by the loader, which has a [limit of 60 seconds or 10MB](https://cloud.google.com/speech-to-text/v2/docs/sync-recognize#:~:text=60%20seconds%20and/or%2010%20MB) per audio file."
]

@@ -52,13 +52,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_community import SpeechToTextLoader\n",
"from langchain_google_community import GoogleSpeechToTextLoader\n",
"\n",
"project_id = \"<PROJECT_ID>\"\n",
"file_path = \"gs://cloud-samples-data/speech/audio.flac\"\n",
"# or a local file path: file_path = \"./audio.wav\"\n",
"\n",
"loader = SpeechToTextLoader(project_id=project_id, file_path=file_path)\n",
"loader = GoogleSpeechToTextLoader(project_id=project_id, file_path=file_path)\n",
"\n",
"docs = loader.load()"
]

@@ -152,7 +152,7 @@
"    RecognitionConfig,\n",
"    RecognitionFeatures,\n",
")\n",
"from langchain_google_community import SpeechToTextLoader\n",
"from langchain_google_community import GoogleSpeechToTextLoader\n",
"\n",
"project_id = \"<PROJECT_ID>\"\n",
"location = \"global\"\n",

@@ -171,7 +171,7 @@
"    ),\n",
")\n",
"\n",
"loader = SpeechToTextLoader(\n",
"loader = GoogleSpeechToTextLoader(\n",
"    project_id=project_id,\n",
"    location=location,\n",
"    recognizer_id=recognizer_id,\n",
@@ -31,8 +31,6 @@ The below document loaders allow you to load webpages.

The below document loaders allow you to load PDF documents.

See this guide for a starting point: [How to: load PDF files](/docs/how_to/document_loader_pdf).

<CategoryTable category="pdf_loaders" />

## Cloud Providers
@@ -16,7 +16,7 @@
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/unstructured/)|\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [UnstructuredLoader](https://python.langchain.com/api_reference/unstructured/document_loaders/langchain_unstructured.document_loaders.UnstructuredLoader.html) | [langchain_unstructured](https://python.langchain.com/api_reference/unstructured/index.html) | ✅ | ❌ | ✅ | \n",
"| [UnstructuredLoader](https://python.langchain.com/api_reference/unstructured/document_loaders/langchain_unstructured.document_loaders.UnstructuredLoader.html) | [langchain_community](https://python.langchain.com/api_reference/unstructured/index.html) | ✅ | ❌ | ✅ | \n",
"### Loader features\n",
"| Source | Document Lazy Loading | Native Async Support\n",
"| :---: | :---: | :---: | \n",

@@ -519,47 +519,6 @@
"print(\"Length of text in the document:\", len(docs[0].page_content))"
]
},
{
"cell_type": "markdown",
"id": "3ec3c22d-02cd-498b-921f-b839d1404f32",
"metadata": {},
"source": [
"## Loading web pages\n",
"\n",
"`UnstructuredLoader` accepts a `web_url` kwarg when run locally that populates the `url` parameter of the underlying Unstructured [partition](https://docs.unstructured.io/open-source/core-functionality/partitioning). This allows for the parsing of remotely hosted documents, such as HTML web pages.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "bf9a8546-659d-4861-bff2-fdf1ad93ac65",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Example Domain' metadata={'category_depth': 0, 'languages': ['eng'], 'filetype': 'text/html', 'url': 'https://www.example.com', 'category': 'Title', 'element_id': 'fdaa78d856f9d143aeeed85bf23f58f8'}\n",
"\n",
"page_content='This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission.' metadata={'languages': ['eng'], 'parent_id': 'fdaa78d856f9d143aeeed85bf23f58f8', 'filetype': 'text/html', 'url': 'https://www.example.com', 'category': 'NarrativeText', 'element_id': '3652b8458b0688639f973fe36253c992'}\n",
"\n",
"page_content='More information...' metadata={'category_depth': 0, 'link_texts': ['More information...'], 'link_urls': ['https://www.iana.org/domains/example'], 'languages': ['eng'], 'filetype': 'text/html', 'url': 'https://www.example.com', 'category': 'Title', 'element_id': '793ab98565d6f6d6f3a6d614e3ace2a9'}\n",
"\n"
]
}
],
"source": [
"from langchain_unstructured import UnstructuredLoader\n",
"\n",
"loader = UnstructuredLoader(web_url=\"https://www.example.com\")\n",
"docs = loader.load()\n",
"\n",
"for doc in docs:\n",
"    print(f\"{doc}\\n\")"
]
},
{
"cell_type": "markdown",
"id": "ce01aa40",

@@ -587,7 +546,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.10.13"
}
},
"nbformat": 4,
@@ -6,11 +6,129 @@
"source": [
"# SambaNova\n",
"\n",
"**[SambaNova](https://sambanova.ai/)'s** [Sambastudio](https://sambanova.ai/technology/full-stack-ai-platform) is a platform for running your own open-source models\n",
"**[SambaNova](https://sambanova.ai/)'s** [Sambaverse](https://sambaverse.sambanova.ai/) and [Sambastudio](https://sambanova.ai/technology/full-stack-ai-platform) are platforms for running your own open-source models\n",
"\n",
"This example goes over how to use LangChain to interact with SambaNova models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sambaverse"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Sambaverse** allows you to interact with multiple open-source models. You can view the list of available models and interact with them in the [playground](https://sambaverse.sambanova.ai/playground).\n",
" **Please note that Sambaverse's free offering is performance-limited.** Companies that are ready to evaluate the production tokens-per-second performance, volume throughput, and 10x lower total cost of ownership (TCO) of SambaNova should [contact us](https://sambaverse.sambanova.ai/contact-us) for a non-limited evaluation instance."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"An API key is required to access Sambaverse models. To get a key, create an account at [sambaverse.sambanova.ai](https://sambaverse.sambanova.ai/)\n",
"\n",
"The [sseclient-py](https://pypi.org/project/sseclient-py/) package is required to run streaming predictions "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet sseclient-py==1.8.0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Register your API key as an environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"sambaverse_api_key = \"<Your sambaverse API key>\"\n",
"\n",
"# Set the environment variables\n",
"os.environ[\"SAMBAVERSE_API_KEY\"] = sambaverse_api_key"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call Sambaverse models directly from LangChain!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms.sambanova import Sambaverse\n",
"\n",
"llm = Sambaverse(\n",
"    sambaverse_model_name=\"Meta/llama-2-7b-chat-hf\",\n",
"    streaming=False,\n",
"    model_kwargs={\n",
"        \"do_sample\": True,\n",
"        \"max_tokens_to_generate\": 1000,\n",
"        \"temperature\": 0.01,\n",
"        \"select_expert\": \"llama-2-7b-chat-hf\",\n",
"        \"process_prompt\": False,\n",
"        # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
"        # \"repetition_penalty\": 1.0,\n",
"        # \"top_k\": 50,\n",
"        # \"top_p\": 1.0\n",
"    },\n",
")\n",
"\n",
"print(llm.invoke(\"Why should I use open source models?\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Streaming response\n",
"\n",
"from langchain_community.llms.sambanova import Sambaverse\n",
"\n",
"llm = Sambaverse(\n",
"    sambaverse_model_name=\"Meta/llama-2-7b-chat-hf\",\n",
"    streaming=True,\n",
"    model_kwargs={\n",
"        \"do_sample\": True,\n",
"        \"max_tokens_to_generate\": 1000,\n",
"        \"temperature\": 0.01,\n",
"        \"select_expert\": \"llama-2-7b-chat-hf\",\n",
"        \"process_prompt\": False,\n",
"        # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
"        # \"repetition_penalty\": 1.0,\n",
"        # \"top_k\": 50,\n",
"        # \"top_p\": 1.0\n",
"    },\n",
")\n",
"\n",
"for chunk in llm.stream(\"Why should I use open source models?\"):\n",
"    print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -1,12 +1,12 @@
# MLflow AI Gateway for LLMs
# MLflow Deployments for LLMs

>[The MLflow AI Gateway for LLMs](https://www.mlflow.org/docs/latest/llms/deployments/index.html) is a powerful tool designed to streamline the usage and management of various large
>[The MLflow Deployments for LLMs](https://www.mlflow.org/docs/latest/llms/deployments/index.html) is a powerful tool designed to streamline the usage and management of various large
> language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface
> that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests.

## Installation and Setup

Install `mlflow` with MLflow GenAI dependencies:
Install `mlflow` with MLflow Deployments dependencies:

```sh
pip install 'mlflow[genai]'

@@ -39,10 +39,10 @@ endpoints:
        openai_api_key: $OPENAI_API_KEY
```

Start the gateway server:
Start the deployments server:

```sh
mlflow gateway start --config-path /path/to/config.yaml
mlflow deployments start-server --config-path /path/to/config.yaml
```

## Example provided by `MLflow`
docs/docs/integrations/providers/mlflow_ai_gateway.mdx (new file, 160 lines)
@@ -0,0 +1,160 @@
# MLflow AI Gateway

:::warning

MLflow AI Gateway has been deprecated. Please use [MLflow Deployments for LLMs](/docs/integrations/providers/mlflow/) instead.

:::

>[The MLflow AI Gateway](https://www.mlflow.org/docs/latest/index.html) service is a powerful tool designed to streamline the usage and management of various large
> language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface
> that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests.

## Installation and Setup

Install `mlflow` with MLflow AI Gateway dependencies:

```sh
pip install 'mlflow[gateway]'
```

Set the OpenAI API key as an environment variable:

```sh
export OPENAI_API_KEY=...
```

Create a configuration file:

```yaml
routes:
  - name: completions
    route_type: llm/v1/completions
    model:
      provider: openai
      name: text-davinci-003
      config:
        openai_api_key: $OPENAI_API_KEY

  - name: embeddings
    route_type: llm/v1/embeddings
    model:
      provider: openai
      name: text-embedding-ada-002
      config:
        openai_api_key: $OPENAI_API_KEY
```

Start the Gateway server:

```sh
mlflow gateway start --config-path /path/to/config.yaml
```

## Example provided by `MLflow`

>The `mlflow.langchain` module provides an API for logging and loading `LangChain` models.
> This module exports multivariate LangChain models in the langchain flavor and univariate LangChain
> models in the pyfunc flavor.

See the [API documentation and examples](https://www.mlflow.org/docs/latest/python_api/mlflow.langchain.html?highlight=langchain#module-mlflow.langchain).

## Completions Example

```python
import mlflow
from langchain.chains import LLMChain, PromptTemplate
from langchain_community.llms import MlflowAIGateway

gateway = MlflowAIGateway(
    gateway_uri="http://127.0.0.1:5000",
    route="completions",
    params={
        "temperature": 0.0,
        "top_p": 0.1,
    },
)

llm_chain = LLMChain(
    llm=gateway,
    prompt=PromptTemplate(
        input_variables=["adjective"],
        template="Tell me a {adjective} joke",
    ),
)
result = llm_chain.run(adjective="funny")
print(result)

with mlflow.start_run():
    model_info = mlflow.langchain.log_model(chain, "model")

model = mlflow.pyfunc.load_model(model_info.model_uri)
print(model.predict([{"adjective": "funny"}]))
```

## Embeddings Example

```python
from langchain_community.embeddings import MlflowAIGatewayEmbeddings

embeddings = MlflowAIGatewayEmbeddings(
    gateway_uri="http://127.0.0.1:5000",
    route="embeddings",
)

print(embeddings.embed_query("hello"))
print(embeddings.embed_documents(["hello"]))
```

## Chat Example

```python
from langchain_community.chat_models import ChatMLflowAIGateway
from langchain_core.messages import HumanMessage, SystemMessage

chat = ChatMLflowAIGateway(
    gateway_uri="http://127.0.0.1:5000",
    route="chat",
    params={
        "temperature": 0.1
    }
)

messages = [
    SystemMessage(
        content="You are a helpful assistant that translates English to French."
    ),
    HumanMessage(
        content="Translate this sentence from English to French: I love programming."
    ),
]
print(chat(messages))
```

## Databricks MLflow AI Gateway

Databricks MLflow AI Gateway is in private preview.
Please contact a Databricks representative to enroll in the preview.

```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import MlflowAIGateway

gateway = MlflowAIGateway(
    gateway_uri="databricks",
    route="completions",
)

llm_chain = LLMChain(
    llm=gateway,
    prompt=PromptTemplate(
        input_variables=["adjective"],
        template="Tell me a {adjective} joke",
    ),
)
result = llm_chain.run(adjective="funny")
print(result)
```
File diff suppressed because one or more lines are too long
@@ -400,29 +400,18 @@
"def hybrid_query(search_query: str) -> Dict:\n",
"    vector = embeddings.embed_query(search_query)  # same embeddings as for indexing\n",
"    return {\n",
"        \"retriever\": {\n",
"            \"rrf\": {\n",
"                \"retrievers\": [\n",
"                    {\n",
"                        \"standard\": {\n",
"                            \"query\": {\n",
"                                \"match\": {\n",
"                                    text_field: search_query,\n",
"                                }\n",
"                            }\n",
"                        }\n",
"                    },\n",
"                    {\n",
"                        \"knn\": {\n",
"                            \"field\": dense_vector_field,\n",
"                            \"query_vector\": vector,\n",
"                            \"k\": 5,\n",
"                            \"num_candidates\": 10,\n",
"                        }\n",
"                    },\n",
"                ]\n",
"            }\n",
"        }\n",
"        \"query\": {\n",
"            \"match\": {\n",
"                text_field: search_query,\n",
"            },\n",
"        },\n",
"        \"knn\": {\n",
"            \"field\": dense_vector_field,\n",
"            \"query_vector\": vector,\n",
"            \"k\": 5,\n",
"            \"num_candidates\": 10,\n",
"        },\n",
"        \"rank\": {\"rrf\": {}},\n",
"    }\n",
"\n",
"\n",
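To read the change more easily: the commit swaps the legacy top-level `query` + `knn` + `rank: {"rrf": {}}` request body for the newer `retriever.rrf` syntax. A self-contained sketch of just the new query shape, with the notebook's surrounding variables turned into parameters (the parameter names are illustrative, not from the commit):

```python
from typing import Any, Dict, List


def hybrid_query(
    search_query: str,
    query_vector: List[float],
    text_field: str,
    dense_vector_field: str,
) -> Dict[str, Any]:
    """Build the new retriever-based hybrid query (BM25 + kNN fused with RRF)."""
    return {
        "retriever": {
            "rrf": {
                "retrievers": [
                    # Lexical (BM25) match on the text field
                    {"standard": {"query": {"match": {text_field: search_query}}}},
                    # Approximate kNN search on the dense vector field
                    {
                        "knn": {
                            "field": dense_vector_field,
                            "query_vector": query_vector,
                            "k": 5,
                            "num_candidates": 10,
                        }
                    },
                ]
            }
        }
    }
```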
@@ -380,7 +380,7 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `Clickhouse` features and configurations head to the API reference:https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.clickhouse.Clickhouse.html"
"For detailed documentation of all `AstraDBVectorStore` features and configurations head to the API reference:https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.clickhouse.Clickhouse.html"
]
}
],
@@ -90,7 +90,7 @@
"source": [
"  </TabItem>\n",
"  <TabItem value=\"conda\" label=\"Conda\">\n",
"    <CodeBlock language=\"bash\">conda install langchain langchain-community langchain-chroma -c conda-forge</CodeBlock>\n",
"    <CodeBlock language=\"bash\">conda install langchain langchain_community langchain_chroma -c conda-forge</CodeBlock>\n",
"  </TabItem>\n",
"</Tabs>\n",
"\n",
@@ -1,554 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
"metadata": {},
"source": [
"# Migrating off ConversationBufferMemory or ConversationStringBufferMemory\n",
"\n",
"[ConversationBufferMemory](https://python.langchain.com/api_reference/langchain/memory/langchain.memory.buffer.ConversationBufferMemory.html)\n",
"and [ConversationStringBufferMemory](https://python.langchain.com/api_reference/langchain/memory/langchain.memory.buffer.ConversationStringBufferMemory.html)\n",
" were used to keep track of a conversation between a human and an ai asstistant without any additional processing. \n",
"\n",
"\n",
":::note\n",
"The `ConversationStringBufferMemory` is equivalent to `ConversationBufferMemory` but was targeting LLMs that were not chat models.\n",
":::\n",
"\n",
"The methods for handling conversation history using existing modern primitives are:\n",
"\n",
"1. Using [LangGraph persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/) along with appropriate processing of the message history\n",
"2. Using LCEL with [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#) combined with appropriate processing of the message history.\n",
"\n",
"Most users will find [LangGraph persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/) both easier to use and configure than the equivalent LCEL, especially for more complex use cases."
]
},
{
"cell_type": "markdown",
"id": "d07f9459-9fb6-4942-99c9-64558aedd7d4",
"metadata": {},
"source": [
"## Set up"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b99b47ec",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install --upgrade --quiet langchain-openai langchain"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "717c8673",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
"    os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "e3621b62-a037-42b8-8faa-59575608bb8b",
"metadata": {},
"source": [
"## Usage with LLMChain / ConversationChain\n",
"\n",
"This section shows how to migrate off `ConversationBufferMemory` or `ConversationStringBufferMemory` that's used together with either an `LLMChain` or a `ConversationChain`.\n",
"\n",
"### Legacy\n",
"\n",
"Below is example usage of `ConversationBufferMemory` with an `LLMChain` or an equivalent `ConversationChain`.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8b6e1063-cf3a-456a-bf7d-830e5c1d2864",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'text': 'Hello Bob! How can I assist you today?', 'chat_history': [HumanMessage(content='my name is bob', additional_kwargs={}, response_metadata={}), AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, response_metadata={})]}\n"
]
}
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts.chat import (\n",
"    ChatPromptTemplate,\n",
"    HumanMessagePromptTemplate,\n",
"    MessagesPlaceholder,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        MessagesPlaceholder(variable_name=\"chat_history\"),\n",
"        HumanMessagePromptTemplate.from_template(\"{text}\"),\n",
"    ]\n",
")\n",
"\n",
"# highlight-start\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"# highlight-end\n",
"\n",
"legacy_chain = LLMChain(\n",
"    llm=ChatOpenAI(),\n",
"    prompt=prompt,\n",
"    # highlight-next-line\n",
"    memory=memory,\n",
")\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"my name is bob\"})\n",
"print(legacy_result)\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"what was my name\"})"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "c7fa1618",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Your name is Bob. How can I assist you today, Bob?'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"legacy_result[\"text\"]"
]
},
{
"cell_type": "markdown",
"id": "3599774f-b56e-4ba3-876c-624f0270b8ac",
"metadata": {},
"source": [
":::note\n",
"Note that there is no support for separating conversation threads in a single memory object\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LangGraph\n",
"\n",
"The example below shows how to use LangGraph to implement a `ConversationChain` or `LLMChain` with `ConversationBufferMemory`.\n",
"\n",
"This example assumes that you're already somewhat familiar with `LangGraph`. If you're not, then please see the [LangGraph Quickstart Guide](https://langchain-ai.github.io/langgraph/tutorials/introduction/) for more details.\n",
"\n",
"`LangGraph` offers a lot of additional functionality (e.g., time-travel and interrupts) and will work well for other more complex (and realistic) architectures.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e591965c-c4d7-4df7-966d-4d14bd46e157",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! I'm bob\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello Bob! How can I assist you today?\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"what was my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Your name is Bob. How can I help you today, Bob?\n"
]
}
],
"source": [
"import uuid\n",
"\n",
"from IPython.display import Image, display\n",
"from langchain_core.messages import HumanMessage\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import START, MessagesState, StateGraph\n",
"\n",
"# Define a new graph\n",
"workflow = StateGraph(state_schema=MessagesState)\n",
"\n",
"# Define a chat model\n",
"model = ChatOpenAI()\n",
"\n",
"\n",
"# Define the function that calls the model\n",
"def call_model(state: MessagesState):\n",
"    response = model.invoke(state[\"messages\"])\n",
"    # We return a list, because this will get added to the existing list\n",
"    return {\"messages\": response}\n",
"\n",
"\n",
"# Define the two nodes we will cycle between\n",
"workflow.add_edge(START, \"model\")\n",
"workflow.add_node(\"model\", call_model)\n",
"\n",
"\n",
"# Adding memory is straight forward in langgraph!\n",
"# highlight-next-line\n",
"memory = MemorySaver()\n",
"\n",
"app = workflow.compile(\n",
"    # highlight-next-line\n",
"    checkpointer=memory\n",
")\n",
"\n",
"\n",
"# The thread id is a unique key that identifies\n",
"# this particular conversation.\n",
"# We'll just generate a random uuid here.\n",
"# This enables a single application to manage conversations among multiple users.\n",
"thread_id = uuid.uuid4()\n",
"# highlight-next-line\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"\n",
"\n",
"input_message = HumanMessage(content=\"hi! I'm bob\")\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
"    event[\"messages\"][-1].pretty_print()\n",
"\n",
"# Here, let's confirm that the AI remembers our name!\n",
"input_message = HumanMessage(content=\"what was my name?\")\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
"    event[\"messages\"][-1].pretty_print()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9893029f-43f3-4703-89bf-e0e8fa18aff3",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LCEL RunnableWithMessageHistory\n",
"\n",
"Alternatively, if you have a simple chain, you can wrap the chat model of the chain within a [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html).\n",
"\n",
"Please refer to the following [migration guide](/docs/versions/migrating_chains/conversation_chain/) for more information.\n",
"\n",
"\n",
"## Usasge with a pre-built agent\n",
"\n",
"This example shows usage of an Agent Executor with a pre-built agent constructed using the [create_tool_calling_agent](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) function.\n",
"\n",
"If you are using one of the [old LangChain pre-built agents](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/), you should be able\n",
"to replace that code with the new [langgraph pre-built agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/) which leverages\n",
"native tool calling capabilities of chat models and will likely work better out of the box.\n",
"\n",
"### Legacy Usage\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "dc2928de-d7a4-4f87-ab96-59bde9a3829f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'input': 'hi! my name is bob what is my age?', 'chat_history': [HumanMessage(content='hi! my name is bob what is my age?', additional_kwargs={}, response_metadata={}), AIMessage(content='Bob, you are 42 years old.', additional_kwargs={}, response_metadata={})], 'output': 'Bob, you are 42 years old.'}\n",
"\n",
"{'input': 'do you remember my name?', 'chat_history': [HumanMessage(content='hi! my name is bob what is my age?', additional_kwargs={}, response_metadata={}), AIMessage(content='Bob, you are 42 years old.', additional_kwargs={}, response_metadata={}), HumanMessage(content='do you remember my name?', additional_kwargs={}, response_metadata={}), AIMessage(content='Yes, your name is Bob.', additional_kwargs={}, response_metadata={})], 'output': 'Yes, your name is Bob.'}\n"
]
}
],
"source": [
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(temperature=0)\n",
"\n",
"\n",
"@tool\n",
"def get_user_age(name: str) -> str:\n",
"    \"\"\"Use this tool to find the user's age.\"\"\"\n",
"    # This is a placeholder for the actual implementation\n",
"    if \"bob\" in name.lower():\n",
"        return \"42 years old\"\n",
"    return \"41 years old\"\n",
"\n",
"\n",
"tools = [get_user_age]\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\"placeholder\", \"{chat_history}\"),\n",
"        (\"human\", \"{input}\"),\n",
"        (\"placeholder\", \"{agent_scratchpad}\"),\n",
"    ]\n",
")\n",
"\n",
"# Construct the Tools agent\n",
"agent = create_tool_calling_agent(model, tools, prompt)\n",
"# Instantiate memory\n",
"# highlight-start\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"# highlight-end\n",
"\n",
"# Create an agent\n",
"agent = create_tool_calling_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(\n",
"    agent=agent,\n",
"    tools=tools,\n",
"    # highlight-next-line\n",
"    memory=memory,  # Pass the memory to the executor\n",
")\n",
"\n",
"# Verify that the agent can use tools\n",
"print(agent_executor.invoke({\"input\": \"hi! my name is bob what is my age?\"}))\n",
"print()\n",
"# Verify that the agent has access to conversation history.\n",
"# The agent should be able to answer that the user's name is bob.\n",
"print(agent_executor.invoke({\"input\": \"do you remember my name?\"}))"
]
},
{
"cell_type": "markdown",
"id": "a4866ae9-e683-44dc-a77b-da1737d3a645",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LangGraph\n",
"\n",
"You can follow the standard LangChain tutorial for [building an agent](/docs/tutorials/agents/) an in depth explanation of how this works.\n",
"\n",
"This example is shown here explicitly to make it easier for users to compare the legacy implementation vs. the corresponding langgraph implementation.\n",
"\n",
"This example shows how to add memory to the [pre-built react agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in langgraph.\n",
"\n",
"For more details, please see the [how to add memory to the prebuilt ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent-memory/) guide in langgraph.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "bdb29c9b-bc57-4512-9430-c5d5e3f91e3c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! I'm bob. What is my age?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
"  get_user_age (call_oEDwEbIDNdokwqhAV6Azn47c)\n",
" Call ID: call_oEDwEbIDNdokwqhAV6Azn47c\n",
"  Args:\n",
"    name: bob\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: get_user_age\n",
"\n",
"42 years old\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Bob, you are 42 years old! If you need any more assistance or information, feel free to ask.\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"do you remember my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Yes, your name is Bob. If you have any other questions or need assistance, feel free to ask!\n"
]
}
],
"source": [
"import uuid\n",
"\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"\n",
"@tool\n",
"def get_user_age(name: str) -> str:\n",
"    \"\"\"Use this tool to find the user's age.\"\"\"\n",
"    # This is a placeholder for the actual implementation\n",
"    if \"bob\" in name.lower():\n",
"        return \"42 years old\"\n",
"    return \"41 years old\"\n",
"\n",
"\n",
"# highlight-next-line\n",
"memory = MemorySaver()\n",
"model = ChatOpenAI()\n",
"app = create_react_agent(\n",
"    model,\n",
"    tools=[get_user_age],\n",
"    # highlight-next-line\n",
"    checkpointer=memory,\n",
")\n",
"\n",
"# highlight-start\n",
"# The thread id is a unique key that identifies\n",
"# this particular conversation.\n",
"# We'll just generate a random uuid here.\n",
"# This enables a single application to manage conversations among multiple users.\n",
"thread_id = uuid.uuid4()\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"# highlight-end\n",
"\n",
"# Tell the AI that our name is Bob, and ask it to use a tool to confirm\n",
"# that it's capable of working like an agent.\n",
"input_message = HumanMessage(content=\"hi! I'm bob. What is my age?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
"    event[\"messages\"][-1].pretty_print()\n",
"\n",
"# Confirm that the chat bot has access to previous conversation\n",
"# and can respond to the user saying that the user's name is Bob.\n",
"input_message = HumanMessage(content=\"do you remember my name?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
"    event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "87d14cef-a51e-44be-b376-f31b723caaf8",
"metadata": {},
"source": [
"If we use a different thread ID, it'll start a new conversation and the bot will not know our name!"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "fe63e424-1111-4f6a-a9c9-0887eb150ab0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! do you remember my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello! Yes, I remember your name. It's great to see you again! How can I assist you today?\n"
]
}
],
"source": [
"config = {\"configurable\": {\"thread_id\": \"123456789\"}}\n",
"\n",
"input_message = HumanMessage(content=\"hi! do you remember my name?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
"    event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "b2717810",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"Explore persistence with LangGraph:\n",
"\n",
"* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)\n",
"* [How to add persistence (\"memory\") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)\n",
"* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)\n",
"* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)\n",
"\n",
"Add persistence with simple LCEL (favor langgraph for more complex use cases):\n",
"\n",
"* [How to add message history](/docs/how_to/message_history/)\n",
"\n",
"Working with message history:\n",
"\n",
"* [How to trim messages](/docs/how_to/trim_messages)\n",
"* [How to filter messages](/docs/how_to/filter_messages/)\n",
"* [How to merge message runs](/docs/how_to/merge_message_runs/)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce4c48e1-b613-4aab-bc2b-617c811fad1d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -1,728 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
"metadata": {},
"source": [
"# Migrating off ConversationBufferWindowMemory or ConversationTokenBufferMemory\n",
"\n",
"Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n",
"\n",
"\n",
"| Memory Type | Description |\n",
"|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
"| `ConversationBufferWindowMemory` | Keeps the last `n` messages of the conversation. Drops the oldest messages when there are more than `n` messages. |\n",
"| `ConversationTokenBufferMemory` | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |\n",
"\n",
"`ConversationBufferWindowMemory` and `ConversationTokenBufferMemory` apply additional processing on top of the raw conversation history to trim the conversation history to a size that fits inside the context window of a chat model. \n",
"\n",
"This processing functionality can be accomplished using LangChain's built-in [trim_messages](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.trim_messages.html) function."
]
},
{
"cell_type": "markdown",
"id": "79935247-acc7-4a05-a387-5d72c9c8c8cb",
"metadata": {},
"source": [
":::important\n",
"\n",
"We’ll begin by exploring a straightforward method that involves applying processing logic to the entire conversation history.\n",
"\n",
"While this approach is easy to implement, it has a downside: as the conversation grows, so does the latency, since the logic is re-applied to all previous exchanges in the conversation at each turn.\n",
"\n",
"More advanced strategies focus on incrementally updating the conversation history to avoid redundant processing.\n",
"\n",
"For instance, the langgraph [how-to guide on summarization](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/) demonstrates\n",
"how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "d07f9459-9fb6-4942-99c9-64558aedd7d4",
"metadata": {},
"source": [
"## Set up"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b99b47ec",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install --upgrade --quiet langchain-openai langchain"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7127478f-4413-48be-bfec-d0cd91b8cf70",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
"    os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "d6a7bc93-21a9-44c8-842e-9cc82f1ada7c",
"metadata": {},
"source": [
"## Legacy usage with LLMChain / Conversation Chain\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "371616e1-ca41-4a57-99e0-5fbf7d63f2ad",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'text': 'Nice to meet you, Bob! How can I assist you today?', 'chat_history': []}\n",
"{'text': 'Your name is Bob. How can I assist you further, Bob?', 'chat_history': [HumanMessage(content='my name is bob', additional_kwargs={}, response_metadata={}), AIMessage(content='Nice to meet you, Bob! How can I assist you today?', additional_kwargs={}, response_metadata={})]}\n"
]
}
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts.chat import (\n",
"    ChatPromptTemplate,\n",
"    HumanMessagePromptTemplate,\n",
"    MessagesPlaceholder,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate(\n",
"    [\n",
"        SystemMessage(content=\"You are a helpful assistant.\"),\n",
"        MessagesPlaceholder(variable_name=\"chat_history\"),\n",
"        HumanMessagePromptTemplate.from_template(\"{text}\"),\n",
"    ]\n",
")\n",
"\n",
"# highlight-start\n",
"memory = ConversationBufferWindowMemory(memory_key=\"chat_history\", return_messages=True)\n",
"# highlight-end\n",
"\n",
"legacy_chain = LLMChain(\n",
"    llm=ChatOpenAI(),\n",
"    prompt=prompt,\n",
"    # highlight-next-line\n",
"    memory=memory,\n",
")\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"my name is bob\"})\n",
"print(legacy_result)\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"what was my name\"})\n",
"print(legacy_result)"
]
},
{
"cell_type": "markdown",
"id": "f48cac47-c8b6-444c-8e1b-f7115c0b2d8d",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Reimplementing ConversationBufferWindowMemory logic\n",
"\n",
"Let's first create appropriate logic to process the conversation history, and then we'll see how to integrate it into an application. You can later replace this basic setup with more advanced logic tailored to your specific needs.\n",
"\n",
"We'll use `trim_messages` to implement logic that keeps the last `n` messages of the conversation. It will drop the oldest messages when the number of messages exceeds `n`.\n",
"\n",
"In addition, we will also keep the system message if it's present -- when present, it's the first message in a conversation that includes instructions for the chat model."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0a92b3f3-0315-46ac-bb28-d07398dd23ea",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import (\n",
"    AIMessage,\n",
"    BaseMessage,\n",
"    HumanMessage,\n",
"    SystemMessage,\n",
"    trim_messages,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"messages = [\n",
"    SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(\"i wonder why it's called langchain\"),\n",
|
||||
" AIMessage(\n",
|
||||
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"why is 42 always the answer?\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"Because it’s the only number that’s constantly right, even when it doesn’t add up!\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"What did the cow say?\"),\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e7ddf8dc-ea27-43e2-8800-9f7c1d4abdc1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"you're a good assistant, you always respond with a joke.\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Hmmm let me think.\n",
|
||||
"\n",
|
||||
"Why, he's probably chasing after the last cup of coffee in the office!\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"why is 42 always the answer?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Because it’s the only number that’s constantly right, even when it doesn’t add up!\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What did the cow say?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import trim_messages\n",
|
||||
"\n",
|
||||
"selected_messages = trim_messages(\n",
|
||||
" messages,\n",
|
||||
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
|
||||
" max_tokens=5, # <-- allow up to 5 messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # The start_on is specified\n",
|
||||
" # to make sure we do not generate a sequence where\n",
|
||||
" # a ToolMessage that contains the result of a tool invocation\n",
|
||||
" # appears before the AIMessage that requested a tool invocation\n",
|
||||
" # as this will cause some chat models to raise an error.\n",
|
||||
" start_on=(\"human\", \"ai\"),\n",
|
||||
" include_system=True, # <-- Keep the system message\n",
|
||||
" allow_partial=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for msg in selected_messages:\n",
|
||||
" msg.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "18f73819-05e0-41f3-a0e7-a5fd6701d9ef",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Reimplementing ConversationTokenBufferMemory logic\n",
|
||||
"\n",
|
||||
"Here, we'll use `trim_messages` to keeps the system message and the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "6442f74b-2c36-48fd-a3d1-c7c5d18c050f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m System Message \u001b[0m================================\n",
|
||||
"\n",
|
||||
"you're a good assistant, you always respond with a joke.\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"why is 42 always the answer?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Because it’s the only number that’s constantly right, even when it doesn’t add up!\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"What did the cow say?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import trim_messages\n",
|
||||
"\n",
|
||||
"selected_messages = trim_messages(\n",
|
||||
" messages,\n",
|
||||
" # Please see API reference for trim_messages for other ways to specify a token counter.\n",
|
||||
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
|
||||
" max_tokens=80, # <-- token limit\n",
|
||||
" # The start_on is specified\n",
|
||||
" # to make sure we do not generate a sequence where\n",
|
||||
" # a ToolMessage that contains the result of a tool invocation\n",
|
||||
" # appears before the AIMessage that requested a tool invocation\n",
|
||||
" # as this will cause some chat models to raise an error.\n",
|
||||
" start_on=(\"human\", \"ai\"),\n",
|
||||
" strategy=\"last\",\n",
|
||||
" include_system=True, # <-- Keep the system message\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for msg in selected_messages:\n",
|
||||
" msg.pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0f05d272-2d22-44b7-9fa6-e617a48584b4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Modern usage with LangGraph\n",
|
||||
"\n",
|
||||
"The example below shows how to use LangGraph to add simple conversation pre-processing logic.\n",
|
||||
"\n",
|
||||
":::note\n",
|
||||
"\n",
|
||||
"If you want to avoid running the computation on the entire conversation history each time, you can follow\n",
|
||||
"the [how-to guide on summarization](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/) that demonstrates\n",
|
||||
"how to discard older messages, ensuring they aren't re-processed during later turns.\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"<details open>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "7d6f79a3-fda7-48fd-9128-bbe4aad84199",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"hi! I'm bob\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Hello Bob! How can I assist you today?\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"what was my name?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Your name is Bob. How can I help you, Bob?\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from IPython.display import Image, display\n",
|
||||
"from langchain_core.messages import HumanMessage\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.graph import START, MessagesState, StateGraph\n",
|
||||
"\n",
|
||||
"# Define a new graph\n",
|
||||
"workflow = StateGraph(state_schema=MessagesState)\n",
|
||||
"\n",
|
||||
"# Define a chat model\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the function that calls the model\n",
|
||||
"def call_model(state: MessagesState):\n",
|
||||
" # highlight-start\n",
|
||||
" selected_messages = trim_messages(\n",
|
||||
" state[\"messages\"],\n",
|
||||
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
|
||||
" max_tokens=5, # <-- allow up to 5 messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # The start_on is specified\n",
|
||||
" # to make sure we do not generate a sequence where\n",
|
||||
" # a ToolMessage that contains the result of a tool invocation\n",
|
||||
" # appears before the AIMessage that requested a tool invocation\n",
|
||||
" # as this will cause some chat models to raise an error.\n",
|
||||
" start_on=(\"human\", \"ai\"),\n",
|
||||
" include_system=True, # <-- Keep the system message\n",
|
||||
" allow_partial=False,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # highlight-end\n",
|
||||
" response = model.invoke(selected_messages)\n",
|
||||
" # We return a list, because this will get added to the existing list\n",
|
||||
" return {\"messages\": response}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Define the two nodes we will cycle between\n",
|
||||
"workflow.add_edge(START, \"model\")\n",
|
||||
"workflow.add_node(\"model\", call_model)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Adding memory is straight forward in langgraph!\n",
|
||||
"# highlight-next-line\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"\n",
|
||||
"app = workflow.compile(\n",
|
||||
" # highlight-next-line\n",
|
||||
" checkpointer=memory\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# The thread id is a unique key that identifies\n",
|
||||
"# this particular conversation.\n",
|
||||
"# We'll just generate a random uuid here.\n",
|
||||
"thread_id = uuid.uuid4()\n",
|
||||
"# highlight-next-line\n",
|
||||
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
|
||||
"\n",
|
||||
"input_message = HumanMessage(content=\"hi! I'm bob\")\n",
|
||||
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()\n",
|
||||
"\n",
|
||||
"# Here, let's confirm that the AI remembers our name!\n",
|
||||
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
|
||||
"input_message = HumanMessage(content=\"what was my name?\")\n",
|
||||
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84229e2e-a578-4b21-840a-814223406402",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</details>\n",
|
||||
"\n",
|
||||
"## Usage with a pre-built langgraph agent\n",
|
||||
"\n",
|
||||
"This example shows usage of an Agent Executor with a pre-built agent constructed using the [create_tool_calling_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) function.\n",
|
||||
"\n",
|
||||
"If you are using one of the [old LangChain pre-built agents](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/), you should be able\n",
|
||||
"to replace that code with the new [langgraph pre-built agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/) which leverages\n",
|
||||
"native tool calling capabilities of chat models and will likely work better out of the box.\n",
|
||||
"\n",
|
||||
"<details open>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "f671db87-8f01-453e-81fd-4e603140a512",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"hi! I'm bob. What is my age?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" get_user_age (call_jsMvoIFv970DhqqLCJDzPKsp)\n",
|
||||
" Call ID: call_jsMvoIFv970DhqqLCJDzPKsp\n",
|
||||
" Args:\n",
|
||||
" name: bob\n",
|
||||
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
|
||||
"Name: get_user_age\n",
|
||||
"\n",
|
||||
"42 years old\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Bob, you are 42 years old.\n",
|
||||
"================================\u001b[1m Human Message \u001b[0m=================================\n",
|
||||
"\n",
|
||||
"do you remember my name?\n",
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"\n",
|
||||
"Yes, your name is Bob.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import uuid\n",
|
||||
"\n",
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" trim_messages,\n",
|
||||
")\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"from langgraph.checkpoint.memory import MemorySaver\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def get_user_age(name: str) -> str:\n",
|
||||
" \"\"\"Use this tool to find the user's age.\"\"\"\n",
|
||||
" # This is a placeholder for the actual implementation\n",
|
||||
" if \"bob\" in name.lower():\n",
|
||||
" return \"42 years old\"\n",
|
||||
" return \"41 years old\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"memory = MemorySaver()\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# highlight-start\n",
|
||||
"def state_modifier(state) -> list[BaseMessage]:\n",
|
||||
" \"\"\"Given the agent state, return a list of messages for the chat model.\"\"\"\n",
|
||||
" # We're using the message processor defined above.\n",
|
||||
" return trim_messages(\n",
|
||||
" state[\"messages\"],\n",
|
||||
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
|
||||
" max_tokens=5, # <-- allow up to 5 messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # The start_on is specified\n",
|
||||
" # to make sure we do not generate a sequence where\n",
|
||||
" # a ToolMessage that contains the result of a tool invocation\n",
|
||||
" # appears before the AIMessage that requested a tool invocation\n",
|
||||
" # as this will cause some chat models to raise an error.\n",
|
||||
" start_on=(\"human\", \"ai\"),\n",
|
||||
" include_system=True, # <-- Keep the system message\n",
|
||||
" allow_partial=False,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# highlight-end\n",
|
||||
"\n",
|
||||
"app = create_react_agent(\n",
|
||||
" model,\n",
|
||||
" tools=[get_user_age],\n",
|
||||
" checkpointer=memory,\n",
|
||||
" # highlight-next-line\n",
|
||||
" state_modifier=state_modifier,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# The thread id is a unique key that identifies\n",
|
||||
"# this particular conversation.\n",
|
||||
"# We'll just generate a random uuid here.\n",
|
||||
"thread_id = uuid.uuid4()\n",
|
||||
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
|
||||
"\n",
|
||||
"# Tell the AI that our name is Bob, and ask it to use a tool to confirm\n",
|
||||
"# that it's capable of working like an agent.\n",
|
||||
"input_message = HumanMessage(content=\"hi! I'm bob. What is my age?\")\n",
|
||||
"\n",
|
||||
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()\n",
|
||||
"\n",
|
||||
"# Confirm that the chat bot has access to previous conversation\n",
|
||||
"# and can respond to the user saying that the user's name is Bob.\n",
|
||||
"input_message = HumanMessage(content=\"do you remember my name?\")\n",
|
||||
"\n",
|
||||
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
|
||||
" event[\"messages\"][-1].pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "f4d16e09-1d90-4153-8576-6d3996cb5a6c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</details>\n",
|
||||
"\n",
|
||||
"## LCEL: Add a preprocessing step\n",
|
||||
"\n",
|
||||
"The simplest way to add complex conversation management is by introducing a pre-processing step in front of the chat model and pass the full conversation history to the pre-processing step.\n",
|
||||
"\n",
|
||||
"This approach is conceptually simple and will work in many situations; for example, if using a [RunnableWithMessageHistory](/docs/how_to/message_history/) instead of wrapping the chat model, wrap the chat model with the pre-processor.\n",
|
||||
"\n",
|
||||
"The obvious downside of this approach is that latency starts to increase as the conversation history grows because of two reasons:\n",
|
||||
"\n",
|
||||
"1. As the conversation gets longer, more data may need to be fetched from whatever store your'e using to store the conversation history (if not storing it in memory).\n",
|
||||
"2. The pre-processing logic will end up doing a lot of redundant computation, repeating computation from previous steps of the conversation.\n",
|
||||
"\n",
|
||||
":::caution\n",
|
||||
"\n",
|
||||
"If you want to use a chat model's tool calling capabilities, remember to bind the tools to the model before adding the history pre-processing step to it!\n",
|
||||
"\n",
|
||||
":::\n",
|
||||
"\n",
|
||||
"<details open>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "072046bb-3892-4206-8ae5-025e93110dcc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
|
||||
"Tool Calls:\n",
|
||||
" what_did_the_cow_say (call_urHTB5CShhcKz37QiVzNBlIS)\n",
|
||||
" Call ID: call_urHTB5CShhcKz37QiVzNBlIS\n",
|
||||
" Args:\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_core.messages import (\n",
|
||||
" AIMessage,\n",
|
||||
" BaseMessage,\n",
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
" trim_messages,\n",
|
||||
")\n",
|
||||
"from langchain_core.tools import tool\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@tool\n",
|
||||
"def what_did_the_cow_say() -> str:\n",
|
||||
" \"\"\"Check to see what the cow said.\"\"\"\n",
|
||||
" return \"foo\"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# highlight-start\n",
|
||||
"message_processor = trim_messages( # Returns a Runnable if no messages are provided\n",
|
||||
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
|
||||
" max_tokens=5, # <-- allow up to 5 messages.\n",
|
||||
" strategy=\"last\",\n",
|
||||
" # The start_on is specified\n",
|
||||
" # to make sure we do not generate a sequence where\n",
|
||||
" # a ToolMessage that contains the result of a tool invocation\n",
|
||||
" # appears before the AIMessage that requested a tool invocation\n",
|
||||
" # as this will cause some chat models to raise an error.\n",
|
||||
" start_on=(\"human\", \"ai\"),\n",
|
||||
" include_system=True, # <-- Keep the system message\n",
|
||||
" allow_partial=False,\n",
|
||||
")\n",
|
||||
"# highlight-end\n",
|
||||
"\n",
|
||||
"# Note that we bind tools to the model first!\n",
|
||||
"model_with_tools = model.bind_tools([what_did_the_cow_say])\n",
|
||||
"\n",
|
||||
"# highlight-next-line\n",
|
||||
"model_with_preprocessor = message_processor | model_with_tools\n",
|
||||
"\n",
|
||||
"full_history = [\n",
|
||||
" SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n",
|
||||
" HumanMessage(\"i wonder why it's called langchain\"),\n",
|
||||
" AIMessage(\n",
|
||||
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"why is 42 always the answer?\"),\n",
|
||||
" AIMessage(\n",
|
||||
" \"Because it’s the only number that’s constantly right, even when it doesn’t add up!\"\n",
|
||||
" ),\n",
|
||||
" HumanMessage(\"What did the cow say?\"),\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# We pass it explicity to the model_with_preprocesor for illustrative purposes.\n",
|
||||
"# If you're using `RunnableWithMessageHistory` the history will be automatically\n",
|
||||
"# read from the source the you configure.\n",
|
||||
"model_with_preprocessor.invoke(full_history).pretty_print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"attachments": {},
|
||||
"cell_type": "markdown",
|
||||
"id": "5da7225a-5e94-4f53-bb0d-86b6b528d150",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"</details>\n",
|
||||
"\n",
|
||||
"If you need to implement more efficient logic and want to use `RunnableWithMessageHistory` for now the way to achieve this\n",
|
||||
"is to subclass from [BaseChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) and\n",
|
||||
"define appropriate logic for `add_messages` (that doesn't simply append the history, but instead re-writes it).\n",
|
||||
"\n",
|
||||
"Unless you have a good reason to implement this solution, you should instead use LangGraph."
|
||||
]
|
||||
},
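{
"cell_type": "markdown",
"id": "custom-history-sketch-md",
"metadata": {},
"source": [
"Below is a minimal sketch of that approach. It is not part of the original API surface: the class name `LastNMessagesHistory` and the cutoff of 4 messages are illustrative assumptions."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "custom-history-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.messages import BaseMessage\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",
"# Sketch: an in-memory history whose `add_messages` re-writes the stored\n",
"# history instead of appending, keeping only the most recent messages.\n",
"class LastNMessagesHistory(BaseChatMessageHistory, BaseModel):\n",
"    messages: list[BaseMessage] = Field(default_factory=list)\n",
"    max_messages: int = 4  # illustrative cutoff\n",
"\n",
"    def add_messages(self, messages: list[BaseMessage]) -> None:\n",
"        # Re-write the history rather than appending indefinitely.\n",
"        self.messages = (list(self.messages) + list(messages))[-self.max_messages :]\n",
"\n",
"    def clear(self) -> None:\n",
"        self.messages = []"
]
},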
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b2717810",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Next steps\n",
|
||||
"\n",
|
||||
"Explore persistence with LangGraph:\n",
|
||||
"\n",
|
||||
"* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)\n",
|
||||
"* [How to add persistence (\"memory\") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)\n",
|
||||
"* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)\n",
|
||||
"* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)\n",
|
||||
"\n",
|
||||
"Add persistence with simple LCEL (favor langgraph for more complex use cases):\n",
|
||||
"\n",
|
||||
"* [How to add message history](/docs/how_to/message_history/)\n",
|
||||
"\n",
|
||||
"Working with message history:\n",
|
||||
"\n",
|
||||
"* [How to trim messages](/docs/how_to/trim_messages)\n",
|
||||
"* [How to filter messages](/docs/how_to/filter_messages/)\n",
|
||||
"* [How to merge message runs](/docs/how_to/merge_message_runs/)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f4adad0b-3e25-47d9-a8e6-6a9c6c616f14",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Migrating off ConversationSummaryMemory or ConversationSummaryBufferMemory\n",
|
||||
"\n",
|
||||
"Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"| Memory Type | Description |\n",
|
||||
"|---------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
|
||||
"| `ConversationSummaryMemory` | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. |\n",
|
||||
"| `ConversationSummaryBufferMemory` | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |\n",
|
||||
"\n",
|
||||
"Please follow the following [how-to guide on summarization](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/) in LangGraph. \n",
|
||||
"\n",
|
||||
"This guide shows how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns."
|
||||
]
|
||||
}
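,
{
"cell_type": "markdown",
"id": "summary-sketch-md",
"metadata": {},
"source": [
"As a rough, minimal sketch of the underlying idea (not a substitute for the LangGraph guide; the model name and the summarization prompt below are illustrative assumptions), older messages can be folded into a summary with an ordinary chat model call:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "summary-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"# Older messages that would otherwise be dropped from the context window.\n",
"old_messages = [\n",
"    HumanMessage(\"my name is bob and i like cats\"),\n",
"    AIMessage(\"Nice to meet you, Bob! Cats are great companions.\"),\n",
"]\n",
"\n",
"# Ask the model to compress the old messages into a short running summary.\n",
"summary = model.invoke(\n",
"    old_messages + [HumanMessage(\"Summarize the conversation above in one sentence.\")]\n",
")\n",
"print(summary.content)"
]
}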
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# How to migrate from v0.0 memory
|
||||
|
||||
The concept of memory has evolved significantly in LangChain since its initial release.
|
||||
|
||||
Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases:
|
||||
|
||||
| Use Case | Example |
|
||||
|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Managing conversation history | Keep only the last `n` turns of the conversation between the user and the AI. |
|
||||
| Extraction of structured information | Extract structured information from the conversation history, such as a list of facts learned about the user. |
|
||||
| Composite memory implementations | Combine multiple memory sources, e.g., a list of known facts about the user along with facts learned during a given conversation. |
|
||||
|
||||
While the LangChain 0.0.x memory abstractions were useful, they were limited in their capabilities and not well suited for real-world conversational AI applications. These memory abstractions lacked built-in support for multi-user, multi-conversation scenarios, which are essential for practical conversational AI systems.
|
||||
|
||||
This guide will help you migrate your usage of memory implementations from LangChain v0.0.x to the persistence implementations of LangGraph.
|
||||
|
||||
## Why use LangGraph for memory?
|
||||
|
||||
The main advantages of the persistence implementation in LangGraph are:

- Built-in support for multi-user, multi-conversation scenarios, which is often a requirement for real-world conversational AI applications.
|
||||
- Ability to save and resume complex state at any time for error recovery, human-in-the-loop workflows, time travel interactions, and more.
|
||||
- Full support for both [LLMs](/docs/concepts/#llms) and [chat models](/docs/concepts/#chat-models). In contrast, the v0.0.x memory abstractions were created prior to the existence and widespread adoption of chat model APIs, and so they do not work well with chat models (e.g., they fail with tool-calling chat models).
|
||||
- Offers a high degree of customization and control over the memory implementation, including the ability to use different backends.
|
||||
|
||||
## Migrations
|
||||
|
||||
:::info Prerequisites
|
||||
|
||||
These guides assume some familiarity with the following concepts:
|
||||
- [LangGraph](https://langchain-ai.github.io/langgraph/)
|
||||
- [v0.0.x Memory](https://python.langchain.com/v0.1/docs/modules/memory/)
|
||||
- [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)
|
||||
:::
|
||||
|
||||
### 1. Managing conversation history
|
||||
|
||||
The goal of managing conversation history is to store and retrieve the history in a way that is optimal for a chat model to use.
|
||||
|
||||
Often this involves trimming and/or summarizing the conversation history to keep its most relevant parts while having the conversation fit inside the context window of the chat model.
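
As a quick, minimal illustration of trimming (a sketch that counts messages rather than tokens; it assumes only `langchain-core`):

```python
from langchain_core.messages import AIMessage, HumanMessage, trim_messages

history = [
    HumanMessage("hi, i'm bob"),
    AIMessage("Hello Bob!"),
    HumanMessage("what's my name?"),
]
# Keep at most the last 2 messages; `len` counts messages instead of tokens.
trimmed = trim_messages(history, token_counter=len, max_tokens=2, strategy="last")
```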
|
||||
|
||||
Memory classes that fall into this category include:
|
||||
|
||||
| Memory Type | How to Migrate | Description |
|
||||
|-----------------------------------|:-------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `ConversationBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A basic memory implementation that simply stores the conversation history. |
|
||||
| `ConversationStringBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A special case of `ConversationBufferMemory` designed for LLMs and no longer relevant. |
|
||||
| `ConversationBufferWindowMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps the last `n` turns of the conversation. Drops the oldest turn when the buffer is full. |
|
||||
| `ConversationTokenBufferMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
|
||||
| `ConversationSummaryMemory` | [Link to Migration Guide](conversation_summary_memory) | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. |
|
||||
| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory) | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
|
||||
| `VectorStoreRetrieverMemory` | No migration guide yet | Stores the conversation history in a vector store and retrieves the most relevant parts of past conversation based on the input. |
|
||||
|
||||
|
||||
### 2. Extraction of structured information from the conversation history
|
||||
|
||||
Memory classes that fall into this category include:
|
||||
|
||||
| Memory Type | Description |
|
||||
|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `BaseEntityStore` | An abstract interface that resembles a key-value store. It was used for storing structured information learned during the conversation. The information had to be represented as a dictionary of key-value pairs. |
|
||||
| `ConversationEntityMemory` | Combines the ability to summarize the conversation while extracting structured information from the conversation history. |
|
||||
|
||||
And specific backend implementations of the `BaseEntityStore` abstraction:
|
||||
|
||||
| Memory Type | Description |
|
||||
|---------------------------|----------------------------------------------------------------------------------------------------------|
|
||||
| `InMemoryEntityStore` | An implementation of `BaseEntityStore` that stores the information in the literal computer memory (RAM). |
|
||||
| `RedisEntityStore` | A specific implementation of `BaseEntityStore` that uses Redis as the backend. |
|
||||
| `SQLiteEntityStore` | A specific implementation of `BaseEntityStore` that uses SQLite as the backend. |
|
||||
| `UpstashRedisEntityStore` | A specific implementation of `BaseEntityStore` that uses Upstash as the backend. |
|
||||
|
||||
These abstractions have not received much development since their initial release. The reason
is that to be useful they typically require a lot of specialization for a particular application, so these
abstractions are not as widely used as the conversation history management abstractions.
|
||||
|
||||
For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an application
that relies on these abstractions, please open an issue on the LangChain GitHub repository and we'll try to prioritize providing
|
||||
more guidance on how to migrate these abstractions.
|
||||
|
||||
The general strategy for extracting structured information from the conversation history is to use a chat model with tool-calling capabilities.

The extracted information can then be saved into an appropriate data structure (e.g., a dictionary), and information from it can be retrieved and added into the prompt as needed.
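
A minimal sketch of that strategy (assuming `langchain-openai` is installed; the `UserFacts` schema and the model name are illustrative):

```python
from pydantic import BaseModel, Field

from langchain_openai import ChatOpenAI


class UserFacts(BaseModel):
    """Facts learned about the user during the conversation."""

    name: str | None = Field(default=None, description="The user's name, if stated")
    interests: list[str] = Field(default_factory=list)


model = ChatOpenAI(model="gpt-4o-mini")
# with_structured_output uses the model's tool-calling support under the hood.
extractor = model.with_structured_output(UserFacts)
facts = extractor.invoke("Conversation so far: 'hi, I'm Bob and I love hiking.'")
# `facts` is a UserFacts instance that can be stored, e.g., keyed by user id.
```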
|
||||
|
||||
### 3. Implementations that provide composite logic on top of one or more memory implementations
|
||||
|
||||
Memory classes that fall into this category include:
|
||||
|
||||
| Memory Type | Description |
|
||||
|------------------------|--------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `CombinedMemory` | This abstraction accepted a list of `BaseMemory` and fetched relevant memory information from each of them based on the input. |
|
||||
| `SimpleMemory` | Used to add read-only hard-coded context. Users can simply write this information into the prompt. |
|
||||
| `ReadOnlySharedMemory` | Provided a read-only view of an existing `BaseMemory` implementation. |
|
||||
|
||||
These implementations did not seem to be widely used or to provide significant value. Users should be able
to re-implement them without too much difficulty in custom code.
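
For example, the read-only, hard-coded context that `SimpleMemory` provided can simply be written into the prompt. A sketch (the variable names are illustrative):

```python
from langchain_core.prompts import ChatPromptTemplate

# Hard-coded, read-only context that previously would have lived in SimpleMemory.
static_context = {"company": "Acme Corp", "tone": "friendly"}

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You work for {company}. Keep a {tone} tone."),
        ("human", "{question}"),
    ]
)
messages = prompt.invoke({**static_context, "question": "What do you sell?"})
```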
|
||||
|
||||
## Related Resources
|
||||
|
||||
Explore persistence with LangGraph:
|
||||
|
||||
* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)
|
||||
* [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)
|
||||
* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)
|
||||
* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)
|
||||
|
||||
Add persistence with simple LCEL (favor langgraph for more complex use cases):
|
||||
|
||||
* [How to add message history](https://python.langchain.com/docs/how_to/message_history/)
|
||||
|
||||
Working with message history:
|
||||
|
||||
* [How to trim messages](https://python.langchain.com/docs/how_to/trim_messages)
|
||||
* [How to filter messages](https://python.langchain.com/docs/how_to/filter_messages/)
|
||||
* [How to merge message runs](https://python.langchain.com/docs/how_to/merge_message_runs/)
|
||||
@@ -168,43 +168,52 @@ const config = {
|
||||
label: "Integrations",
|
||||
},
|
||||
{
|
||||
label: "API Reference",
|
||||
to: "https://python.langchain.com/api_reference/",
|
||||
type: "dropdown",
|
||||
label: "API reference",
|
||||
position: "left",
|
||||
items: [
|
||||
{
|
||||
label: "Latest",
|
||||
to: "https://python.langchain.com/api_reference/reference.html",
|
||||
},
|
||||
{
|
||||
label: "Legacy",
|
||||
href: "https://api.python.langchain.com/"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
type: "dropdown",
|
||||
label: "More",
|
||||
position: "left",
|
||||
items: [
|
||||
{
|
||||
type: "doc",
|
||||
docId: "contributing/index",
|
||||
label: "Contributing",
|
||||
},
|
||||
{
|
||||
type: "doc",
|
||||
docId: "people",
|
||||
label: "People",
|
||||
},
|
||||
{
|
||||
type: 'html',
|
||||
value: '<hr class="dropdown-separator" style="margin-top: 0.5rem; margin-bottom: 0.5rem">',
|
||||
type: "doc",
|
||||
docId: "contributing/index",
|
||||
label: "Contributing",
|
||||
},
|
||||
{
|
||||
href: "https://docs.smith.langchain.com",
|
||||
label: "LangSmith",
|
||||
label: "Cookbooks",
|
||||
href: "https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md"
|
||||
},
|
||||
{
|
||||
href: "https://langchain-ai.github.io/langgraph/",
|
||||
label: "LangGraph",
|
||||
type: "doc",
|
||||
docId: "additional_resources/tutorials",
|
||||
label: "3rd party tutorials"
|
||||
},
|
||||
{
|
||||
href: "https://smith.langchain.com/hub",
|
||||
label: "LangChain Hub",
|
||||
type: "doc",
|
||||
docId: "additional_resources/youtube",
|
||||
label: "YouTube"
|
||||
},
|
||||
{
|
||||
href: "https://js.langchain.com",
|
||||
label: "LangChain JS/TS",
|
||||
to: "/docs/additional_resources/arxiv_references",
|
||||
label: "arXiv"
|
||||
},
|
||||
]
|
||||
},
|
||||
@@ -228,7 +237,30 @@ const config = {
|
||||
]
|
||||
},
|
||||
{
|
||||
to: "https://chat.langchain.com",
|
||||
type: "dropdown",
|
||||
label: "🦜️🔗",
|
||||
position: "right",
|
||||
items: [
|
||||
{
|
||||
href: "https://smith.langchain.com",
|
||||
label: "LangSmith",
|
||||
},
|
||||
{
|
||||
href: "https://docs.smith.langchain.com/",
|
||||
label: "LangSmith Docs",
|
||||
},
|
||||
{
|
||||
href: "https://smith.langchain.com/hub",
|
||||
label: "LangChain Hub",
|
||||
},
|
||||
{
|
||||
href: "https://js.langchain.com",
|
||||
label: "JS/TS Docs",
|
||||
},
|
||||
]
|
||||
},
|
||||
{
|
||||
href: "https://chat.langchain.com",
|
||||
label: "💬",
|
||||
position: "right",
|
||||
},
|
||||
|
||||
@@ -38,9 +38,6 @@
|
||||
--ifm-menu-link-padding-horizontal: 0.5rem;
|
||||
--ifm-menu-link-padding-vertical: 0.5rem;
|
||||
--doc-sidebar-width: 275px !important;
|
||||
|
||||
/* Code block syntax highlighting */
|
||||
--docusaurus-highlighted-code-line-bg: rgb(176, 227, 199);
|
||||
}
|
||||
|
||||
/* For readability concerns, you should choose a lighter palette in dark mode. */
|
||||
@@ -52,9 +49,6 @@
|
||||
--ifm-color-primary-light: #29d5b0;
|
||||
--ifm-color-primary-lighter: #32d8b4;
|
||||
--ifm-color-primary-lightest: #4fddbf;
|
||||
|
||||
/* Code block syntax highlighting */
|
||||
--docusaurus-highlighted-code-line-bg: rgb(14, 73, 60);
|
||||
}
|
||||
|
||||
nav, h1, h2, h3, h4 {
|
||||
|
||||
@@ -354,7 +354,7 @@ const FEATURE_TABLES = {
|
||||
},
|
||||
{
|
||||
name: "Nomic",
|
||||
link: "nomic",
|
||||
link: "cohere",
|
||||
package: "langchain-nomic",
|
||||
apiLink: "https://python.langchain.com/api_reference/nomic/embeddings/langchain_nomic.embeddings.NomicEmbeddings.html"
|
||||
},
|
||||
@@ -886,7 +886,7 @@ const FEATURE_TABLES = {
|
||||
apiLink: "https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html"
|
||||
},
|
||||
{
|
||||
name: "UnstructuredXMLLoader",
|
||||
name: "UnstrucutredXMLLoader",
|
||||
link: "xml",
|
||||
source: "XML files",
|
||||
apiLink: "https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.xml.UnstructuredXMLLoader.html"
|
||||
|
||||
@@ -26,10 +26,6 @@
|
||||
}
|
||||
],
|
||||
"redirects": [
|
||||
{
|
||||
"source": "/v0.3/docs/:path(.*/?)*",
|
||||
"destination": "/docs/:path*"
|
||||
},
|
||||
{
|
||||
"source": "/docs/modules/agents/tools/custom_tools(/?)",
|
||||
"destination": "/docs/how_to/custom_tools/"
|
||||
@@ -77,10 +73,6 @@
|
||||
{
|
||||
"source": "/v0.2/docs/templates/:path(.*/?)*",
|
||||
"destination": "https://github.com/langchain-ai/langchain/tree/master/templates/:path*"
|
||||
},
|
||||
{
|
||||
"source": "/docs/integrations/providers/mlflow_ai_gateway(/?)",
|
||||
"destination": "/docs/integrations/providers/mlflow/"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ test tests:
|
||||
poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)
|
||||
|
||||
integration_tests:
|
||||
poetry run pytest $(TEST_FILE)
|
||||
poetry run pytest -m runs $(TEST_FILE)
|
||||
|
||||
test_watch:
|
||||
poetry run ptw --disable-socket --allow-unix-socket --snapshot-update --now . -- -vv tests/unit_tests
|
||||
|
||||
@@ -15,7 +15,7 @@ LangChain Community contains third-party integrations that implement the base in
|
||||
|
||||
For full documentation see the [API reference](https://api.python.langchain.com/en/stable/community_api_reference.html).
|
||||
|
||||

|
||||

|
||||
|
||||
## 📕 Releases & Versioning
|
||||
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
-r pdf_loader_deps.txt
|
||||
-r other_deps.txt
|
||||
@@ -54,7 +54,6 @@ openapi-pydantic>=0.3.2,<0.4
|
||||
oracle-ads>=2.9.1,<3
|
||||
oracledb>=2.2.0,<3
|
||||
pandas>=2.0.1,<3
|
||||
pdfminer-six>=20221105,<20240706
|
||||
pgvector>=0.1.6,<0.2
|
||||
praw>=7.7.1,<8
|
||||
premai>=0.3.25,<0.4
|
||||
@@ -62,9 +61,6 @@ psychicapi>=0.8.0,<0.9
|
||||
pydantic>=2.7.4,<3
|
||||
py-trello>=0.19.0,<0.20
|
||||
pyjwt>=2.8.0,<3
|
||||
pymupdf>=1.22.3,<2
|
||||
pypdf>=3.4.0,<5
|
||||
pypdfium2>=4.10.0,<5
|
||||
pyspark>=3.4.0,<4
|
||||
rank-bm25>=0.2.2,<0.3
|
||||
rapidfuzz>=3.1.1,<4
|
||||
libs/community/extended_dependencies/pdf_loader_deps.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
pdfminer-six>=20221105,<20240706
|
||||
pymupdf>=1.22.3,<2
|
||||
pypdf>=3.4.0,<5
|
||||
pypdfium2>=4.10.0,<5
|
||||
@@ -8,18 +8,6 @@ from langchain_core.messages import AIMessage
|
||||
from langchain_core.outputs import ChatGeneration, LLMResult
|
||||
|
||||
MODEL_COST_PER_1K_TOKENS = {
|
||||
# OpenAI o1-preview input
|
||||
"o1-preview": 0.015,
|
||||
"o1-preview-2024-09-12": 0.015,
|
||||
# OpenAI o1-preview output
|
||||
"o1-preview-completion": 0.06,
|
||||
"o1-preview-2024-09-12-completion": 0.06,
|
||||
# OpenAI o1-mini input
|
||||
"o1-mini": 0.003,
|
||||
"o1-mini-2024-09-12": 0.003,
|
||||
# OpenAI o1-mini output
|
||||
"o1-mini-completion": 0.012,
|
||||
"o1-mini-2024-09-12-completion": 0.012,
|
||||
# GPT-4o-mini input
|
||||
"gpt-4o-mini": 0.00015,
|
||||
"gpt-4o-mini-2024-07-18": 0.00015,
|
||||
@@ -165,7 +153,6 @@ def standardize_model_name(
|
||||
model_name.startswith("gpt-4")
|
||||
or model_name.startswith("gpt-3.5")
|
||||
or model_name.startswith("gpt-35")
|
||||
or model_name.startswith("o1-")
|
||||
or ("finetuned" in model_name and "legacy" not in model_name)
|
||||
):
|
||||
return model_name + "-completion"
|
||||
|
||||
@@ -53,15 +53,13 @@ class LLMThoughtLabeler:
|
||||
labeling logic.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def get_initial_label() -> str:
|
||||
def get_initial_label(self) -> str:
|
||||
"""Return the markdown label for a new LLMThought that doesn't have
|
||||
an associated tool yet.
|
||||
"""
|
||||
return f"{THINKING_EMOJI} **Thinking...**"
|
||||
|
||||
@staticmethod
|
||||
def get_tool_label(tool: ToolRecord, is_complete: bool) -> str:
|
||||
def get_tool_label(self, tool: ToolRecord, is_complete: bool) -> str:
|
||||
"""Return the label for an LLMThought that has an associated
|
||||
tool.
|
||||
|
||||
@@ -93,15 +91,13 @@ class LLMThoughtLabeler:
|
||||
label = f"{emoji} **{name}:** {input}"
|
||||
return label
|
||||
|
||||
@staticmethod
|
||||
def get_history_label() -> str:
|
||||
def get_history_label(self) -> str:
|
||||
"""Return a markdown label for the special 'history' container
|
||||
that contains overflow thoughts.
|
||||
"""
|
||||
return f"{HISTORY_EMOJI} **History**"
|
||||
|
||||
@staticmethod
|
||||
def get_final_agent_thought_label() -> str:
|
||||
def get_final_agent_thought_label(self) -> str:
|
||||
"""Return the markdown label for the agent's final thought -
|
||||
the "Now I have the answer" thought, that doesn't involve
|
||||
a tool.
|
||||
|
||||
@@ -204,7 +204,7 @@ def _convert_delta_to_message_chunk(
|
||||
role = dct.get("role")
|
||||
content = dct.get("content", "")
|
||||
additional_kwargs = {}
|
||||
tool_calls = dct.get("tool_calls", None)
|
||||
tool_calls = dct.get("tool_call", None)
|
||||
if tool_calls is not None:
|
||||
additional_kwargs["tool_calls"] = tool_calls
|
||||
|
||||
|
||||
@@ -359,7 +359,6 @@ if TYPE_CHECKING:
|
||||
)
|
||||
from langchain_community.document_loaders.pebblo import (
|
||||
PebbloSafeLoader,
|
||||
PebbloTextLoader,
|
||||
)
|
||||
from langchain_community.document_loaders.polars_dataframe import (
|
||||
PolarsDataFrameLoader,
|
||||
@@ -651,7 +650,6 @@ _module_lookup = {
|
||||
"PDFPlumberLoader": "langchain_community.document_loaders.pdf",
|
||||
"PagedPDFSplitter": "langchain_community.document_loaders.pdf",
|
||||
"PebbloSafeLoader": "langchain_community.document_loaders.pebblo",
|
||||
"PebbloTextLoader": "langchain_community.document_loaders.pebblo",
|
||||
"PlaywrightURLLoader": "langchain_community.document_loaders.url_playwright",
|
||||
"PolarsDataFrameLoader": "langchain_community.document_loaders.polars_dataframe",
|
||||
"PsychicLoader": "langchain_community.document_loaders.psychic",
|
||||
@@ -857,7 +855,6 @@ __all__ = [
|
||||
"PDFPlumberLoader",
|
||||
"PagedPDFSplitter",
|
||||
"PebbloSafeLoader",
|
||||
"PebbloTextLoader",
|
||||
"PlaywrightURLLoader",
|
||||
"PolarsDataFrameLoader",
|
||||
"PsychicLoader",
|
||||
|
||||
@@ -267,7 +267,6 @@ class PyMuPDFParser(BaseBlobParser):
|
||||
|
||||
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
|
||||
"""Lazily parse the blob."""
|
||||
|
||||
import fitz
|
||||
|
||||
with blob.as_bytes_io() as file_path: # type: ignore[attr-defined]
|
||||
@@ -278,49 +277,25 @@ class PyMuPDFParser(BaseBlobParser):
|
||||
|
||||
yield from [
|
||||
Document(
|
||||
page_content=self._get_page_content(doc, page, blob),
|
||||
metadata=self._extract_metadata(doc, page, blob),
|
||||
page_content=page.get_text(**self.text_kwargs)
|
||||
+ self._extract_images_from_page(doc, page),
|
||||
metadata=dict(
|
||||
{
|
||||
"source": blob.source, # type: ignore[attr-defined]
|
||||
"file_path": blob.source, # type: ignore[attr-defined]
|
||||
"page": page.number,
|
||||
"total_pages": len(doc),
|
||||
},
|
||||
**{
|
||||
k: doc.metadata[k]
|
||||
for k in doc.metadata
|
||||
if type(doc.metadata[k]) in [str, int]
|
||||
},
|
||||
),
|
||||
)
|
||||
for page in doc
|
||||
]
|
||||
|
||||
def _get_page_content(
|
||||
self, doc: fitz.fitz.Document, page: fitz.fitz.Page, blob: Blob
|
||||
) -> str:
|
||||
"""
|
||||
Get the text of the page using PyMuPDF and RapidOCR and issue a warning
|
||||
if it is empty.
|
||||
"""
|
||||
content = page.get_text(**self.text_kwargs) + self._extract_images_from_page(
|
||||
doc, page
|
||||
)
|
||||
|
||||
if not content:
|
||||
warnings.warn(
|
||||
f"Warning: Empty content on page "
|
||||
f"{page.number} of document {blob.source}"
|
||||
)
|
||||
|
||||
return content
|
||||
|
||||
def _extract_metadata(
|
||||
self, doc: fitz.fitz.Document, page: fitz.fitz.Page, blob: Blob
|
||||
) -> dict:
|
||||
"""Extract metadata from the document and page."""
|
||||
return dict(
|
||||
{
|
||||
"source": blob.source, # type: ignore[attr-defined]
|
||||
"file_path": blob.source, # type: ignore[attr-defined]
|
||||
"page": page.number,
|
||||
"total_pages": len(doc),
|
||||
},
|
||||
**{
|
||||
k: doc.metadata[k]
|
||||
for k in doc.metadata
|
||||
if isinstance(doc.metadata[k], (str, int))
|
||||
},
|
||||
)
|
||||
|
||||
def _extract_images_from_page(
|
||||
self, doc: fitz.fitz.Document, page: fitz.fitz.Page
|
||||
) -> str:
|
||||
|
||||
@@ -4,7 +4,7 @@ import logging
|
||||
import os
|
||||
import uuid
|
||||
from importlib.metadata import version
|
||||
from typing import Any, Dict, Iterable, Iterator, List, Optional
|
||||
from typing import Dict, Iterator, List, Optional
|
||||
|
||||
from langchain_core.documents import Document
|
||||
|
||||
@@ -271,67 +271,3 @@ class PebbloSafeLoader(BaseLoader):
|
||||
doc_metadata["pb_checksum"] = classified_docs.get(doc.pb_id, {}).get(
|
||||
"pb_checksum", None
|
||||
)
|
||||
|
||||
|
||||
class PebbloTextLoader(BaseLoader):
|
||||
"""
|
||||
Loader for text data.
|
||||
|
||||
Since PebbloSafeLoader is a wrapper around document loaders, this loader is
|
||||
used to load text data directly into Documents.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
texts: Iterable[str],
|
||||
*,
|
||||
source: Optional[str] = None,
|
||||
ids: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
metadatas: Optional[List[Dict[str, Any]]] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Args:
|
||||
texts: Iterable of text data.
|
||||
source: Source of the text data.
|
||||
Optional. Defaults to None.
|
||||
ids: List of unique identifiers for each text.
|
||||
Optional. Defaults to None.
|
||||
metadata: Metadata for all texts.
|
||||
Optional. Defaults to None.
|
||||
metadatas: List of metadata for each text.
|
||||
Optional. Defaults to None.
|
||||
"""
|
||||
self.texts = texts
|
||||
self.source = source
|
||||
self.ids = ids
|
||||
self.metadata = metadata
|
||||
self.metadatas = metadatas
|
||||
|
||||
def lazy_load(self) -> Iterator[Document]:
|
||||
"""
|
||||
Lazy load text data into Documents.
|
||||
|
||||
Returns:
|
||||
Iterator of Documents
|
||||
"""
|
||||
for i, text in enumerate(self.texts):
|
||||
_id = None
|
||||
metadata = self.metadata or {}
|
||||
if self.metadatas and i < len(self.metadatas) and self.metadatas[i]:
|
||||
metadata.update(self.metadatas[i])
|
||||
if self.ids and i < len(self.ids):
|
||||
_id = self.ids[i]
|
||||
yield Document(id=_id, page_content=text, metadata=metadata)
|
||||
|
||||
def load(self) -> List[Document]:
|
||||
"""
|
||||
Load text data into Documents.
|
||||
|
||||
Returns:
|
||||
List of Documents
|
||||
"""
|
||||
documents = []
|
||||
for doc in self.lazy_load():
|
||||
documents.append(doc)
|
||||
return documents
|
||||
|
||||
@@ -227,7 +227,7 @@ class RecursiveUrlLoader(BaseLoader):
|
||||
"https://docs.python.org/3.9/",
|
||||
prevent_outside=True,
|
||||
base_url="https://docs.python.org",
|
||||
link_regex=r'<a\\s+(?:[^>]*?\\s+)?href="([^"]*(?=index)[^"]*)"',
|
||||
link_regex=r'<a\s+(?:[^>]*?\s+)?href="([^"]*(?=index)[^"]*)"',
|
||||
exclude_dirs=['https://docs.python.org/3.9/faq']
|
||||
)
|
||||
docs = loader.load()
|
||||
|
||||
@@ -132,7 +132,6 @@ class BeautifulSoupTransformer(BaseDocumentTransformer):
|
||||
Args:
|
||||
html_content: The original HTML content string.
|
||||
tags: A list of tags to be extracted from the HTML.
|
||||
remove_comments: If set to True, the comments will be removed.
|
||||
|
||||
Returns:
|
||||
A string combining the content of the extracted tags.
|
||||
@@ -185,7 +184,6 @@ def get_navigable_strings(
|
||||
|
||||
Args:
|
||||
element: A BeautifulSoup element.
|
||||
remove_comments: If set to True, the comments will be removed.
|
||||
|
||||
Returns:
|
||||
A generator of strings.
|
||||
|
||||
@@ -213,7 +213,7 @@ class SambaStudioEmbeddings(BaseModel, Embeddings):
|
||||
)
|
||||
try:
|
||||
if params.get("select_expert"):
|
||||
embedding = response.json()["predictions"]
|
||||
embedding = response.json()["predictions"][0]
|
||||
else:
|
||||
embedding = response.json()["predictions"]
|
||||
embeddings.extend(embedding)
|
||||
@@ -299,7 +299,7 @@ class SambaStudioEmbeddings(BaseModel, Embeddings):
|
||||
)
|
||||
try:
|
||||
if params.get("select_expert"):
|
||||
embedding = response.json()["predictions"][0]
|
||||
embedding = response.json()["predictions"][0][0]
|
||||
else:
|
||||
embedding = response.json()["predictions"][0]
|
||||
except KeyError:
|
||||
|
||||
@@ -1,708 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import abstractmethod
|
||||
from collections.abc import AsyncIterable, Collection, Iterable, Iterator
|
||||
from typing import (
|
||||
Any,
|
||||
ClassVar,
|
||||
Optional,
|
||||
from langchain_core.graph_vectorstores.base import (
|
||||
GraphVectorStore,
|
||||
GraphVectorStoreRetriever,
|
||||
Node,
|
||||
)
|
||||
|
||||
from langchain_core._api import beta
|
||||
from langchain_core.callbacks import (
|
||||
AsyncCallbackManagerForRetrieverRun,
|
||||
CallbackManagerForRetrieverRun,
|
||||
)
|
||||
from langchain_core.documents import Document
|
||||
from langchain_core.load import Serializable
|
||||
from langchain_core.runnables import run_in_executor
|
||||
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
|
||||
from pydantic import Field
|
||||
|
||||
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link
|
||||
|
||||
|
||||
def _has_next(iterator: Iterator) -> bool:
|
||||
"""Checks if the iterator has more elements.
|
||||
Warning: consumes an element from the iterator"""
|
||||
sentinel = object()
|
||||
return next(iterator, sentinel) is not sentinel


@beta()
class Node(Serializable):
    """Node in the GraphVectorStore.

    Edges exist from nodes with an outgoing link to nodes with a matching incoming link.

    For instance, two nodes `a` and `b` connected over a hyperlink ``https://some-url``
    would look like:

    .. code-block:: python

        [
            Node(
                id="a",
                text="some text a",
                links=[
                    Link(kind="hyperlink", tag="https://some-url", direction="in")
                ],
            ),
            Node(
                id="b",
                text="some text b",
                links=[
                    Link(kind="hyperlink", tag="https://some-url", direction="out")
                ],
            )
        ]
    """

    id: Optional[str] = None
    """Unique ID for the node. Will be generated by the GraphVectorStore if not set."""
    text: str
    """Text contained by the node."""
    metadata: dict = Field(default_factory=dict)
    """Metadata for the node."""
    links: list[Link] = Field(default_factory=list)
    """Links associated with the node."""


def _texts_to_nodes(
    texts: Iterable[str],
    metadatas: Optional[Iterable[dict]],
    ids: Optional[Iterable[str]],
) -> Iterator[Node]:
    metadatas_it = iter(metadatas) if metadatas else None
    ids_it = iter(ids) if ids else None
    for text in texts:
        try:
            _metadata = next(metadatas_it).copy() if metadatas_it else {}
        except StopIteration as e:
            raise ValueError("texts iterable longer than metadatas") from e
        try:
            _id = next(ids_it) if ids_it else None
        except StopIteration as e:
            raise ValueError("texts iterable longer than ids") from e

        links = _metadata.pop(METADATA_LINKS_KEY, [])
        if not isinstance(links, list):
            links = list(links)
        yield Node(
            id=_id,
            metadata=_metadata,
            text=text,
            links=links,
        )
    if ids_it and _has_next(ids_it):
        raise ValueError("ids iterable longer than texts")
    if metadatas_it and _has_next(metadatas_it):
        raise ValueError("metadatas iterable longer than texts")
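

# A minimal usage sketch (assumes _texts_to_nodes as defined above): texts are
# paired with optional metadatas/ids, and any length mismatch raises ValueError.
_nodes = list(_texts_to_nodes(["a", "b"], [{"k": 1}, {"k": 2}], ["id-a", "id-b"]))
assert [n.id for n in _nodes] == ["id-a", "id-b"]
try:
    list(_texts_to_nodes(["only one text"], None, ["id-1", "id-2"]))
except ValueError as err:
    assert "ids iterable longer than texts" in str(err)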


def _documents_to_nodes(documents: Iterable[Document]) -> Iterator[Node]:
    for doc in documents:
        metadata = doc.metadata.copy()
        links = metadata.pop(METADATA_LINKS_KEY, [])
        if not isinstance(links, list):
            links = list(links)
        yield Node(
            id=doc.id,
            metadata=metadata,
            text=doc.page_content,
            links=links,
        )


@beta()
def nodes_to_documents(nodes: Iterable[Node]) -> Iterator[Document]:
    """Convert nodes to documents.

    Args:
        nodes: The nodes to convert to documents.
    Returns:
        The documents generated from the nodes.
    """
    for node in nodes:
        metadata = node.metadata.copy()
        metadata[METADATA_LINKS_KEY] = [
            # Convert the core `Link` (from the node) back to the local `Link`.
            Link(kind=link.kind, direction=link.direction, tag=link.tag)
            for link in node.links
        ]

        yield Document(
            id=node.id,
            page_content=node.text,
            metadata=metadata,
        )
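

# A round-trip sketch (assumes the converters above): links stored under the
# "links" metadata key survive Document -> Node -> Document conversion, since
# nodes_to_documents rebuilds an equal (frozen dataclass) Link for each node link.
_doc = Document(
    id="a",
    page_content="some text a",
    metadata={METADATA_LINKS_KEY: [Link(kind="hyperlink", direction="in", tag="https://some-url")]},
)
_restored = next(nodes_to_documents(_documents_to_nodes([_doc])))
assert _restored.metadata[METADATA_LINKS_KEY] == _doc.metadata[METADATA_LINKS_KEY]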


@beta(message="Added in version 0.2.14 of langchain_core. API subject to change.")
class GraphVectorStore(VectorStore):
    """A hybrid vector-and-graph store.

    Document chunks support vector-similarity search as well as edges linking
    chunks based on structural and semantic properties.

    .. versionadded:: 0.2.14
    """

    @abstractmethod
    def add_nodes(
        self,
        nodes: Iterable[Node],
        **kwargs: Any,
    ) -> Iterable[str]:
        """Add nodes to the graph store.

        Args:
            nodes: the nodes to add.
        """

    async def aadd_nodes(
        self,
        nodes: Iterable[Node],
        **kwargs: Any,
    ) -> AsyncIterable[str]:
        """Add nodes to the graph store.

        Args:
            nodes: the nodes to add.
        """
        iterator = iter(await run_in_executor(None, self.add_nodes, nodes, **kwargs))
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)
            if doc is done:
                break
            yield doc  # type: ignore[misc]
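
    # A sketch of the sync-to-async bridge used in aadd_nodes above (pattern
    # only; `sync_iterable` is a stand-in name). StopIteration cannot propagate
    # cleanly out of run_in_executor, so a unique sentinel signals exhaustion
    # while each next() call runs on a worker thread:
    #
    #     iterator = iter(sync_iterable)
    #     done = object()
    #     while True:
    #         item = await run_in_executor(None, next, iterator, done)
    #         if item is done:
    #             break
    #         yield item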

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        *,
        ids: Optional[Iterable[str]] = None,
        **kwargs: Any,
    ) -> list[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        The Links present in the metadata field `links` will be extracted to create
        the `Node` links.

        E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`,
        the function call would look like:

        .. code-block:: python

            store.add_texts(
                ids=["a", "b"],
                texts=["some text a", "some text b"],
                metadatas=[
                    {
                        "links": [
                            Link.incoming(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                    {
                        "links": [
                            Link.outgoing(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                ],
            )

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
                The metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.
            **kwargs: vectorstore specific parameters.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        nodes = _texts_to_nodes(texts, metadatas, ids)
        return list(self.add_nodes(nodes, **kwargs))

    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        *,
        ids: Optional[Iterable[str]] = None,
        **kwargs: Any,
    ) -> list[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        The Links present in the metadata field `links` will be extracted to create
        the `Node` links.

        E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`,
        the function call would look like:

        .. code-block:: python

            await store.aadd_texts(
                ids=["a", "b"],
                texts=["some text a", "some text b"],
                metadatas=[
                    {
                        "links": [
                            Link.incoming(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                    {
                        "links": [
                            Link.outgoing(kind="hyperlink", tag="https://some-url")
                        ]
                    },
                ],
            )

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
                The metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.
            **kwargs: vectorstore specific parameters.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        nodes = _texts_to_nodes(texts, metadatas, ids)
        return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]

    def add_documents(
        self,
        documents: Iterable[Document],
        **kwargs: Any,
    ) -> list[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        The Links present in the document metadata field `links` will be extracted to
        create the `Node` links.

        E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`,
        the function call would look like:

        .. code-block:: python

            store.add_documents(
                [
                    Document(
                        id="a",
                        page_content="some text a",
                        metadata={
                            "links": [
                                Link.incoming(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                    Document(
                        id="b",
                        page_content="some text b",
                        metadata={
                            "links": [
                                Link.outgoing(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                ]
            )

        Args:
            documents: Documents to add to the vectorstore.
                The document's metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.

        Returns:
            List of IDs of the added texts.
        """
        nodes = _documents_to_nodes(documents)
        return list(self.add_nodes(nodes, **kwargs))

    async def aadd_documents(
        self,
        documents: Iterable[Document],
        **kwargs: Any,
    ) -> list[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        The Links present in the document metadata field `links` will be extracted to
        create the `Node` links.

        E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`,
        the function call would look like:

        .. code-block:: python

            await store.aadd_documents(
                [
                    Document(
                        id="a",
                        page_content="some text a",
                        metadata={
                            "links": [
                                Link.incoming(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                    Document(
                        id="b",
                        page_content="some text b",
                        metadata={
                            "links": [
                                Link.outgoing(kind="hyperlink", tag="http://some-url")
                            ]
                        }
                    ),
                ]
            )

        Args:
            documents: Documents to add to the vectorstore.
                The document's metadata key `links` shall be an iterable of
                :py:class:`~langchain_community.graph_vectorstores.links.Link`.

        Returns:
            List of IDs of the added texts.
        """
        nodes = _documents_to_nodes(documents)
        return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]

    @abstractmethod
    def traversal_search(
        self,
        query: str,
        *,
        k: int = 4,
        depth: int = 1,
        **kwargs: Any,
    ) -> Iterable[Document]:
        """Retrieve documents from traversing this graph store.

        First, `k` nodes are retrieved using a search for each `query` string.
        Then, additional nodes are discovered up to the given `depth` from those
        starting nodes.

        Args:
            query: The query string.
            k: The number of Documents to return from the initial search.
                Defaults to 4. Applies to each of the query strings.
            depth: The maximum depth of edges to traverse. Defaults to 1.
        Returns:
            Retrieved documents.
        """

    async def atraversal_search(
        self,
        query: str,
        *,
        k: int = 4,
        depth: int = 1,
        **kwargs: Any,
    ) -> AsyncIterable[Document]:
        """Retrieve documents from traversing this graph store.

        First, `k` nodes are retrieved using a search for each `query` string.
        Then, additional nodes are discovered up to the given `depth` from those
        starting nodes.

        Args:
            query: The query string.
            k: The number of Documents to return from the initial search.
                Defaults to 4. Applies to each of the query strings.
            depth: The maximum depth of edges to traverse. Defaults to 1.
        Returns:
            Retrieved documents.
        """
        iterator = iter(
            await run_in_executor(
                None, self.traversal_search, query, k=k, depth=depth, **kwargs
            )
        )
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)
            if doc is done:
                break
            yield doc  # type: ignore[misc]

    @abstractmethod
    def mmr_traversal_search(
        self,
        query: str,
        *,
        k: int = 4,
        depth: int = 2,
        fetch_k: int = 100,
        adjacent_k: int = 10,
        lambda_mult: float = 0.5,
        score_threshold: float = float("-inf"),
        **kwargs: Any,
    ) -> Iterable[Document]:
        """Retrieve documents from this graph store using MMR-traversal.

        This strategy first retrieves the top `fetch_k` results by similarity to
        the question. It then selects the top `k` results based on
        maximum-marginal relevance using the given `lambda_mult`.

        At each step, it considers the (remaining) documents from `fetch_k` as
        well as any documents connected by edges to a selected document
        retrieved based on similarity (a "root").

        Args:
            query: The query string to search for.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch via similarity.
                Defaults to 100.
            adjacent_k: Number of adjacent Documents to fetch.
                Defaults to 10.
            depth: Maximum depth of a node (number of edges) from a node
                retrieved via similarity. Defaults to 2.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding to maximum
                diversity and 1 to minimum diversity. Defaults to 0.5.
            score_threshold: Only documents with a score greater than or equal
                to this threshold will be chosen. Defaults to negative infinity.
        """

    async def ammr_traversal_search(
        self,
        query: str,
        *,
        k: int = 4,
        depth: int = 2,
        fetch_k: int = 100,
        adjacent_k: int = 10,
        lambda_mult: float = 0.5,
        score_threshold: float = float("-inf"),
        **kwargs: Any,
    ) -> AsyncIterable[Document]:
        """Retrieve documents from this graph store using MMR-traversal.

        This strategy first retrieves the top `fetch_k` results by similarity to
        the question. It then selects the top `k` results based on
        maximum-marginal relevance using the given `lambda_mult`.

        At each step, it considers the (remaining) documents from `fetch_k` as
        well as any documents connected by edges to a selected document
        retrieved based on similarity (a "root").

        Args:
            query: The query string to search for.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch via similarity.
                Defaults to 100.
            adjacent_k: Number of adjacent Documents to fetch.
                Defaults to 10.
            depth: Maximum depth of a node (number of edges) from a node
                retrieved via similarity. Defaults to 2.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding to maximum
                diversity and 1 to minimum diversity. Defaults to 0.5.
            score_threshold: Only documents with a score greater than or equal
                to this threshold will be chosen. Defaults to negative infinity.
        """
        iterator = iter(
            await run_in_executor(
                None,
                self.mmr_traversal_search,
                query,
                k=k,
                fetch_k=fetch_k,
                adjacent_k=adjacent_k,
                depth=depth,
                lambda_mult=lambda_mult,
                score_threshold=score_threshold,
                **kwargs,
            )
        )
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)
            if doc is done:
                break
            yield doc  # type: ignore[misc]
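
    # A sketch of the selection rule behind the MMR traversal (assuming the
    # standard maximal-marginal-relevance formulation; the concrete scoring
    # lives in each implementation). lambda_mult trades relevance for novelty:
    #
    #     score(c) = lambda_mult * sim(query, c)
    #                - (1 - lambda_mult) * max(sim(c, s) for s in selected)
    #
    # and candidates scoring below score_threshold are never selected.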

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> list[Document]:
        return list(self.traversal_search(query, k=k, depth=0))

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> list[Document]:
        return list(
            self.mmr_traversal_search(
                query, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, depth=0
            )
        )

    async def asimilarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> list[Document]:
        return [doc async for doc in self.atraversal_search(query, k=k, depth=0)]

    def search(self, query: str, search_type: str, **kwargs: Any) -> list[Document]:
        if search_type == "similarity":
            return self.similarity_search(query, **kwargs)
        elif search_type == "similarity_score_threshold":
            docs_and_similarities = self.similarity_search_with_relevance_scores(
                query, **kwargs
            )
            return [doc for doc, _ in docs_and_similarities]
        elif search_type == "mmr":
            return self.max_marginal_relevance_search(query, **kwargs)
        elif search_type == "traversal":
            return list(self.traversal_search(query, **kwargs))
        elif search_type == "mmr_traversal":
            return list(self.mmr_traversal_search(query, **kwargs))
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity', 'similarity_score_threshold', "
                "'mmr', 'traversal' or 'mmr_traversal'."
            )
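
    # A minimal dispatch sketch (assumes a concrete GraphVectorStore instance
    # named `store`): the two graph-aware modes route to the traversal methods
    # above, while the remaining modes fall back to plain vector search.
    #
    #     docs = store.search("query", search_type="mmr_traversal", k=4, depth=2)
    #     docs = store.search("query", search_type="similarity", k=4)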

    async def asearch(
        self, query: str, search_type: str, **kwargs: Any
    ) -> list[Document]:
        if search_type == "similarity":
            return await self.asimilarity_search(query, **kwargs)
        elif search_type == "similarity_score_threshold":
            docs_and_similarities = await self.asimilarity_search_with_relevance_scores(
                query, **kwargs
            )
            return [doc for doc, _ in docs_and_similarities]
        elif search_type == "mmr":
            return await self.amax_marginal_relevance_search(query, **kwargs)
        elif search_type == "traversal":
            return [doc async for doc in self.atraversal_search(query, **kwargs)]
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity', 'similarity_score_threshold', "
                "'mmr' or 'traversal'."
            )

    def as_retriever(self, **kwargs: Any) -> GraphVectorStoreRetriever:
        """Return GraphVectorStoreRetriever initialized from this GraphVectorStore.

        Args:
            **kwargs: Keyword arguments to pass to the search function.
                Can include:

                - search_type (Optional[str]): Defines the type of search that
                    the Retriever should perform.
                    Can be ``traversal`` (default), ``similarity``, ``mmr``, or
                    ``similarity_score_threshold``.
                - search_kwargs (Optional[Dict]): Keyword arguments to pass to the
                    search function. Can include things like:

                    - k (int): Amount of documents to return (Default: 4).
                    - depth (int): The maximum depth of edges to traverse (Default: 1).
                    - score_threshold (float): Minimum relevance threshold
                        for similarity_score_threshold.
                    - fetch_k (int): Amount of documents to pass to MMR algorithm
                        (Default: 20).
                    - lambda_mult (float): Diversity of results returned by MMR;
                        1 for minimum diversity and 0 for maximum. (Default: 0.5).
        Returns:
            Retriever for this GraphVectorStore.

        Examples:

        .. code-block:: python

            # Retrieve documents traversing edges
            docsearch.as_retriever(
                search_type="traversal",
                search_kwargs={'k': 6, 'depth': 3}
            )

            # Retrieve more documents with higher diversity
            # Useful if your dataset has many similar documents
            docsearch.as_retriever(
                search_type="mmr",
                search_kwargs={'k': 6, 'lambda_mult': 0.25}
            )

            # Fetch more documents for the MMR algorithm to consider
            # But only return the top 5
            docsearch.as_retriever(
                search_type="mmr",
                search_kwargs={'k': 5, 'fetch_k': 50}
            )

            # Only retrieve documents that have a relevance score
            # above a certain threshold
            docsearch.as_retriever(
                search_type="similarity_score_threshold",
                search_kwargs={'score_threshold': 0.8}
            )

            # Only get the single most similar document from the dataset
            docsearch.as_retriever(search_kwargs={'k': 1})

        """
        return GraphVectorStoreRetriever(vectorstore=self, **kwargs)


class GraphVectorStoreRetriever(VectorStoreRetriever):
    """Retriever class for GraphVectorStore."""

    vectorstore: GraphVectorStore
    """GraphVectorStore to use for retrieval."""
    search_type: str = "traversal"
    """Type of search to perform. Defaults to "traversal"."""
    allowed_search_types: ClassVar[Collection[str]] = (
        "similarity",
        "similarity_score_threshold",
        "mmr",
        "traversal",
        "mmr_traversal",
    )

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        if self.search_type == "traversal":
            return list(self.vectorstore.traversal_search(query, **self.search_kwargs))
        elif self.search_type == "mmr_traversal":
            return list(
                self.vectorstore.mmr_traversal_search(query, **self.search_kwargs)
            )
        else:
            return super()._get_relevant_documents(query, run_manager=run_manager)

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> list[Document]:
        if self.search_type == "traversal":
            return [
                doc
                async for doc in self.vectorstore.atraversal_search(
                    query, **self.search_kwargs
                )
            ]
        elif self.search_type == "mmr_traversal":
            return [
                doc
                async for doc in self.vectorstore.ammr_traversal_search(
                    query, **self.search_kwargs
                )
            ]
        else:
            return await super()._aget_relevant_documents(
                query, run_manager=run_manager
            )

__all__ = ["GraphVectorStore", "GraphVectorStoreRetriever", "Node"]
@@ -12,12 +12,12 @@ from typing import (
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings

from langchain_community.graph_vectorstores.base import (
from langchain_core.graph_vectorstores.base import (
    GraphVectorStore,
    Node,
    nodes_to_documents,
)

from langchain_community.utilities.cassandra import SetupMode

if TYPE_CHECKING:
@@ -2,11 +2,11 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Union

from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link

from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
from langchain_community.graph_vectorstores.links import Link

# TypeAlias is not available in Python 3.9, we can't use that or the newer `type`.
GLiNERInput = Union[str, Document]
@@ -34,7 +34,7 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):
    .. seealso::

        - :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
        - :class:`How to create links between documents <langchain_community.graph_vectorstores.links.Link>`
        - :class:`How to create links between documents <langchain_core.graph_vectorstores.links.Link>`

    How to link Documents on common named entities
    ==============================================
@@ -59,12 +59,12 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):

    We can use :meth:`extract_one` on a document to get the links and add the links
    to the document metadata with
    :meth:`~langchain_community.graph_vectorstores.links.add_links`::
    :meth:`~langchain_core.graph_vectorstores.links.add_links`::

        from langchain_community.document_loaders import TextLoader
        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
        from langchain_community.graph_vectorstores.extractors import GLiNERLinkExtractor
        from langchain_community.graph_vectorstores.links import add_links
        from langchain_core.graph_vectorstores.links import add_links
        from langchain_text_splitters import CharacterTextSplitter

        loader = TextLoader("state_of_the_union.txt")
@@ -87,7 +87,7 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):
    Using LinkExtractorTransformer
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    Using the :class:`~langchain_community.graph_vectorstores.extractors.link_extractor_transformer.LinkExtractorTransformer`,
    Using the :class:`~langchain_community.graph_vectorstores.extractors.keybert_link_extractor.LinkExtractorTransformer`,
    we can simplify the link extraction::

        from langchain_community.document_loaders import TextLoader
@@ -113,7 +113,7 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):

        {'source': 'state_of_the_union.txt', 'links': [Link(kind='entity:Person', direction='bidir', tag='President Zelenskyy'), Link(kind='entity:Person', direction='bidir', tag='Vladimir Putin')]}

    The documents with named entity links can then be added to a :class:`~langchain_community.graph_vectorstores.base.GraphVectorStore`::
    The documents with named entity links can then be added to a :class:`~langchain_core.graph_vectorstores.base.GraphVectorStore`::

        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
@@ -2,6 +2,7 @@ from typing import Callable, List, Set

from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link

from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
@@ -9,7 +10,6 @@ from langchain_community.graph_vectorstores.extractors.link_extractor import (
from langchain_community.graph_vectorstores.extractors.link_extractor_adapter import (
    LinkExtractorAdapter,
)
from langchain_community.graph_vectorstores.links import Link

# TypeAlias is not available in Python 3.9, we can't use that or the newer `type`.
HierarchyInput = List[str]
@@ -6,8 +6,8 @@ from urllib.parse import urldefrag, urljoin, urlparse

from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores import Link

from langchain_community.graph_vectorstores import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
@@ -77,7 +77,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
    .. seealso::

        - :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
        - :class:`How to create links between documents <langchain_community.graph_vectorstores.links.Link>`
        - :class:`How to create links between documents <langchain_core.graph_vectorstores.links.Link>`

    How to link Documents on hyperlinks in HTML
    ===========================================
@@ -103,7 +103,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):

    We can use :meth:`extract_one` on a document to get the links and add the links
    to the document metadata with
    :meth:`~langchain_community.graph_vectorstores.links.add_links`::
    :meth:`~langchain_core.graph_vectorstores.links.add_links`::

        from langchain_community.document_loaders import AsyncHtmlLoader
        from langchain_community.graph_vectorstores.extractors import (
@@ -148,7 +148,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):

        from langchain_community.document_loaders import AsyncHtmlLoader
        from langchain_community.graph_vectorstores.extractors import HtmlLinkExtractor
        from langchain_community.graph_vectorstores.links import add_links
        from langchain_core.graph_vectorstores.links import add_links

        loader = AsyncHtmlLoader(
            [
@@ -176,7 +176,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
    Using LinkExtractorTransformer
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    Using the :class:`~langchain_community.graph_vectorstores.extractors.link_extractor_transformer.LinkExtractorTransformer`,
    Using the :class:`~langchain_community.graph_vectorstores.extractors.keybert_link_extractor.LinkExtractorTransformer`,
    we can simplify the link extraction::

        from langchain_community.document_loaders import AsyncHtmlLoader
@@ -227,7 +227,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):

        Found link from https://python.langchain.com/v0.2/docs/integrations/providers/astradb/ to https://docs.datastax.com/en/astra/home/astra.html.

    The documents with URL links can then be added to a :class:`~langchain_community.graph_vectorstores.base.GraphVectorStore`::
    The documents with URL links can then be added to a :class:`~langchain_core.graph_vectorstores.base.GraphVectorStore`::

        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
@@ -2,11 +2,11 @@ from typing import Any, Dict, Iterable, Optional, Set, Union

from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link

from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
from langchain_community.graph_vectorstores.links import Link

KeybertInput = Union[str, Document]
@@ -37,7 +37,7 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):
    .. seealso::

        - :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
        - :class:`How to create links between documents <langchain_community.graph_vectorstores.links.Link>`
        - :class:`How to create links between documents <langchain_core.graph_vectorstores.links.Link>`

    How to link Documents on common keywords using Keybert
    ======================================================
@@ -62,12 +62,12 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):

    We can use :meth:`extract_one` on a document to get the links and add the links
    to the document metadata with
    :meth:`~langchain_community.graph_vectorstores.links.add_links`::
    :meth:`~langchain_core.graph_vectorstores.links.add_links`::

        from langchain_community.document_loaders import TextLoader
        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
        from langchain_community.graph_vectorstores.extractors import KeybertLinkExtractor
        from langchain_community.graph_vectorstores.links import add_links
        from langchain_core.graph_vectorstores.links import add_links
        from langchain_text_splitters import CharacterTextSplitter

        loader = TextLoader("state_of_the_union.txt")
@@ -91,7 +91,7 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):
    Using LinkExtractorTransformer
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    Using the :class:`~langchain_community.graph_vectorstores.extractors.link_extractor_transformer.LinkExtractorTransformer`,
    Using the :class:`~langchain_community.graph_vectorstores.extractors.keybert_link_extractor.LinkExtractorTransformer`,
    we can simplify the link extraction::

        from langchain_community.document_loaders import TextLoader
@@ -116,7 +116,7 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):

        {'source': 'state_of_the_union.txt', 'links': [Link(kind='kw', direction='bidir', tag='ukraine'), Link(kind='kw', direction='bidir', tag='ukrainian'), Link(kind='kw', direction='bidir', tag='putin'), Link(kind='kw', direction='bidir', tag='vladimir'), Link(kind='kw', direction='bidir', tag='russia')]}

    The documents with keyword links can then be added to a :class:`~langchain_community.graph_vectorstores.base.GraphVectorStore`::
    The documents with keyword links can then be added to a :class:`~langchain_core.graph_vectorstores.base.GraphVectorStore`::

        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
@@ -4,8 +4,7 @@ from abc import ABC, abstractmethod
from typing import Generic, Iterable, Set, TypeVar

from langchain_core._api import beta

from langchain_community.graph_vectorstores import Link
from langchain_core.graph_vectorstores import Link

InputT = TypeVar("InputT")
@@ -1,8 +1,8 @@
from typing import Callable, Iterable, Set, TypeVar

from langchain_core._api import beta
from langchain_core.graph_vectorstores import Link

from langchain_community.graph_vectorstores import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
@@ -3,11 +3,11 @@ from typing import Any, Sequence
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.documents.transformers import BaseDocumentTransformer
from langchain_core.graph_vectorstores.links import copy_with_links

from langchain_community.graph_vectorstores.extractors.link_extractor import (
    LinkExtractor,
)
from langchain_community.graph_vectorstores.links import copy_with_links


@beta()
@@ -1,102 +1,8 @@
from collections.abc import Iterable
from dataclasses import dataclass
from typing import Literal, Union
from langchain_core.graph_vectorstores.links import (
    Link,
    add_links,
    copy_with_links,
    get_links,
)

from langchain_core._api import beta
from langchain_core.documents import Document


@beta()
@dataclass(frozen=True)
class Link:
    """A link to/from a tag of a given kind.

    Edges exist from nodes with an outgoing link to nodes with a matching incoming link.
    """

    kind: str
    """The kind of link. Allows different extractors to use the same tag name without
    creating collisions between extractors. For example “keyword” vs “url”."""
    direction: Literal["in", "out", "bidir"]
    """The direction of the link."""
    tag: str
    """The tag of the link."""

    @staticmethod
    def incoming(kind: str, tag: str) -> "Link":
        """Create an incoming link."""
        return Link(kind=kind, direction="in", tag=tag)

    @staticmethod
    def outgoing(kind: str, tag: str) -> "Link":
        """Create an outgoing link."""
        return Link(kind=kind, direction="out", tag=tag)

    @staticmethod
    def bidir(kind: str, tag: str) -> "Link":
        """Create a bidirectional link."""
        return Link(kind=kind, direction="bidir", tag=tag)


METADATA_LINKS_KEY = "links"


@beta()
def get_links(doc: Document) -> list[Link]:
    """Get the links from a document.

    Args:
        doc: The document to get the links from.
    Returns:
        The list of links from the document.
    """

    links = doc.metadata.setdefault(METADATA_LINKS_KEY, [])
    if not isinstance(links, list):
        # Convert to a list and remember that.
        links = list(links)
        doc.metadata[METADATA_LINKS_KEY] = links
    return links


@beta()
def add_links(doc: Document, *links: Union[Link, Iterable[Link]]) -> None:
    """Add links to the given document's metadata.

    Args:
        doc: The document to add the links to.
        *links: The links to add to the document.
    """
    links_in_metadata = get_links(doc)
    for link in links:
        if isinstance(link, Iterable):
            links_in_metadata.extend(link)
        else:
            links_in_metadata.append(link)


@beta()
def copy_with_links(doc: Document, *links: Union[Link, Iterable[Link]]) -> Document:
    """Return a document with the given links added.

    Args:
        doc: The document to add the links to.
        *links: The links to add to the document.

    Returns:
        A document with a shallow-copy of the metadata with the links added.
    """
    new_links = set(get_links(doc))
    for link in links:
        if isinstance(link, Iterable):
            new_links.update(link)
        else:
            new_links.add(link)

    return Document(
        page_content=doc.page_content,
        metadata={
            **doc.metadata,
            METADATA_LINKS_KEY: list(new_links),
        },
    )

__all__ = ["Link", "add_links", "get_links", "copy_with_links"]
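

# A minimal usage sketch (assumes the helpers above): directions pair up, so an
# outgoing link on one document matches an incoming link with the same kind and
# tag on another, and "bidir" matches both ways.
_doc_a = Document(page_content="a")
add_links(_doc_a, Link.outgoing(kind="hyperlink", tag="https://some-url"))
_doc_b = copy_with_links(
    Document(page_content="b"), Link.incoming(kind="hyperlink", tag="https://some-url")
)
assert get_links(_doc_a)[0].tag == get_links(_doc_b)[0].tag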


@@ -411,9 +411,7 @@ class Neo4jGraph(GraphStore):
        return self.structured_schema

    def query(
        self,
        query: str,
        params: dict = {},
        self, query: str, params: dict = {}, retry_on_session_expired: bool = True
    ) -> List[Dict[str, Any]]:
        """Query Neo4j database.

@@ -425,44 +423,26 @@ class Neo4jGraph(GraphStore):
            List[Dict[str, Any]]: The list of dictionaries containing the query results.
        """
        from neo4j import Query
        from neo4j.exceptions import Neo4jError
        from neo4j.exceptions import CypherSyntaxError, SessionExpired

        try:
            data, _, _ = self._driver.execute_query(
                Query(text=query, timeout=self.timeout),
                database=self._database,
                parameters_=params,
            )
            json_data = [r.data() for r in data]
            if self.sanitize:
                json_data = [value_sanitize(el) for el in json_data]
            return json_data
        except Neo4jError as e:
            if not (
                (
                    (  # isCallInTransactionError
                        e.code == "Neo.DatabaseError.Statement.ExecutionFailed"
                        or e.code
                        == "Neo.DatabaseError.Transaction.TransactionStartFailed"
                    )
                    and "in an implicit transaction" in e.message
                )
                or (  # isPeriodicCommitError
                    e.code == "Neo.ClientError.Statement.SemanticError"
                    and (
                        "in an open transaction is not possible" in e.message
                        or "tried to execute in an explicit transaction" in e.message
                    )
                )
            ):
                raise
            # fallback to allow implicit transactions
            with self._driver.session() as session:
                data = session.run(Query(text=query, timeout=self.timeout), params)
                json_data = [r.data() for r in data]
                if self.sanitize:
                    json_data = [value_sanitize(el) for el in json_data]
                return json_data
        with self._driver.session(database=self._database) as session:
            try:
                data = session.run(Query(text=query, timeout=self.timeout), params)
                json_data = [r.data() for r in data]
                if self.sanitize:
                    json_data = [value_sanitize(el) for el in json_data]
                return json_data
            except CypherSyntaxError as e:
                raise ValueError(f"Generated Cypher Statement is not valid\n{e}")
            except (
                SessionExpired
            ) as e:  # Session expired is a transient error that can be retried
                if retry_on_session_expired:
                    return self.query(
                        query, params=params, retry_on_session_expired=False
                    )
                else:
                    raise e

    def refresh_schema(self) -> None:
        """
@@ -510,6 +510,12 @@ def _import_sagemaker_endpoint() -> Type[BaseLLM]:
    return SagemakerEndpoint


def _import_sambaverse() -> Type[BaseLLM]:
    from langchain_community.llms.sambanova import Sambaverse

    return Sambaverse


def _import_sambastudio() -> Type[BaseLLM]:
    from langchain_community.llms.sambanova import SambaStudio

@@ -811,6 +817,8 @@ def __getattr__(name: str) -> Any:
        return _import_rwkv()
    elif name == "SagemakerEndpoint":
        return _import_sagemaker_endpoint()
    elif name == "Sambaverse":
        return _import_sambaverse()
    elif name == "SambaStudio":
        return _import_sambastudio()
    elif name == "SelfHostedPipeline":
@@ -946,6 +954,7 @@ __all__ = [
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "Sambaverse",
    "SambaStudio",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
@@ -1042,6 +1051,7 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
        "replicate": _import_replicate,
        "rwkv": _import_rwkv,
        "sagemaker_endpoint": _import_sagemaker_endpoint,
        "sambaverse": _import_sambaverse,
        "sambastudio": _import_sambastudio,
        "self_hosted": _import_self_hosted,
        "self_hosted_hugging_face": _import_self_hosted_hugging_face,
@@ -9,6 +9,464 @@ from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict


class SVEndpointHandler:
    """
    SambaNova Systems Interface for Sambaverse endpoint.

    :param str host_url: Base URL of the DaaS API service
    """

    API_BASE_PATH: str = "/api/predict"

    def __init__(self, host_url: str):
        """
        Initialize the SVEndpointHandler.

        :param str host_url: Base URL of the DaaS API service
        """
        self.host_url = host_url
        self.http_session = requests.Session()

    @staticmethod
    def _process_response(response: requests.Response) -> Dict:
        """
        Processes the API response and returns the resulting dict.

        All resulting dicts, regardless of success or failure, will contain the
        `status_code` key with the API response status code.

        If the API returned an error, the resulting dict will contain the key
        `detail` with the error message.

        If the API call was successful, the resulting dict will contain the key
        `data` with the response data.

        :param requests.Response response: the response object to process
        :return: the response dict
        :type: dict
        """
        result: Dict[str, Any] = {}
        try:
            lines_result = response.text.strip().split("\n")
            text_result = lines_result[-1]
            if response.status_code == 200 and json.loads(text_result).get("error"):
                completion = ""
                for line in lines_result[:-1]:
                    completion += json.loads(line)["result"]["responses"][0][
                        "stream_token"
                    ]
                text_result = lines_result[-2]
                result = json.loads(text_result)
                result["result"]["responses"][0]["completion"] = completion
            else:
                result = json.loads(text_result)
        except Exception as e:
            result["detail"] = str(e)
        if "status_code" not in result:
            result["status_code"] = response.status_code
        return result

    @staticmethod
    def _process_streaming_response(
        response: requests.Response,
    ) -> Generator[Dict, None, None]:
        """Process the streaming response"""
        try:
            for line in response.iter_lines():
                chunk = json.loads(line)
                if "status_code" not in chunk:
                    chunk["status_code"] = response.status_code
                if chunk["status_code"] == 200 and chunk.get("error"):
                    chunk["result"] = {"responses": [{"stream_token": ""}]}
                    return chunk
                yield chunk
        except Exception as e:
            raise RuntimeError(f"Error processing streaming response: {e}")

    def _get_full_url(self) -> str:
        """
        Return the full API URL for a given path.

        :returns: the full API URL for the sub-path
        :type: str
        """
        return f"{self.host_url}{self.API_BASE_PATH}"

    def nlp_predict(
        self,
        key: str,
        sambaverse_model_name: Optional[str],
        input: Union[List[str], str],
        params: Optional[str] = "",
        stream: bool = False,
    ) -> Dict:
        """
        NLP predict using inline input string.

        :param str key: API Key
        :param str input: Input string
        :param str params: Input params string
        :returns: Prediction results
        :type: dict
        """
        if params:
            data = {"instance": input, "params": json.loads(params)}
        else:
            data = {"instance": input}
        response = self.http_session.post(
            self._get_full_url(),
            headers={
                "key": key,
                "Content-Type": "application/json",
                "modelName": sambaverse_model_name,
            },
            json=data,
        )
        return SVEndpointHandler._process_response(response)

    def nlp_predict_stream(
        self,
        key: str,
        sambaverse_model_name: Optional[str],
        input: Union[List[str], str],
        params: Optional[str] = "",
    ) -> Iterator[Dict]:
        """
        NLP predict using inline input string.

        :param str key: API Key
        :param str input: Input string
        :param str params: Input params string
        :returns: Prediction results
        :type: dict
        """
        if params:
            data = {"instance": input, "params": json.loads(params)}
        else:
            data = {"instance": input}
        # Streaming output
        response = self.http_session.post(
            self._get_full_url(),
            headers={
                "key": key,
                "Content-Type": "application/json",
                "modelName": sambaverse_model_name,
            },
            json=data,
            stream=True,
        )
        for chunk in SVEndpointHandler._process_streaming_response(response):
            yield chunk


class Sambaverse(LLM):
    """
    Sambaverse large language models.

    To use, you should have the environment variable ``SAMBAVERSE_API_KEY``
    set with your API key.

    Get an API key at https://sambaverse.sambanova.ai and read the extra
    documentation at https://docs.sambanova.ai/sambaverse/latest/index.html

    Example:
        .. code-block:: python

            from langchain_community.llms.sambanova import Sambaverse
            Sambaverse(
                sambaverse_url="https://sambaverse.sambanova.ai",
                sambaverse_api_key="your-sambaverse-api-key",
                sambaverse_model_name="Meta/llama-2-7b-chat-hf",
                streaming=False,
                model_kwargs={
                    "select_expert": "llama-2-7b-chat-hf",
                    "do_sample": False,
                    "max_tokens_to_generate": 100,
                    "temperature": 0.7,
                    "top_p": 1.0,
                    "repetition_penalty": 1.0,
                    "top_k": 50,
                    "process_prompt": False
                },
            )
    """

    sambaverse_url: str = ""
    """Sambaverse url to use"""

    sambaverse_api_key: str = ""
    """sambaverse api key"""

    sambaverse_model_name: Optional[str] = None
    """sambaverse expert model to use"""

    model_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model."""

    streaming: Optional[bool] = False
    """Streaming flag to get streamed response."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        values["sambaverse_url"] = get_from_dict_or_env(
            values,
            "sambaverse_url",
            "SAMBAVERSE_URL",
            default="https://sambaverse.sambanova.ai",
        )
        values["sambaverse_api_key"] = get_from_dict_or_env(
            values, "sambaverse_api_key", "SAMBAVERSE_API_KEY"
        )
        values["sambaverse_model_name"] = get_from_dict_or_env(
            values, "sambaverse_model_name", "SAMBAVERSE_MODEL_NAME"
        )
        return values

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_kwargs": self.model_kwargs}}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "Sambaverse LLM"

    def _get_tuning_params(self, stop: Optional[List[str]]) -> str:
        """
        Get the tuning parameters to use when calling the LLM.

        Args:
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of the stop substrings.

        Returns:
            The tuning parameters as a JSON string.
        """
        _model_kwargs = self.model_kwargs or {}
        _kwarg_stop_sequences = _model_kwargs.get("stop_sequences", [])
        _stop_sequences = stop or _kwarg_stop_sequences
        if not _kwarg_stop_sequences:
            _model_kwargs["stop_sequences"] = ",".join(
                f'"{x}"' for x in _stop_sequences
            )
        tuning_params_dict = {
            k: {"type": type(v).__name__, "value": str(v)}
            for k, v in (_model_kwargs.items())
        }
        _model_kwargs["stop_sequences"] = _kwarg_stop_sequences
        tuning_params = json.dumps(tuning_params_dict)
        return tuning_params
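
    # A sketch of the payload shape _get_tuning_params produces (derived from
    # the dict comprehension above; the kwargs are illustrative):
    #
    #     Sambaverse(model_kwargs={"max_tokens_to_generate": 100})._get_tuning_params(None)
    #     # -> '{"max_tokens_to_generate": {"type": "int", "value": "100"},
    #     #      "stop_sequences": {"type": "str", "value": ""}}'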

    def _handle_nlp_predict(
        self,
        sdk: SVEndpointHandler,
        prompt: Union[List[str], str],
        tuning_params: str,
    ) -> str:
        """
        Perform an NLP prediction using the Sambaverse endpoint handler.

        Args:
            sdk: The SVEndpointHandler to use for the prediction.
            prompt: The prompt to use for the prediction.
            tuning_params: The tuning parameters to use for the prediction.

        Returns:
            The prediction result.

        Raises:
            RuntimeError: If the prediction fails.
        """
        response = sdk.nlp_predict(
            self.sambaverse_api_key, self.sambaverse_model_name, prompt, tuning_params
        )
        if response["status_code"] != 200:
            error = response.get("error")
            if error:
                optional_code = error.get("code")
                optional_details = error.get("details")
                optional_message = error.get("message")
                raise RuntimeError(
                    f"Sambanova /complete call failed with status code "
                    f"{response['status_code']}.\n"
                    f"Message: {optional_message}\n"
                    f"Details: {optional_details}\n"
                    f"Code: {optional_code}\n"
                )
            else:
                raise RuntimeError(
                    f"Sambanova /complete call failed with status code "
                    f"{response['status_code']}."
                    f"{response}."
                )
        return response["result"]["responses"][0]["completion"]

    def _handle_completion_requests(
        self, prompt: Union[List[str], str], stop: Optional[List[str]]
    ) -> str:
        """
        Perform a prediction using the Sambaverse endpoint handler.

        Args:
            prompt: The prompt to use for the prediction.
            stop: stop sequences.

        Returns:
            The prediction result.

        Raises:
            RuntimeError: If the prediction fails.
        """
        ss_endpoint = SVEndpointHandler(self.sambaverse_url)
        tuning_params = self._get_tuning_params(stop)
        return self._handle_nlp_predict(ss_endpoint, prompt, tuning_params)

    def _handle_nlp_predict_stream(
        self, sdk: SVEndpointHandler, prompt: Union[List[str], str], tuning_params: str
    ) -> Iterator[GenerationChunk]:
        """
        Perform a streaming request to the LLM.

        Args:
            sdk: The SVEndpointHandler to use for the prediction.
            prompt: The prompt to use for the prediction.
            tuning_params: The tuning parameters to use for the prediction.

        Returns:
            An iterator of GenerationChunks.
        """
        for chunk in sdk.nlp_predict_stream(
            self.sambaverse_api_key, self.sambaverse_model_name, prompt, tuning_params
        ):
            if chunk["status_code"] != 200:
                error = chunk.get("error")
                if error:
                    optional_code = error.get("code")
                    optional_details = error.get("details")
                    optional_message = error.get("message")
                    raise ValueError(
                        f"Sambanova /complete call failed with status code "
                        f"{chunk['status_code']}.\n"
                        f"Message: {optional_message}\n"
                        f"Details: {optional_details}\n"
                        f"Code: {optional_code}\n"
                    )
                else:
                    raise RuntimeError(
                        f"Sambanova /complete call failed with status code "
                        f"{chunk['status_code']}."
                        f"{chunk}."
                    )
            text = chunk["result"]["responses"][0]["stream_token"]
            generated_chunk = GenerationChunk(text=text)
            yield generated_chunk

    def _stream(
        self,
        prompt: Union[List[str], str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Stream the Sambaverse LLM on the given prompt.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Callback manager for the run.
            kwargs: Additional keyword arguments, directly passed
                to the sambaverse model in the API call.

        Returns:
            An iterator of GenerationChunks.
        """
        ss_endpoint = SVEndpointHandler(self.sambaverse_url)
        tuning_params = self._get_tuning_params(stop)
        try:
            if self.streaming:
                for chunk in self._handle_nlp_predict_stream(
                    ss_endpoint, prompt, tuning_params
                ):
                    if run_manager:
                        run_manager.on_llm_new_token(chunk.text)
                    yield chunk
            else:
                return
        except Exception as e:
            # Handle any errors raised by the inference endpoint
            raise ValueError(f"Error raised by the inference endpoint: {e}") from e

    def _handle_stream_request(
        self,
        prompt: Union[List[str], str],
        stop: Optional[List[str]],
        run_manager: Optional[CallbackManagerForLLMRun],
        kwargs: Dict[str, Any],
    ) -> str:
        """
        Perform a streaming request to the LLM.

        Args:
            prompt: The prompt to generate from.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of the stop substrings.
            run_manager: Callback manager for the run.
            kwargs: Additional keyword arguments, directly passed
                to the sambaverse model in the API call.

        Returns:
            The model output as a string.
        """
        completion = ""
        for chunk in self._stream(
            prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
        ):
            completion += chunk.text
        return completion

    def _call(
        self,
        prompt: Union[List[str], str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Run the LLM on the given input.

        Args:
            prompt: The prompt to generate from.
            stop: Stop words to use when generating. Model output is cut off at the
                first occurrence of any of the stop substrings.
            run_manager: Callback manager for the run.
            kwargs: Additional keyword arguments, directly passed
                to the sambaverse model in the API call.

        Returns:
            The model output as a string.
        """
        try:
            if self.streaming:
                return self._handle_stream_request(prompt, stop, run_manager, kwargs)
            return self._handle_completion_requests(prompt, stop)
        except Exception as e:
            # Handle any errors raised by the inference endpoint
            raise ValueError(f"Error raised by the inference endpoint: {e}") from e


class SSEndpointHandler:
    """
    SambaNova Systems Interface for SambaStudio model endpoints.
@@ -517,7 +975,7 @@ class SambaStudio(LLM):
                first occurrence of any of the stop substrings.
            run_manager: Callback manager for the run.
            kwargs: Additional keyword arguments, directly passed
                to the sambastudio model in API call.
                to the sambaverse model in API call.

        Returns:
            The model output as a string.
@@ -10,6 +10,7 @@ from pydantic import BaseModel, Field, create_model
from typing_extensions import Self

if TYPE_CHECKING:
    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import FunctionInfo

from pydantic import ConfigDict

@@ -120,7 +121,7 @@ def _get_tool_name(function: "FunctionInfo") -> str:
    return tool_name


def _get_default_workspace_client() -> Any:
def _get_default_workspace_client() -> "WorkspaceClient":
    try:
        from databricks.sdk import WorkspaceClient
    except ImportError as e:

@@ -136,7 +137,7 @@ class UCFunctionToolkit(BaseToolkit):
        description="The ID of a Databricks SQL Warehouse to execute functions."
    )

    workspace_client: Any = Field(
    workspace_client: "WorkspaceClient" = Field(
        default_factory=_get_default_workspace_client,
        description="Databricks workspace client.",
    )
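These hunks tighten the `Any` annotations to `"WorkspaceClient"` while keeping databricks-sdk optional at runtime. A self-contained sketch of that pattern, assuming the import and error message mirror the diff (the function body here is illustrative):

# The TYPE_CHECKING import is only evaluated by static type checkers,
# so the annotation can name WorkspaceClient without a hard runtime dependency.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from databricks.sdk import WorkspaceClient


def get_default_workspace_client() -> "WorkspaceClient":
    try:
        from databricks.sdk import WorkspaceClient  # imported lazily at call time
    except ImportError as e:
        raise ImportError(
            "Install databricks-sdk to use the UC function toolkit."
        ) from e
    return WorkspaceClient()  # picks up auth from the environment
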
@@ -69,19 +69,6 @@ class ZenGuardTool(BaseTool):
            )
        return v

    @property
    def _api_key(self) -> str:
        if self.zenguard_api_key is None:
            raise ValueError(
                "API key is required for the ZenGuardTool. "
                "Please provide the API key by either:\n"
                "1. Manually specifying it when initializing the tool: "
                "ZenGuardTool(zenguard_api_key='your_api_key')\n"
                "2. Setting it as an environment variable:"
                f" {self._ZENGUARD_API_KEY_ENV_NAME}"
            )
        return self.zenguard_api_key

    def _run(
        self,
        prompts: List[str],

@@ -104,7 +91,7 @@ class ZenGuardTool(BaseTool):
        response = requests.post(
            self._ZENGUARD_API_URL_ROOT + postfix,
            json=json,
            headers={"x-api-key": self._api_key},
            headers={"x-api-key": self.zenguard_api_key},
            timeout=5,
        )
        response.raise_for_status()

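The `_api_key` property deleted in the first hunk is a guard pattern worth noting: every read of an optional credential goes through a property that fails loudly instead of letting `None` reach a request header. In miniature (stand-in class, not the real tool):

from typing import Optional


class KeyedClient:
    def __init__(self, api_key: Optional[str] = None):
        self.api_key = api_key

    @property
    def _api_key(self) -> str:
        # Fail with guidance instead of sending a None key over the wire.
        if self.api_key is None:
            raise ValueError("API key is required: pass api_key= or set the env var.")
        return self.api_key


try:
    KeyedClient()._api_key
except ValueError as e:
    print(e)
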
@@ -24,18 +24,6 @@ class FinancialDatasetsAPIWrapper(BaseModel):
            data, "financial_datasets_api_key", "FINANCIAL_DATASETS_API_KEY"
        )

    @property
    def _api_key(self) -> str:
        if self.financial_datasets_api_key is None:
            raise ValueError(
                "API key is required for the FinancialDatasetsAPIWrapper. "
                "Please provide the API key by either:\n"
                "1. Manually specifying it when initializing the wrapper: "
                "FinancialDatasetsAPIWrapper(financial_datasets_api_key='your_api_key')\n"
                "2. Setting it as an environment variable: FINANCIAL_DATASETS_API_KEY"
            )
        return self.financial_datasets_api_key

    def get_income_statements(
        self,
        ticker: str,

@@ -59,7 +47,7 @@ class FinancialDatasetsAPIWrapper(BaseModel):
        )

        # Add the api key to the headers
        headers = {"X-API-KEY": self._api_key}
        headers = {"X-API-KEY": self.financial_datasets_api_key}

        # Execute the request
        response = requests.get(url, headers=headers)

@@ -90,7 +78,7 @@ class FinancialDatasetsAPIWrapper(BaseModel):
        )

        # Add the api key to the headers
        headers = {"X-API-KEY": self._api_key}
        headers = {"X-API-KEY": self.financial_datasets_api_key}

        # Execute the request
        response = requests.get(url, headers=headers)

@@ -122,7 +110,7 @@ class FinancialDatasetsAPIWrapper(BaseModel):
        )

        # Add the api key to the headers
        headers = {"X-API-KEY": self._api_key}
        headers = {"X-API-KEY": self.financial_datasets_api_key}

        # Execute the request
        response = requests.get(url, headers=headers)

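All three hunks make the same substitution in the request headers. A runnable miniature of the request shape, with a placeholder echo endpoint and key; note that, unlike the `requests.get` calls above, this sketch passes an explicit `timeout` so a hung endpoint cannot block the caller indefinitely:

import requests

headers = {"X-API-KEY": "your_api_key"}  # normally sourced from FINANCIAL_DATASETS_API_KEY
url = "https://httpbin.org/headers"  # stand-in endpoint that echoes request headers
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
print(response.json())
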
@@ -443,12 +443,6 @@ class AzureSearch(VectorStore):
            logger.debug("Nothing to insert, skipping.")
            return []

        # when `keys` are not passed in and there is `ids` in kwargs, use those instead
        # base class expects `ids` passed in rather than `keys`
        # https://github.com/langchain-ai/langchain/blob/4cdaca67dc51dba887289f56c6fead3c1a52f97d/libs/core/langchain_core/vectorstores/base.py#L65
        if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == len(embeddings)):
            keys = kwargs["ids"]

        return self.add_embeddings(zip(texts, embeddings), metadatas, keys=keys)

    async def aadd_texts(

@@ -473,12 +467,6 @@ class AzureSearch(VectorStore):
            logger.debug("Nothing to insert, skipping.")
            return []

        # when `keys` are not passed in and there is `ids` in kwargs, use those instead
        # base class expects `ids` passed in rather than `keys`
        # https://github.com/langchain-ai/langchain/blob/4cdaca67dc51dba887289f56c6fead3c1a52f97d/libs/core/langchain_core/vectorstores/base.py#L65
        if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == len(embeddings)):
            keys = kwargs["ids"]

        return await self.aadd_embeddings(zip(texts, embeddings), metadatas, keys=keys)
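Both hunks touch the same three-way guard: adopt `ids` from kwargs only when no explicit keys were given and the count matches the embeddings. Extracted into a standalone helper it reads as follows (the function name is illustrative):

def resolve_keys(keys, kwargs, n_embeddings):
    # Base-class callers pass `ids`; AzureSearch historically used `keys`.
    if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == n_embeddings):
        return kwargs["ids"]
    return keys


print(resolve_keys(None, {"ids": ["a", "b"]}, 2))  # -> ['a', 'b']
print(resolve_keys(["k1", "k2"], {"ids": ["a", "b"]}, 2))  # -> ['k1', 'k2']
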

    def add_embeddings(

@@ -495,13 +483,9 @@ class AzureSearch(VectorStore):
        data = []
        for i, (text, embedding) in enumerate(text_embeddings):
            # Use provided key otherwise use default key
            if keys:
                key = keys[i]
            else:
                key = str(uuid.uuid4())
            # Encoding key for Azure Search valid characters
            key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")

            key = keys[i] if keys else str(uuid.uuid4())
            # Encoding key for Azure Search valid characters
            key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
            metadata = metadatas[i] if metadatas else {}
            # Add data to index
            # Additional metadata to fields mapping
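The encoding line shared by both sides of the hunk runs standalone; Azure AI Search document keys may only contain letters, digits, underscores, dashes, and equals signs, which URL-safe base64 output satisfies:

import base64
import uuid

key = str(uuid.uuid4())
encoded = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
print(key, "->", encoded)
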
@@ -65,12 +65,10 @@ class Epsilla(VectorStore):
                "Please install pyepsilla package with `pip install pyepsilla`."
            ) from e

        if not isinstance(
            client, (pyepsilla.vectordb.Client, pyepsilla.cloud.client.Vectordb)
        ):
        if not isinstance(client, pyepsilla.vectordb.Client):
            raise TypeError(
                "client should be an instance of pyepsilla.vectordb.Client or "
                f"pyepsilla.cloud.client.Vectordb, got {type(client)}"
                f"client should be an instance of pyepsilla.vectordb.Client, "
                f"got {type(client)}"
            )

        self._client: vectordb.Client = client
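The functional change here is only whether `isinstance` receives one class or a tuple of acceptable classes. In miniature (stand-in classes, not the pyepsilla ones):

class LocalClient: ...
class CloudClient: ...


def validate(client):
    if not isinstance(client, (LocalClient, CloudClient)):  # tuple = accept either
        raise TypeError(
            f"client should be LocalClient or CloudClient, got {type(client)}"
        )
    return client


validate(CloudClient())  # passes; validate(object()) would raise TypeError
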
@@ -595,8 +595,11 @@ class Neo4jVector(VectorStore):
        query: str,
        *,
        params: Optional[dict] = None,
        retry_on_session_expired: bool = True,
    ) -> List[Dict[str, Any]]:
        """Query Neo4j database with retries and exponential backoff.
        """
        This method sends a Cypher query to the connected Neo4j database
        and returns the results as a list of dictionaries.

        Args:
            query (str): The Cypher query to execute.
@@ -605,38 +608,24 @@ class Neo4jVector(VectorStore):
        Returns:
            List[Dict[str, Any]]: List of dictionaries containing the query results.
        """
        from neo4j import Query
        from neo4j.exceptions import Neo4jError

        params = params or {}
        try:
            data, _, _ = self._driver.execute_query(
                query, database=self._database, parameters_=params
            )
            return [r.data() for r in data]
        except Neo4jError as e:
            if not (
                (
                    (  # isCallInTransactionError
                        e.code == "Neo.DatabaseError.Statement.ExecutionFailed"
                        or e.code
                        == "Neo.DatabaseError.Transaction.TransactionStartFailed"
                    )
                    and "in an implicit transaction" in e.message
                )
                or (  # isPeriodicCommitError
                    e.code == "Neo.ClientError.Statement.SemanticError"
                    and (
                        "in an open transaction is not possible" in e.message
                        or "tried to execute in an explicit transaction" in e.message
                    )
                )
            ):
                raise
            # Fallback to allow implicit transactions
            with self._driver.session() as session:
                data = session.run(Query(text=query), params)
                return [r.data() for r in data]
        from neo4j.exceptions import CypherSyntaxError, SessionExpired

        with self._driver.session(database=self._database) as session:
            try:
                data = session.run(query, params)
                return [r.data() for r in data]
            except CypherSyntaxError as e:
                raise ValueError(f"Cypher Statement is not valid\n{e}")
            except (
                SessionExpired
            ) as e:  # Session expired is a transient error that can be retried
                if retry_on_session_expired:
                    return self.query(
                        query, params=params, retry_on_session_expired=False
                    )
                else:
                    raise e

    def verify_version(self) -> None:
        """
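The session-based variant in the hunk above retries exactly once on `SessionExpired` by calling itself with the retry flag cleared. That shape, reduced to a runnable stub (the fake error and runner stand in for the neo4j driver session):

class TransientError(Exception):
    pass


_attempts = {"n": 0}


def run_once(q):
    _attempts["n"] += 1
    if _attempts["n"] == 1:
        raise TransientError("session expired")  # fail only on the first call
    return f"result of {q}"


def query(q, retry_on_expired=True):
    try:
        return run_once(q)
    except TransientError:
        if retry_on_expired:
            return query(q, retry_on_expired=False)  # retry exactly once
        raise


print(query("RETURN 1"))  # first attempt fails, the single retry succeeds
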
@@ -144,7 +144,7 @@ class TencentVectorDB(VectorStore):

    In order to use this you need to have a database instance.
    See the following documentation for details:
    https://cloud.tencent.com/document/product/1709/104489
    https://cloud.tencent.com/document/product/1709/94951
    """

    field_id: str = "id"
libs/community/poetry.lock (420 changes, generated)
@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.

[[package]]
name = "aiohappyeyeballs"

@@ -150,13 +150,13 @@ files = [

[[package]]
name = "anyio"
version = "4.5.0"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
    {file = "anyio-4.5.0-py3-none-any.whl", hash = "sha256:fdeb095b7cc5a5563175eedd926ec4ae55413bb4be5770c424af0ba46ccb4a78"},
    {file = "anyio-4.5.0.tar.gz", hash = "sha256:c5a275fe5ca0afd788001f58fca1e69e29ce706d746e317d660e21f70c530ef9"},
    {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
    {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
]

[package.dependencies]

@@ -166,9 +166,9 @@ sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}

[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]

[[package]]
name = "appnope"
@@ -1251,18 +1251,15 @@ zstd = ["zstandard (>=0.18.0)"]

[[package]]
name = "idna"
version = "3.10"
version = "3.8"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
    {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"},
    {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"},
]

[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]

[[package]]
name = "importlib-metadata"
version = "8.5.0"

@@ -1538,13 +1535,13 @@ notebook = "*"

[[package]]
name = "jupyter-client"
version = "8.6.3"
version = "8.6.2"
description = "Jupyter protocol implementation and client libraries"
optional = false
python-versions = ">=3.8"
files = [
    {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"},
    {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"},
    {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"},
    {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"},
]

[package.dependencies]
@@ -1797,8 +1794,8 @@ langchain-core = "^0.3.0"
langchain-text-splitters = "^0.3.0"
langsmith = "^0.1.17"
numpy = [
    {version = "^1", markers = "python_version < \"3.12\""},
    {version = "^1.26.0", markers = "python_version >= \"3.12\""},
    {version = ">=1,<2", markers = "python_version < \"3.12\""},
    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
]
pydantic = "^2.7.4"
PyYAML = ">=5.3"
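The bracketed `markers` fields in pins like the numpy entry above are PEP 508 environment markers; installers evaluate them against the running interpreter to decide which constraint applies. They can be checked directly with the `packaging` library:

from packaging.markers import Marker

for spec, marker in [
    (">=1,<2", 'python_version < "3.12"'),
    (">=1.26.0,<2.0.0", 'python_version >= "3.12"'),
]:
    # Marker.evaluate() tests the marker against the current environment.
    print(spec, Marker(marker).evaluate())
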
@@ -1812,7 +1809,7 @@ url = "../langchain"

[[package]]
name = "langchain-core"
version = "0.3.2"
version = "0.3.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.9,<4.0"

@@ -1821,11 +1818,11 @@ develop = true

[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.125"
langsmith = "^0.1.117"
packaging = ">=23.2,<25"
pydantic = [
    {version = "^2.5.2", markers = "python_full_version < \"3.12.4\""},
    {version = "^2.7.4", markers = "python_full_version >= \"3.12.4\""},
    {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""},
    {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = "^8.1.0,!=8.4.0"

@@ -1846,7 +1843,7 @@ develop = true

[package.dependencies]
httpx = "^0.27.0"
langchain-core = "^0.3.0"
langchain-core = ">=0.3.0.dev1"
pytest = ">=7,<9"
syrupy = "^4"

@@ -1872,13 +1869,13 @@ url = "../text-splitters"

[[package]]
name = "langsmith"
version = "0.1.125"
version = "0.1.120"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
    {file = "langsmith-0.1.125-py3-none-any.whl", hash = "sha256:74ce8eb2663e1ed20bfcfc88d41e0712879306956c9938d1cdbab7d60458bdca"},
    {file = "langsmith-0.1.125.tar.gz", hash = "sha256:2c0eb0c3cbf22cff55bf519b8e889041f9a591bcf97af5152c8e130333c5940e"},
    {file = "langsmith-0.1.120-py3-none-any.whl", hash = "sha256:54d2785e301646c0988e0a69ebe4d976488c87b41928b358cb153b6ddd8db62b"},
    {file = "langsmith-0.1.120.tar.gz", hash = "sha256:25499ca187b41bd89d784b272b97a8d76f60e0e21bdf20336e8a2aa6a9b23ac9"},
]

[package.dependencies]
@@ -2577,13 +2574,13 @@ ptyprocess = ">=0.5"

[[package]]
name = "platformdirs"
version = "4.3.6"
version = "4.3.2"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
    {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
    {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
    {file = "platformdirs-4.3.2-py3-none-any.whl", hash = "sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617"},
    {file = "platformdirs-4.3.2.tar.gz", hash = "sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c"},
]

[package.extras]
@@ -2636,22 +2633,22 @@ wcwidth = "*"

[[package]]
name = "protobuf"
version = "5.28.2"
version = "5.28.1"
description = ""
optional = false
python-versions = ">=3.8"
files = [
    {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"},
    {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"},
    {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"},
    {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"},
    {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"},
    {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"},
    {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"},
    {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"},
    {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"},
    {file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"},
    {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"},
    {file = "protobuf-5.28.1-cp310-abi3-win32.whl", hash = "sha256:fc063acaf7a3d9ca13146fefb5b42ac94ab943ec6e978f543cd5637da2d57957"},
    {file = "protobuf-5.28.1-cp310-abi3-win_amd64.whl", hash = "sha256:4c7f5cb38c640919791c9f74ea80c5b82314c69a8409ea36f2599617d03989af"},
    {file = "protobuf-5.28.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4304e4fceb823d91699e924a1fdf95cde0e066f3b1c28edb665bda762ecde10f"},
    {file = "protobuf-5.28.1-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:0dfd86d2b5edf03d91ec2a7c15b4e950258150f14f9af5f51c17fa224ee1931f"},
    {file = "protobuf-5.28.1-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:51f09caab818707ab91cf09cc5c156026599cf05a4520779ccbf53c1b352fb25"},
    {file = "protobuf-5.28.1-cp38-cp38-win32.whl", hash = "sha256:1b04bde117a10ff9d906841a89ec326686c48ececeb65690f15b8cabe7149495"},
    {file = "protobuf-5.28.1-cp38-cp38-win_amd64.whl", hash = "sha256:cabfe43044ee319ad6832b2fda332646f9ef1636b0130186a3ae0a52fc264bb4"},
    {file = "protobuf-5.28.1-cp39-cp39-win32.whl", hash = "sha256:4b4b9a0562a35773ff47a3df823177ab71a1f5eb1ff56d8f842b7432ecfd7fd2"},
    {file = "protobuf-5.28.1-cp39-cp39-win_amd64.whl", hash = "sha256:f24e5d70e6af8ee9672ff605d5503491635f63d5db2fffb6472be78ba62efd8f"},
    {file = "protobuf-5.28.1-py3-none-any.whl", hash = "sha256:c529535e5c0effcf417682563719e5d8ac8d2b93de07a56108b4c2d436d7a29a"},
    {file = "protobuf-5.28.1.tar.gz", hash = "sha256:42597e938f83bb7f3e4b35f03aa45208d49ae8d5bcb4bc10b9fc825e0ab5e423"},
]

[[package]]
@@ -2721,18 +2718,18 @@ files = [

[[package]]
name = "pydantic"
version = "2.9.2"
version = "2.9.1"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
    {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
    {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
    {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
]

[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
pydantic-core = "2.23.3"
typing-extensions = [
    {version = ">=4.6.1", markers = "python_version < \"3.13\""},
    {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
@@ -2744,100 +2741,100 @@ timezone = ["tzdata"]

[[package]]
name = "pydantic-core"
version = "2.23.4"
version = "2.23.3"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
    {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
    {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
    {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
    {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
    {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
    {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
    {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
    {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
    {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
    {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
    {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
    {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
    {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
    {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
    {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
    {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
    {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
    {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
    {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
    {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
    {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
    {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
    {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
    {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
    {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
    {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
    {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
    {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
    {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
    {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
    {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
    {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
    {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
    {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
    {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
    {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
    {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
    {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
    {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
    {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
    {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
    {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
    {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
    {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
    {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
    {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
    {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
    {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
    {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
    {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
    {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
    {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
    {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
    {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
    {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
    {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
    {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
    {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
    {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
    {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
    {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
    {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
    {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
    {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
    {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
    {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
    {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
    {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
    {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
    {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
    {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
    {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
    {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
    {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
    {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
    {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
    {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
    {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
    {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
    {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
    {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
    {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
]

[package.dependencies]
@@ -3585,64 +3582,64 @@ files = [

[[package]]
name = "sqlalchemy"
version = "2.0.35"
version = "2.0.34"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
    {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"},
    {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"},
    {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"},
    {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"},
    {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"},
    {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"},
    {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"},
    {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"},
    {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"},
    {file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"},
|
||||
{file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"},
|
||||
{file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"},
|
||||
{file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = "sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"},
|
||||
{file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"},
|
||||
{file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"},
|
||||
{file = "SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = "sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"},
|
||||
{file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"},
|
||||
{file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"}
|
||||
greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"}
|
||||
typing-extensions = ">=4.6.0"
|
||||
|
||||
[package.extras]
|
||||
@@ -3841,13 +3838,13 @@ files = [

[[package]]
name = "types-protobuf"
version = "5.27.0.20240920"
version = "5.27.0.20240907"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.8"
files = [
    {file = "types-protobuf-5.27.0.20240920.tar.gz", hash = "sha256:992d695315d11eb2d25e806122c9e1fd9fec282e96104f0a0cb9226cd5d90293"},
    {file = "types_protobuf-5.27.0.20240920-py3-none-any.whl", hash = "sha256:c04140bd3c761a55f4e661372b24a6f508169e0815f2b73da33f34b447ed7a8d"},
    {file = "types-protobuf-5.27.0.20240907.tar.gz", hash = "sha256:bb6f90f66b18d4d1c75667b6586334b0573a6fcee5eb0142a7348a765a7cbadc"},
    {file = "types_protobuf-5.27.0.20240907-py3-none-any.whl", hash = "sha256:5443270534cc8072909ef7ad9e1421ccff924ca658749a6396c0c43d64c32676"},
]

[[package]]
@@ -3889,13 +3886,13 @@ files = [

[[package]]
name = "types-pyyaml"
version = "6.0.12.20240917"
version = "6.0.12.20240808"
description = "Typing stubs for PyYAML"
optional = false
python-versions = ">=3.8"
files = [
    {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"},
    {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"},
    {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"},
    {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"},
]

[[package]]
@@ -3927,15 +3924,29 @@ files = [
[package.dependencies]
types-urllib3 = "*"

[[package]]
name = "types-requests"
version = "2.32.0.20240907"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
    {file = "types-requests-2.32.0.20240907.tar.gz", hash = "sha256:ff33935f061b5e81ec87997e91050f7b4af4f82027a7a7a9d9aaea04a963fdf8"},
    {file = "types_requests-2.32.0.20240907-py3-none-any.whl", hash = "sha256:1d1e79faeaf9d42def77f3c304893dea17a97cae98168ac69f3cb465516ee8da"},
]

[package.dependencies]
urllib3 = ">=2"

[[package]]
name = "types-setuptools"
version = "75.1.0.20240917"
version = "74.1.0.20240907"
description = "Typing stubs for setuptools"
optional = false
python-versions = ">=3.8"
files = [
    {file = "types-setuptools-75.1.0.20240917.tar.gz", hash = "sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"},
    {file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"},
    {file = "types-setuptools-74.1.0.20240907.tar.gz", hash = "sha256:0abdb082552ca966c1e5fc244e4853adc62971f6cd724fb1d8a3713b580e5a65"},
    {file = "types_setuptools-74.1.0.20240907-py3-none-any.whl", hash = "sha256:15b38c8e63ca34f42f6063ff4b1dd662ea20086166d5ad6a102e670a52574120"},
]

[[package]]
@@ -4027,6 +4038,23 @@ brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotl
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]

[[package]]
name = "urllib3"
version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
    {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
    {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]

[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]

[[package]]
name = "vcrpy"
version = "6.0.1"
@@ -4358,4 +4386,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
content-hash = "d4ddaa606dc1af15b47b534482210ad687c8b96c816cb7ab13fa77d184514435"
content-hash = "ee964a118892539749a10eeb2e7e8ce5570cf84faf02ea226fa2af865dc14135"

@@ -42,7 +42,7 @@ aiohttp = "^3.8.3"
tenacity = "^8.1.0,!=8.4.0"
dataclasses-json = ">= 0.5.7, < 0.7"
pydantic-settings = "^2.4.0"
langsmith = "^0.1.125"
langsmith = "^0.1.112"

[[tool.poetry.dependencies.numpy]]
version = "^1"
@@ -63,6 +63,7 @@ addopts = "--strict-markers --strict-config --durations=5 --snapshot-warn-unused
markers = [
    "requires: mark tests as requiring a specific library",
    "scheduled: mark tests to run in scheduled testing",
    "runs: mark tests to run in CI",
    "compile: mark placeholder test used to compile integration tests without running them",
]
asyncio_mode = "auto"

@@ -20,7 +20,7 @@ count=$(git grep -E '(@root_validator)|(@validator)|(@field_validator)|(@pre_ini
# PRs that increase the current count will not be accepted.
# PRs that update the code in the repository so the count can be decreased are welcome!
current_count=128
current_count=129

if [ "$count" -gt "$current_count" ]; then
  echo "The PR seems to be introducing new usage of @root_validator and/or @field_validator."
@@ -1,6 +1,11 @@
# Getting the absolute path of the current file's directory
from importlib import util
import os
from typing import Dict, Sequence

import pytest
from pytest import Config, Function, Parser

# Getting the absolute path of the current file's directory
ABS_PATH = os.path.dirname(os.path.abspath(__file__))

# Getting the absolute path of the project's root directory
@@ -17,3 +22,83 @@ def _load_env() -> None:


_load_env()


def pytest_addoption(parser: Parser) -> None:
    """Add custom command line options to pytest."""
    parser.addoption(
        "--only-extended",
        action="store_true",
        help="Only run extended tests. Does not allow skipping any extended tests.",
    )
    parser.addoption(
        "--only-core",
        action="store_true",
        help="Only run core tests. Never runs any extended tests.",
    )


def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
    """Add implementations for handling custom markers.

    At the moment, this adds support for a custom `requires` marker.

    The `requires` marker is used to denote tests that require one or more packages
    to be installed to run. If the package is not installed, the test is skipped.

    The `requires` marker syntax is:

    .. code-block:: python

        @pytest.mark.requires("package1", "package2")
        def test_something():
            ...
    """
    # Mapping from the name of a package to whether it is installed or not.
    # Used to avoid repeated calls to `util.find_spec`
    required_pkgs_info: Dict[str, bool] = {}

    only_extended = config.getoption("--only-extended") or False
    only_core = config.getoption("--only-core") or False

    if only_extended and only_core:
        raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")

    for item in items:
        requires_marker = item.get_closest_marker("requires")
        if requires_marker is not None:
            if only_core:
                item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
                continue

            # Iterate through the list of required packages
            required_pkgs = requires_marker.args
            for pkg in required_pkgs:
                # If we haven't yet checked whether the pkg is installed
                # let's check it and store the result.
                if pkg not in required_pkgs_info:
                    try:
                        installed = util.find_spec(pkg) is not None
                    except Exception:
                        installed = False
                    required_pkgs_info[pkg] = installed

                if not required_pkgs_info[pkg]:
                    if only_extended:
                        pytest.fail(
                            f"Package `{pkg}` is not installed but is required for "
                            f"extended tests. Please install the given package and "
                            f"try again.",
                        )
                    else:
                        # If the package is not installed, we immediately break
                        # and mark the test as skipped.
                        item.add_marker(
                            pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
                        )
                        break
        else:
            if only_extended:
                item.add_marker(
                    pytest.mark.skip(reason="Skipping not an extended test.")
                )
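As an aside, here is a minimal sketch (not part of the diff) of how the `requires` marker interacts with the flags registered above, assuming a hypothetical package name `foo` that is not installed:

```python
import pytest


# With a bare `pytest`, this test is skipped when `foo` is missing;
# under `pytest --only-extended`, the missing package becomes a hard failure.
@pytest.mark.requires("foo")
def test_needs_foo() -> None:
    import foo  # only reached when the package is installed

    assert foo is not None


# A plain test: runs by default, but is skipped under `pytest --only-extended`.
def test_core_behavior() -> None:
    assert 1 + 1 == 2
```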
@@ -1,3 +1,4 @@
import re
from pathlib import Path
from typing import Sequence, Union

@@ -10,6 +11,7 @@ from langchain_community.document_loaders import (
    PDFMinerPDFasHTMLLoader,
    PyMuPDFLoader,
    PyPDFium2Loader,
    PyPDFLoader,
    UnstructuredPDFLoader,
)

@@ -84,6 +86,41 @@ def test_pdfminer_pdf_as_html_loader() -> None:
    assert len(docs) == 1


@pytest.mark.runs
@pytest.mark.requires("pypdf")
def test_pypdf_loader() -> None:
    """Test PyPDFLoader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
    loader = PyPDFLoader(str(file_path))
    docs = loader.load()

    assert len(docs) == 1

    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PyPDFLoader(str(file_path))

    docs = loader.load()
    assert len(docs) == 16


@pytest.mark.runs
@pytest.mark.requires("pypdf")
def test_pypdf_loader_with_layout() -> None:
    """Test PyPDFLoader with layout mode."""
    file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
    loader = PyPDFLoader(str(file_path), extraction_mode="layout")

    docs = loader.load()
    first_page = docs[0].page_content

    expected = (
        Path(__file__).parent.parent / "examples/layout-parser-paper-page-1.txt"
    ).read_text(encoding="utf-8")
    cleaned_first_page = re.sub(r"\x00", "", first_page)
    cleaned_expected = re.sub(r"\x00", "", expected)
    assert cleaned_first_page == cleaned_expected


def test_pypdfium2_loader() -> None:
    """Test PyPDFium2Loader."""
    file_path = Path(__file__).parent.parent / "examples/hello.pdf"
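For reference, a short usage sketch of the loader exercised above; the file path is a placeholder and `pypdf` must be installed:

```python
from langchain_community.document_loaders import PyPDFLoader

# "layout" mode asks pypdf to preserve the page's visual layout in the text.
loader = PyPDFLoader("example.pdf", extraction_mode="layout")  # placeholder path
docs = loader.load()  # one Document per page, with "page" and "source" metadata
print(docs[0].page_content[:200])
```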
@@ -1,7 +1,7 @@
import pytest
from langchain_core.graph_vectorstores.links import Link

from langchain_community.graph_vectorstores.extractors import GLiNERLinkExtractor
from langchain_community.graph_vectorstores.links import Link

PAGE_1 = """
Cristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃ'tjɐnu

@@ -1,7 +1,7 @@
import pytest
from langchain_core.graph_vectorstores.links import Link

from langchain_community.graph_vectorstores.extractors import KeybertLinkExtractor
from langchain_community.graph_vectorstores.links import Link

PAGE_1 = """
Supervised learning is the machine learning task of learning a function that

@@ -4,9 +4,9 @@ from typing import Iterable, List, Optional, Type

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.graph_vectorstores.links import METADATA_LINKS_KEY, Link

from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link

CASSANDRA_DEFAULT_KEYSPACE = "graph_test_keyspace"
@@ -1,17 +1,28 @@
"""Test sambanova API wrapper.

In order to run this test, you need to have a sambastudio base url,
project id, endpoint id, and api key.
You'll then need to set SAMBASTUDIO_BASE_URL, SAMBASTUDIO_BASE_URI
In order to run this test, you need to have a sambaverse api key,
and a sambaverse base url, project id, endpoint id, and api key.
You'll then need to set SAMBAVERSE_API_KEY, SAMBASTUDIO_BASE_URL,
SAMBASTUDIO_PROJECT_ID, SAMBASTUDIO_ENDPOINT_ID, and SAMBASTUDIO_API_KEY
environment variables.
"""

from langchain_community.llms.sambanova import SambaStudio
from langchain_community.llms.sambanova import SambaStudio, Sambaverse


def test_sambaverse_call() -> None:
    """Test simple non-streaming call to sambaverse."""
    llm = Sambaverse(
        sambaverse_model_name="Meta/llama-2-7b-chat-hf",
        model_kwargs={"select_expert": "llama-2-7b-chat-hf"},
    )
    output = llm.invoke("What is LangChain")
    assert output
    assert isinstance(output, str)


def test_sambastudio_call() -> None:
    """Test simple non-streaming call to sambastudio."""
    """Test simple non-streaming call to sambaverse."""
    llm = SambaStudio()
    output = llm.invoke("What is LangChain")
    assert output
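Outside of the test suite, the same call looks roughly like this; the credential below is a placeholder for the environment variable named in the module docstring:

```python
import os

from langchain_community.llms.sambanova import Sambaverse

os.environ["SAMBAVERSE_API_KEY"] = "..."  # placeholder credential

llm = Sambaverse(
    sambaverse_model_name="Meta/llama-2-7b-chat-hf",
    model_kwargs={"select_expert": "llama-2-7b-chat-hf"},
)
print(llm.invoke("What is LangChain"))
```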
@@ -55,7 +55,6 @@ EXPECTED_ALL = [
    "DedocFileLoader",
    "DedocPDFLoader",
    "PebbloSafeLoader",
    "PebbloTextLoader",
    "DiffbotLoader",
    "DirectoryLoader",
    "DiscordChatLoader",
@@ -1,62 +0,0 @@
import re
from pathlib import Path

import pytest

from langchain_community.document_loaders import PyPDFLoader

path_to_simple_pdf = (
    Path(__file__).parent.parent.parent / "integration_tests/examples/hello.pdf"
)
path_to_layout_pdf = (
    Path(__file__).parent.parent
    / "document_loaders/sample_documents/layout-parser-paper.pdf"
)
path_to_layout_pdf_txt = (
    Path(__file__).parent.parent.parent
    / "integration_tests/examples/layout-parser-paper-page-1.txt"
)


@pytest.mark.requires("pypdf")
def test_pypdf_loader() -> None:
    """Test PyPDFLoader."""
    loader = PyPDFLoader(str(path_to_simple_pdf))
    docs = loader.load()

    assert len(docs) == 1

    loader = PyPDFLoader(str(path_to_layout_pdf))

    docs = loader.load()
    assert len(docs) == 16
    for page, doc in enumerate(docs):
        assert doc.metadata["page"] == page
        assert doc.metadata["source"].endswith("layout-parser-paper.pdf")
        assert len(doc.page_content) > 10

    first_page = docs[0].page_content
    for expected in ["LayoutParser", "A Unified Toolkit"]:
        assert expected in first_page


@pytest.mark.requires("pypdf")
def test_pypdf_loader_with_layout() -> None:
    """Test PyPDFLoader with layout mode."""
    loader = PyPDFLoader(str(path_to_layout_pdf), extraction_mode="layout")

    docs = loader.load()
    assert len(docs) == 16
    for page, doc in enumerate(docs):
        assert doc.metadata["page"] == page
        assert doc.metadata["source"].endswith("layout-parser-paper.pdf")
        assert len(doc.page_content) > 10

    first_page = docs[0].page_content
    for expected in ["LayoutParser", "A Unified Toolkit"]:
        assert expected in first_page

    expected = path_to_layout_pdf_txt.read_text(encoding="utf-8")
    cleaned_first_page = re.sub(r"\x00", "", first_page)
    cleaned_expected = re.sub(r"\x00", "", expected)
    assert cleaned_first_page == cleaned_expected
@@ -25,11 +25,6 @@ def test_pebblo_import() -> None:
    from langchain_community.document_loaders import PebbloSafeLoader  # noqa: F401


def test_pebblo_text_loader_import() -> None:
    """Test that the Pebblo text loader can be imported."""
    from langchain_community.document_loaders import PebbloTextLoader  # noqa: F401


def test_empty_filebased_loader(mocker: MockerFixture) -> None:
    """Test basic file based csv loader."""
    # Setup
@@ -151,42 +146,3 @@ def test_pebblo_safe_loader_api_key() -> None:
    # Assert
    assert loader.pb_client.api_key == api_key
    assert loader.pb_client.classifier_location == "local"


def test_pebblo_text_loader(mocker: MockerFixture) -> None:
    """
    Test loading in-memory text with PebbloTextLoader and PebbloSafeLoader.
    """
    # Setup
    from langchain_community.document_loaders import PebbloSafeLoader, PebbloTextLoader

    mocker.patch.multiple(
        "requests",
        get=MockResponse(json_data={"data": ""}, status_code=200),
        post=MockResponse(json_data={"data": ""}, status_code=200),
    )

    text = "This is a test text."
    source = "fake_source"
    expected_docs = [
        Document(
            metadata={
                "full_path": source,
                "pb_checksum": None,
            },
            page_content=text,
        ),
    ]

    # Exercise
    texts = [text]
    loader = PebbloSafeLoader(
        PebbloTextLoader(texts, source=source),
        "dummy_app_name",
        "dummy_owner",
        "dummy_description",
    )
    result = loader.load()

    # Assert
    assert result == expected_docs
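A minimal sketch of the same pattern outside the test, assuming a Pebblo classifier is reachable; the source label, app name, owner, and description below are placeholders:

```python
from langchain_community.document_loaders import PebbloSafeLoader, PebbloTextLoader

texts = ["This is a test text."]
loader = PebbloSafeLoader(
    PebbloTextLoader(texts, source="in_memory_source"),  # placeholder source label
    "my_app",  # placeholder app name
    "owner",  # placeholder owner
    "description",  # placeholder description
)
docs = loader.load()  # each text becomes a Document with Pebblo metadata attached
```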
@@ -1,5 +1,6 @@
from langchain_core.graph_vectorstores.links import Link

from langchain_community.graph_vectorstores.extractors import HierarchyLinkExtractor
from langchain_community.graph_vectorstores.links import Link

PATH_1 = ["Root", "H1", "h2"]
@@ -1,6 +1,6 @@
import pytest
from langchain_core.graph_vectorstores import Link

from langchain_community.graph_vectorstores import Link
from langchain_community.graph_vectorstores.extractors import (
    HtmlInput,
    HtmlLinkExtractor,
@@ -1,12 +1,12 @@
from typing import Set

from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link, get_links

from langchain_community.graph_vectorstores.extractors import (
    LinkExtractor,
    LinkExtractorTransformer,
)
from langchain_community.graph_vectorstores.links import Link, get_links

TEXT1 = "Text1"
TEXT2 = "Text2"

@@ -77,6 +77,7 @@ EXPECT_ALL = [
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "Sambaverse",
    "SambaStudio",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
@@ -190,40 +190,3 @@ def test_additional_search_options() -> None:
    )
    assert vector_store.client is not None
    assert vector_store.client._api_version == "test"


@pytest.mark.requires("azure.search.documents")
def test_ids_used_correctly() -> None:
    """Check whether vector store uses the document ids when provided with them."""
    from azure.search.documents import SearchClient
    from azure.search.documents.indexes import SearchIndexClient
    from langchain_core.documents import Document

    class Response:
        def __init__(self) -> None:
            self.succeeded: bool = True

    def mock_upload_documents(self, documents: List[object]) -> List[Response]:  # type: ignore[no-untyped-def]
        # assume all documents uploaded successfully
        response = [Response() for _ in documents]
        return response

    documents = [
        Document(
            page_content="page zero Lorem Ipsum",
            metadata={"source": "document.pdf", "page": 0, "id": "ID-document-1"},
        ),
        Document(
            page_content="page one Lorem Ipsum",
            metadata={"source": "document.pdf", "page": 1, "id": "ID-document-2"},
        ),
    ]
    ids_provided = [i.metadata.get("id") for i in documents]

    with patch.object(
        SearchClient, "upload_documents", mock_upload_documents
    ), patch.object(SearchIndexClient, "get_index", mock_default_index):
        vector_store = create_vector_store()
        ids_used_at_upload = vector_store.add_documents(documents, ids=ids_provided)
        assert len(ids_provided) == len(ids_used_at_upload)
        assert ids_provided == ids_used_at_upload
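The behavior under test reduces to the following sketch; `vector_store` stands in for an AzureSearch instance constructed elsewhere, so this is illustrative rather than self-contained:

```python
from langchain_core.documents import Document

docs = [
    Document(page_content="page zero Lorem Ipsum", metadata={"id": "ID-document-1"}),
    Document(page_content="page one Lorem Ipsum", metadata={"id": "ID-document-2"}),
]
ids = [d.metadata["id"] for d in docs]

# `vector_store` is an assumed AzureSearch instance; when explicit ids are passed,
# add_documents should return exactly those ids rather than generated ones.
returned_ids = vector_store.add_documents(docs, ids=ids)
assert returned_ids == ids
```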
@@ -53,7 +53,7 @@ LangChain Core compiles LCEL sequences to an _optimized execution plan_, with au

For more check out the [LCEL docs](https://python.langchain.com/docs/expression_language/).

![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](../../docs/static/svg/langchain_stack_062024.svg "LangChain Framework Overview")
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](../../docs/static/img/langchain_stack.png "LangChain Framework Overview")

For more advanced use cases, also check out [LangGraph](https://github.com/langchain-ai/langgraph), which is a graph-based runner for cyclic and recursive LLM workflows.
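To make the LCEL claim concrete, here is a tiny pipeline built with the `|` operator; `FakeListLLM` is the test stub shipped with langchain-core, standing in for a real model:

```python
from langchain_core.language_models.fake import FakeListLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
llm = FakeListLLM(responses=["Why did the bear cross the road?"])  # canned response
chain = prompt | llm | StrOutputParser()  # an LCEL RunnableSequence

print(chain.invoke({"topic": "bears"}))
```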
@@ -14,8 +14,7 @@ import contextlib
import functools
import inspect
import warnings
from collections.abc import Generator
from typing import Any, Callable, TypeVar, Union, cast
from typing import Any, Callable, Generator, Type, TypeVar, Union, cast

from langchain_core._api.internal import is_caller_internal

@@ -27,7 +26,7 @@ class LangChainBetaWarning(DeprecationWarning):
# PUBLIC API


T = TypeVar("T", bound=Union[Callable[..., Any], type])
T = TypeVar("T", bound=Union[Callable[..., Any], Type])


def beta(
@@ -155,7 +154,7 @@ def beta(
        _name = _name or obj.fget.__qualname__
        old_doc = obj.__doc__

        class _BetaProperty(property):
        class _beta_property(property):
            """A beta property."""

            def __init__(self, fget=None, fset=None, fdel=None, doc=None):
@@ -186,7 +185,7 @@ def beta(

        def finalize(wrapper: Callable[..., Any], new_doc: str) -> Any:
            """Finalize the property."""
            return _BetaProperty(
            return _beta_property(
                fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc
            )
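For context, a hedged sketch of how the decorator in this file is applied; the `message` keyword is taken from `beta`'s signature:

```python
from langchain_core._api.beta_decorator import beta


@beta(message="`my_helper` is in beta; its API may change.")
def my_helper() -> str:
    return "hello"


my_helper()  # emits a LangChainBetaWarning when called
```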
@@ -14,10 +14,11 @@ import contextlib
import functools
import inspect
import warnings
from collections.abc import Generator
from typing import (
    Any,
    Callable,
    Generator,
    Type,
    TypeVar,
    Union,
    cast,
@@ -40,7 +41,7 @@ class LangChainPendingDeprecationWarning(PendingDeprecationWarning):


# Last Any should be FieldInfoV1 but this leads to circular imports
T = TypeVar("T", bound=Union[type, Callable[..., Any], Any])
T = TypeVar("T", bound=Union[Type, Callable[..., Any], Any])


def _validate_deprecation_params(
@@ -261,10 +262,10 @@ def deprecated(
        if not _obj_type:
            _obj_type = "attribute"
        wrapped = None
        _name = _name or cast(Union[type, Callable], obj.fget).__qualname__
        _name = _name or cast(Union[Type, Callable], obj.fget).__qualname__
        old_doc = obj.__doc__

        class _DeprecatedProperty(property):
        class _deprecated_property(property):
            """A deprecated property."""

            def __init__(self, fget=None, fset=None, fdel=None, doc=None):  # type: ignore[no-untyped-def]
@@ -297,13 +298,13 @@ def deprecated(
            """Finalize the property."""
            return cast(
                T,
                _DeprecatedProperty(
                _deprecated_property(
                    fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc
                ),
            )

    else:
        _name = _name or cast(Union[type, Callable], obj).__qualname__
        _name = _name or cast(Union[Type, Callable], obj).__qualname__
        if not _obj_type:
            # edge case: when a function is within another function
            # within a test, this will call it a "method" not a "function"
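And the companion decorator from this module, sketched under the same assumptions; `since`, `alternative`, and `removal` are real parameters of `deprecated`:

```python
from langchain_core._api.deprecation import deprecated


@deprecated(since="0.2.0", alternative="new_helper", removal="1.0")
def old_helper() -> str:
    return "hello"


old_helper()  # emits a LangChainDeprecationWarning pointing at `new_helper`
```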
@@ -25,8 +25,7 @@ The schemas for the agents themselves are defined in langchain.agents.agent.
from __future__ import annotations

import json
from collections.abc import Sequence
from typing import Any, Literal, Union
from typing import Any, List, Literal, Sequence, Union

from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
@@ -72,7 +71,7 @@ class AgentAction(Serializable):
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object.
        Default is ["langchain", "schema", "agent"]."""
        return ["langchain", "schema", "agent"]
@@ -146,7 +145,7 @@ class AgentFinish(Serializable):
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "agent"]
@@ -1,13 +1,19 @@
import asyncio
import threading
from collections import defaultdict
from collections.abc import Awaitable, Mapping, Sequence
from functools import partial
from itertools import groupby
from typing import (
    Any,
    Awaitable,
    Callable,
    DefaultDict,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Type,
    TypeVar,
    Union,
)
@@ -24,7 +30,7 @@ from langchain_core.runnables.config import RunnableConfig, ensure_config, patch
from langchain_core.runnables.utils import ConfigurableFieldSpec, Input, Output

T = TypeVar("T")
Values = dict[Union[asyncio.Event, threading.Event], Any]
Values = Dict[Union[asyncio.Event, threading.Event], Any]
CONTEXT_CONFIG_PREFIX = "__context__/"
CONTEXT_CONFIG_SUFFIX_GET = "/get"
CONTEXT_CONFIG_SUFFIX_SET = "/set"
@@ -64,10 +70,10 @@ def _key_from_id(id_: str) -> str:

def _config_with_context(
    config: RunnableConfig,
    steps: list[Runnable],
    steps: List[Runnable],
    setter: Callable,
    getter: Callable,
    event_cls: Union[type[threading.Event], type[asyncio.Event]],
    event_cls: Union[Type[threading.Event], Type[asyncio.Event]],
) -> RunnableConfig:
    if any(k.startswith(CONTEXT_CONFIG_PREFIX) for k in config.get("configurable", {})):
        return config
@@ -93,10 +99,10 @@ def _config_with_context(
    }

    values: Values = {}
    events: defaultdict[str, Union[asyncio.Event, threading.Event]] = defaultdict(
    events: DefaultDict[str, Union[asyncio.Event, threading.Event]] = defaultdict(
        event_cls
    )
    context_funcs: dict[str, Callable[[], Any]] = {}
    context_funcs: Dict[str, Callable[[], Any]] = {}
    for key, group in grouped_by_key.items():
        getters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_GET)]
        setters = [s for s in group if s[0].id.endswith(CONTEXT_CONFIG_SUFFIX_SET)]
@@ -123,7 +129,7 @@ def _config_with_context(

def aconfig_with_context(
    config: RunnableConfig,
    steps: list[Runnable],
    steps: List[Runnable],
) -> RunnableConfig:
    """Asynchronously patch a runnable config with context getters and setters.

@@ -139,7 +145,7 @@

def config_with_context(
    config: RunnableConfig,
    steps: list[Runnable],
    steps: List[Runnable],
) -> RunnableConfig:
    """Patch a runnable config with context getters and setters.

@@ -159,13 +165,13 @@ class ContextGet(RunnableSerializable):

    prefix: str = ""

    key: Union[str, list[str]]
    key: Union[str, List[str]]

    def __str__(self) -> str:
        return f"ContextGet({_print_keys(self.key)})"

    @property
    def ids(self) -> list[str]:
    def ids(self) -> List[str]:
        prefix = self.prefix + "/" if self.prefix else ""
        keys = self.key if isinstance(self.key, list) else [self.key]
        return [
@@ -174,7 +180,7 @@ class ContextGet(RunnableSerializable):
        ]

    @property
    def config_specs(self) -> list[ConfigurableFieldSpec]:
    def config_specs(self) -> List[ConfigurableFieldSpec]:
        return super().config_specs + [
            ConfigurableFieldSpec(
                id=id_,
@@ -250,7 +256,7 @@ class ContextSet(RunnableSerializable):
        return f"ContextSet({_print_keys(list(self.keys.keys()))})"

    @property
    def ids(self) -> list[str]:
    def ids(self) -> List[str]:
        prefix = self.prefix + "/" if self.prefix else ""
        return [
            f"{CONTEXT_CONFIG_PREFIX}{prefix}{key}{CONTEXT_CONFIG_SUFFIX_SET}"
@@ -258,7 +264,7 @@ class ContextSet(RunnableSerializable):
        ]

    @property
    def config_specs(self) -> list[ConfigurableFieldSpec]:
    def config_specs(self) -> List[ConfigurableFieldSpec]:
        mapper_config_specs = [
            s
            for mapper in self.keys.values()
@@ -358,7 +364,7 @@ class Context:
        return PrefixContext(prefix=scope)

    @staticmethod
    def getter(key: Union[str, list[str]], /) -> ContextGet:
    def getter(key: Union[str, List[str]], /) -> ContextGet:
        return ContextGet(key=key)

    @staticmethod
@@ -379,7 +385,7 @@ class PrefixContext:
    def __init__(self, prefix: str = ""):
        self.prefix = prefix

    def getter(self, key: Union[str, list[str]], /) -> ContextGet:
    def getter(self, key: Union[str, List[str]], /) -> ContextGet:
        return ContextGet(key=key, prefix=self.prefix)

    def setter(
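A usage sketch of the `Context` utilities being retyped above, adapted from the module's documented pattern; the import path is the beta namespace and may differ across versions:

```python
from langchain_core.beta.runnables.context import Context
from langchain_core.runnables import RunnableLambda

# Record the raw input at the start of the chain, then surface it at the end.
chain = (
    Context.setter("input")
    | RunnableLambda(lambda x: x + "!")
    | {"result": RunnableLambda(lambda x: x), "input": Context.getter("input")}
)
print(chain.invoke("hello"))  # {'result': 'hello!', 'input': 'hello'}
```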
@@ -23,8 +23,7 @@ Cache directly competes with Memory. See documentation for Pros and Cons.
from __future__ import annotations

from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Any, Optional
from typing import Any, Dict, Optional, Sequence, Tuple

from langchain_core.outputs import Generation
from langchain_core.runnables import run_in_executor
@@ -158,7 +157,7 @@ class InMemoryCache(BaseCache):
        Raises:
            ValueError: If maxsize is less than or equal to 0.
        """
        self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
        self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
        if maxsize is not None and maxsize <= 0:
            raise ValueError("maxsize must be greater than 0")
        self._maxsize = maxsize
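A short sketch of the cache whose annotations change above; `set_llm_cache` comes from `langchain_core.globals`:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# Bounded in-process cache: at most 100 (prompt, llm_string) entries are kept.
set_llm_cache(InMemoryCache(maxsize=100))
```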
@@ -3,8 +3,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from collections.abc import Sequence
|
||||
from typing import TYPE_CHECKING, Any, Optional, TypeVar, Union
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, TypeVar, Union
|
||||
from uuid import UUID
|
||||
|
||||
from tenacity import RetryCallState
|
||||
@@ -119,7 +118,7 @@ class ChainManagerMixin:
|
||||
|
||||
def on_chain_end(
|
||||
self,
|
||||
outputs: dict[str, Any],
|
||||
outputs: Dict[str, Any],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
@@ -223,13 +222,13 @@ class CallbackManagerMixin:
|
||||
|
||||
def on_llm_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
prompts: list[str],
|
||||
serialized: Dict[str, Any],
|
||||
prompts: List[str],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when LLM starts running.
|
||||
@@ -250,13 +249,13 @@ class CallbackManagerMixin:
|
||||
|
||||
def on_chat_model_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
messages: list[list[BaseMessage]],
|
||||
serialized: Dict[str, Any],
|
||||
messages: List[List[BaseMessage]],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when a chat model starts running.
|
||||
@@ -281,13 +280,13 @@ class CallbackManagerMixin:
|
||||
|
||||
def on_retriever_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
serialized: Dict[str, Any],
|
||||
query: str,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when the Retriever starts running.
|
||||
@@ -304,13 +303,13 @@ class CallbackManagerMixin:
|
||||
|
||||
def on_chain_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
inputs: dict[str, Any],
|
||||
serialized: Dict[str, Any],
|
||||
inputs: Dict[str, Any],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when a chain starts running.
|
||||
@@ -327,14 +326,14 @@ class CallbackManagerMixin:
|
||||
|
||||
def on_tool_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
serialized: Dict[str, Any],
|
||||
input_str: str,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
inputs: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when the tool starts running.
|
||||
@@ -394,8 +393,8 @@ class RunManagerMixin:
|
||||
data: Any,
|
||||
*,
|
||||
run_id: UUID,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Override to define a handler for a custom event.
|
||||
@@ -471,13 +470,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
async def on_llm_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
prompts: list[str],
|
||||
serialized: Dict[str, Any],
|
||||
prompts: List[str],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM starts running.
|
||||
@@ -498,13 +497,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
async def on_chat_model_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
messages: list[list[BaseMessage]],
|
||||
serialized: Dict[str, Any],
|
||||
messages: List[List[BaseMessage]],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
"""Run when a chat model starts running.
|
||||
@@ -534,7 +533,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on new LLM token. Only available when streaming is enabled.
|
||||
@@ -555,7 +554,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM ends running.
|
||||
@@ -574,7 +573,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when LLM errors.
|
||||
@@ -591,13 +590,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
async def on_chain_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
inputs: dict[str, Any],
|
||||
serialized: Dict[str, Any],
|
||||
inputs: Dict[str, Any],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when a chain starts running.
|
||||
@@ -614,11 +613,11 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
async def on_chain_end(
|
||||
self,
|
||||
outputs: dict[str, Any],
|
||||
outputs: Dict[str, Any],
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when a chain ends running.
|
||||
@@ -637,7 +636,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when chain errors.
|
||||
@@ -652,14 +651,14 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
async def on_tool_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
serialized: Dict[str, Any],
|
||||
input_str: str,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
inputs: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when the tool starts running.
|
||||
@@ -681,7 +680,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when the tool ends running.
|
||||
@@ -700,7 +699,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run when tool errors.
|
||||
@@ -719,7 +718,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on an arbitrary text.
|
||||
@@ -755,7 +754,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on agent action.
|
||||
@@ -774,7 +773,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on the agent end.
|
||||
@@ -789,13 +788,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
|
||||
async def on_retriever_start(
|
||||
self,
|
||||
serialized: dict[str, Any],
|
||||
serialized: Dict[str, Any],
|
||||
query: str,
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
metadata: Optional[dict[str, Any]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on the retriever start.
|
||||
@@ -816,7 +815,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on the retriever end.
|
||||
@@ -834,7 +833,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
*,
|
||||
run_id: UUID,
|
||||
parent_run_id: Optional[UUID] = None,
|
||||
tags: Optional[list[str]] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Run on retriever error.
|
||||
@@ -853,8 +852,8 @@ class AsyncCallbackHandler(BaseCallbackHandler):
|
||||
data: Any,
|
||||
*,
|
||||
run_id: UUID,
|
||||
tags: Optional[list[str]] = None,
|
-        metadata: Optional[dict[str, Any]] = None,
+        tags: Optional[List[str]] = None,
+        metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
         """Override to define a handler for a custom event.
@@ -881,14 +880,14 @@ class BaseCallbackManager(CallbackManagerMixin):

     def __init__(
         self,
-        handlers: list[BaseCallbackHandler],
-        inheritable_handlers: Optional[list[BaseCallbackHandler]] = None,
+        handlers: List[BaseCallbackHandler],
+        inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
         parent_run_id: Optional[UUID] = None,
         *,
-        tags: Optional[list[str]] = None,
-        inheritable_tags: Optional[list[str]] = None,
-        metadata: Optional[dict[str, Any]] = None,
-        inheritable_metadata: Optional[dict[str, Any]] = None,
+        tags: Optional[List[str]] = None,
+        inheritable_tags: Optional[List[str]] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        inheritable_metadata: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize callback manager.

@@ -902,8 +901,8 @@ class BaseCallbackManager(CallbackManagerMixin):
                 Default is None.
             metadata (Optional[Dict[str, Any]]): The metadata. Default is None.
         """
-        self.handlers: list[BaseCallbackHandler] = handlers
-        self.inheritable_handlers: list[BaseCallbackHandler] = (
+        self.handlers: List[BaseCallbackHandler] = handlers
+        self.inheritable_handlers: List[BaseCallbackHandler] = (
             inheritable_handlers or []
         )
         self.parent_run_id: Optional[UUID] = parent_run_id
@@ -1003,7 +1002,7 @@ class BaseCallbackManager(CallbackManagerMixin):
            self.inheritable_handlers.remove(handler)

     def set_handlers(
-        self, handlers: list[BaseCallbackHandler], inherit: bool = True
+        self, handlers: List[BaseCallbackHandler], inherit: bool = True
     ) -> None:
         """Set handlers as the only handlers on the callback manager.

@@ -1025,7 +1024,7 @@ class BaseCallbackManager(CallbackManagerMixin):
         """
         self.set_handlers([handler], inherit=inherit)

-    def add_tags(self, tags: list[str], inherit: bool = True) -> None:
+    def add_tags(self, tags: List[str], inherit: bool = True) -> None:
         """Add tags to the callback manager.

         Args:
@@ -1039,7 +1038,7 @@ class BaseCallbackManager(CallbackManagerMixin):
         if inherit:
             self.inheritable_tags.extend(tags)

-    def remove_tags(self, tags: list[str]) -> None:
+    def remove_tags(self, tags: List[str]) -> None:
         """Remove tags from the callback manager.

         Args:
@@ -1049,7 +1048,7 @@ class BaseCallbackManager(CallbackManagerMixin):
            self.tags.remove(tag)
            self.inheritable_tags.remove(tag)

-    def add_metadata(self, metadata: dict[str, Any], inherit: bool = True) -> None:
+    def add_metadata(self, metadata: Dict[str, Any], inherit: bool = True) -> None:
         """Add metadata to the callback manager.

         Args:
@@ -1060,7 +1059,7 @@ class BaseCallbackManager(CallbackManagerMixin):
         if inherit:
             self.inheritable_metadata.update(metadata)

-    def remove_metadata(self, keys: list[str]) -> None:
+    def remove_metadata(self, keys: List[str]) -> None:
         """Remove metadata from the callback manager.

         Args:
@@ -1071,4 +1070,4 @@ class BaseCallbackManager(CallbackManagerMixin):
            self.inheritable_metadata.pop(key)


-Callbacks = Optional[Union[list[BaseCallbackHandler], BaseCallbackManager]]
+Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
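The changes in this file set the pattern for the rest of the branch: PEP 585 builtin generics (list[...], dict[...], type[...]) are swapped back to their typing-module equivalents (List, Dict, Type), presumably so the annotations also work on interpreters older than Python 3.9. A minimal sketch of the failure mode being avoided; the function here is hypothetical, for illustration only:

# On Python 3.8, subscripting builtin types raises at definition time unless
# `from __future__ import annotations` defers annotation evaluation:
#
#     def add_tags(tags: list[str]) -> dict[str, int]: ...  # TypeError on 3.8
#
# The typing aliases below work on every version this sketch assumes (3.8+).
from typing import Dict, List

def count_tags(tags: List[str]) -> Dict[str, int]:
    """Count occurrences of each tag."""
    counts: Dict[str, int] = {}
    for tag in tags:
        counts[tag] = counts.get(tag, 0) + 1
    return counts

print(count_tags(["a", "b", "a"]))  # {'a': 2, 'b': 1}

Note that a file carrying `from __future__ import annotations` only hits the issue where a subscripted builtin is evaluated at runtime (for example the `cast(type[...], ...)` call further down), which is consistent with the `collections.abc` imports also being folded back into `typing` throughout this diff.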
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Any, Optional, TextIO, cast
+from typing import Any, Dict, Optional, TextIO, cast

 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.callbacks import BaseCallbackHandler
@@ -35,7 +35,7 @@ class FileCallbackHandler(BaseCallbackHandler):
         self.file.close()

     def on_chain_start(
-        self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
+        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
     ) -> None:
         """Print out that we are entering a chain.

@@ -51,7 +51,7 @@ class FileCallbackHandler(BaseCallbackHandler):
             file=self.file,
         )

-    def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
+    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
         """Print out that we finished a chain.

         Args:
@@ -5,15 +5,21 @@ import functools
 import logging
 import uuid
 from abc import ABC, abstractmethod
-from collections.abc import AsyncGenerator, Coroutine, Generator, Sequence
 from concurrent.futures import ThreadPoolExecutor
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import copy_context
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Callable,
+    Coroutine,
+    Dict,
+    Generator,
+    List,
     Optional,
+    Sequence,
+    Type,
     TypeVar,
     Union,
     cast,
@@ -58,12 +64,12 @@ def trace_as_chain_group(
     group_name: str,
     callback_manager: Optional[CallbackManager] = None,
     *,
-    inputs: Optional[dict[str, Any]] = None,
+    inputs: Optional[Dict[str, Any]] = None,
     project_name: Optional[str] = None,
     example_id: Optional[Union[str, UUID]] = None,
     run_id: Optional[UUID] = None,
-    tags: Optional[list[str]] = None,
-    metadata: Optional[dict[str, Any]] = None,
+    tags: Optional[List[str]] = None,
+    metadata: Optional[Dict[str, Any]] = None,
 ) -> Generator[CallbackManagerForChainGroup, None, None]:
     """Get a callback manager for a chain group in a context manager.
     Useful for grouping different calls together as a single run even if
@@ -138,12 +144,12 @@ async def atrace_as_chain_group(
     group_name: str,
     callback_manager: Optional[AsyncCallbackManager] = None,
     *,
-    inputs: Optional[dict[str, Any]] = None,
+    inputs: Optional[Dict[str, Any]] = None,
     project_name: Optional[str] = None,
     example_id: Optional[Union[str, UUID]] = None,
     run_id: Optional[UUID] = None,
-    tags: Optional[list[str]] = None,
-    metadata: Optional[dict[str, Any]] = None,
+    tags: Optional[List[str]] = None,
+    metadata: Optional[Dict[str, Any]] = None,
 ) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
     """Get an async callback manager for a chain group in a context manager.
     Useful for grouping different async calls together as a single run even if
@@ -234,7 +240,7 @@ def shielded(func: Func) -> Func:


 def handle_event(
-    handlers: list[BaseCallbackHandler],
+    handlers: List[BaseCallbackHandler],
     event_name: str,
     ignore_condition_name: Optional[str],
     *args: Any,
@@ -252,10 +258,10 @@ def handle_event(
         *args: The arguments to pass to the event handler.
        **kwargs: The keyword arguments to pass to the event handler
     """
-    coros: list[Coroutine[Any, Any, Any]] = []
+    coros: List[Coroutine[Any, Any, Any]] = []

     try:
-        message_strings: Optional[list[str]] = None
+        message_strings: Optional[List[str]] = None
         for handler in handlers:
             try:
                 if ignore_condition_name is None or not getattr(
@@ -312,7 +318,7 @@ def handle_event(
     _run_coros(coros)


-def _run_coros(coros: list[Coroutine[Any, Any, Any]]) -> None:
+def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None:
     if hasattr(asyncio, "Runner"):
         # Python 3.11+
         # Run the coroutines in a new event loop, taking care to
@@ -393,7 +399,7 @@ async def _ahandle_event_for_handler(


 async def ahandle_event(
-    handlers: list[BaseCallbackHandler],
+    handlers: List[BaseCallbackHandler],
     event_name: str,
     ignore_condition_name: Optional[str],
     *args: Any,
@@ -440,13 +446,13 @@ class BaseRunManager(RunManagerMixin):
         self,
         *,
         run_id: UUID,
-        handlers: list[BaseCallbackHandler],
-        inheritable_handlers: list[BaseCallbackHandler],
+        handlers: List[BaseCallbackHandler],
+        inheritable_handlers: List[BaseCallbackHandler],
         parent_run_id: Optional[UUID] = None,
-        tags: Optional[list[str]] = None,
-        inheritable_tags: Optional[list[str]] = None,
-        metadata: Optional[dict[str, Any]] = None,
-        inheritable_metadata: Optional[dict[str, Any]] = None,
+        tags: Optional[List[str]] = None,
+        inheritable_tags: Optional[List[str]] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        inheritable_metadata: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initialize the run manager.

@@ -475,7 +481,7 @@ class BaseRunManager(RunManagerMixin):
         self.inheritable_metadata = inheritable_metadata or {}

     @classmethod
-    def get_noop_manager(cls: type[BRM]) -> BRM:
+    def get_noop_manager(cls: Type[BRM]) -> BRM:
         """Return a manager that doesn't perform any operations.

         Returns:
@@ -818,7 +824,7 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
 class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
     """Callback manager for chain run."""

-    def on_chain_end(self, outputs: Union[dict[str, Any], Any], **kwargs: Any) -> None:
+    def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
         """Run when chain ends running.

         Args:
@@ -923,7 +929,7 @@ class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):

     @shielded
     async def on_chain_end(
-        self, outputs: Union[dict[str, Any], Any], **kwargs: Any
+        self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
     ) -> None:
         """Run when a chain ends running.

@@ -1242,11 +1248,11 @@ class CallbackManager(BaseCallbackManager):

     def on_llm_start(
         self,
-        serialized: dict[str, Any],
-        prompts: list[str],
+        serialized: Dict[str, Any],
+        prompts: List[str],
         run_id: Optional[UUID] = None,
         **kwargs: Any,
-    ) -> list[CallbackManagerForLLMRun]:
+    ) -> List[CallbackManagerForLLMRun]:
         """Run when LLM starts running.

         Args:
@@ -1293,11 +1299,11 @@ class CallbackManager(BaseCallbackManager):

     def on_chat_model_start(
         self,
-        serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],
         run_id: Optional[UUID] = None,
         **kwargs: Any,
-    ) -> list[CallbackManagerForLLMRun]:
+    ) -> List[CallbackManagerForLLMRun]:
         """Run when LLM starts running.

         Args:
@@ -1348,8 +1354,8 @@ class CallbackManager(BaseCallbackManager):

     def on_chain_start(
         self,
-        serialized: Optional[dict[str, Any]],
-        inputs: Union[dict[str, Any], Any],
+        serialized: Optional[Dict[str, Any]],
+        inputs: Union[Dict[str, Any], Any],
         run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> CallbackManagerForChainRun:
@@ -1392,11 +1398,11 @@ class CallbackManager(BaseCallbackManager):

     def on_tool_start(
         self,
-        serialized: Optional[dict[str, Any]],
+        serialized: Optional[Dict[str, Any]],
         input_str: str,
         run_id: Optional[UUID] = None,
         parent_run_id: Optional[UUID] = None,
-        inputs: Optional[dict[str, Any]] = None,
+        inputs: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> CallbackManagerForToolRun:
         """Run when tool starts running.
@@ -1447,7 +1453,7 @@ class CallbackManager(BaseCallbackManager):

     def on_retriever_start(
         self,
-        serialized: Optional[dict[str, Any]],
+        serialized: Optional[Dict[str, Any]],
         query: str,
         run_id: Optional[UUID] = None,
         parent_run_id: Optional[UUID] = None,
@@ -1535,10 +1541,10 @@ class CallbackManager(BaseCallbackManager):
         inheritable_callbacks: Callbacks = None,
         local_callbacks: Callbacks = None,
         verbose: bool = False,
-        inheritable_tags: Optional[list[str]] = None,
-        local_tags: Optional[list[str]] = None,
-        inheritable_metadata: Optional[dict[str, Any]] = None,
-        local_metadata: Optional[dict[str, Any]] = None,
+        inheritable_tags: Optional[List[str]] = None,
+        local_tags: Optional[List[str]] = None,
+        inheritable_metadata: Optional[Dict[str, Any]] = None,
+        local_metadata: Optional[Dict[str, Any]] = None,
     ) -> CallbackManager:
         """Configure the callback manager.

@@ -1577,8 +1583,8 @@ class CallbackManagerForChainGroup(CallbackManager):

     def __init__(
         self,
-        handlers: list[BaseCallbackHandler],
-        inheritable_handlers: Optional[list[BaseCallbackHandler]] = None,
+        handlers: List[BaseCallbackHandler],
+        inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
         parent_run_id: Optional[UUID] = None,
         *,
         parent_run_manager: CallbackManagerForChainRun,
@@ -1675,7 +1681,7 @@ class CallbackManagerForChainGroup(CallbackManager):
             manager.add_handler(handler, inherit=True)
         return manager

-    def on_chain_end(self, outputs: Union[dict[str, Any], Any], **kwargs: Any) -> None:
+    def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
         """Run when traced chain group ends.

         Args:
@@ -1710,11 +1716,11 @@ class AsyncCallbackManager(BaseCallbackManager):

     async def on_llm_start(
         self,
-        serialized: dict[str, Any],
-        prompts: list[str],
+        serialized: Dict[str, Any],
+        prompts: List[str],
         run_id: Optional[UUID] = None,
         **kwargs: Any,
-    ) -> list[AsyncCallbackManagerForLLMRun]:
+    ) -> List[AsyncCallbackManagerForLLMRun]:
         """Run when LLM starts running.

         Args:
@@ -1773,11 +1779,11 @@ class AsyncCallbackManager(BaseCallbackManager):

     async def on_chat_model_start(
         self,
-        serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],
         run_id: Optional[UUID] = None,
         **kwargs: Any,
-    ) -> list[AsyncCallbackManagerForLLMRun]:
+    ) -> List[AsyncCallbackManagerForLLMRun]:
         """Async run when LLM starts running.

         Args:
@@ -1834,8 +1840,8 @@ class AsyncCallbackManager(BaseCallbackManager):

     async def on_chain_start(
         self,
-        serialized: Optional[dict[str, Any]],
-        inputs: Union[dict[str, Any], Any],
+        serialized: Optional[Dict[str, Any]],
+        inputs: Union[Dict[str, Any], Any],
         run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> AsyncCallbackManagerForChainRun:
@@ -1880,7 +1886,7 @@ class AsyncCallbackManager(BaseCallbackManager):

     async def on_tool_start(
         self,
-        serialized: Optional[dict[str, Any]],
+        serialized: Optional[Dict[str, Any]],
         input_str: str,
         run_id: Optional[UUID] = None,
         parent_run_id: Optional[UUID] = None,
@@ -1969,7 +1975,7 @@ class AsyncCallbackManager(BaseCallbackManager):

     async def on_retriever_start(
         self,
-        serialized: Optional[dict[str, Any]],
+        serialized: Optional[Dict[str, Any]],
         query: str,
         run_id: Optional[UUID] = None,
         parent_run_id: Optional[UUID] = None,
@@ -2021,10 +2027,10 @@ class AsyncCallbackManager(BaseCallbackManager):
         inheritable_callbacks: Callbacks = None,
         local_callbacks: Callbacks = None,
         verbose: bool = False,
-        inheritable_tags: Optional[list[str]] = None,
-        local_tags: Optional[list[str]] = None,
-        inheritable_metadata: Optional[dict[str, Any]] = None,
-        local_metadata: Optional[dict[str, Any]] = None,
+        inheritable_tags: Optional[List[str]] = None,
+        local_tags: Optional[List[str]] = None,
+        inheritable_metadata: Optional[Dict[str, Any]] = None,
+        local_metadata: Optional[Dict[str, Any]] = None,
     ) -> AsyncCallbackManager:
         """Configure the async callback manager.

@@ -2063,8 +2069,8 @@ class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):

     def __init__(
         self,
-        handlers: list[BaseCallbackHandler],
-        inheritable_handlers: Optional[list[BaseCallbackHandler]] = None,
+        handlers: List[BaseCallbackHandler],
+        inheritable_handlers: Optional[List[BaseCallbackHandler]] = None,
         parent_run_id: Optional[UUID] = None,
         *,
         parent_run_manager: AsyncCallbackManagerForChainRun,
@@ -2163,7 +2169,7 @@ class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
         return manager

     async def on_chain_end(
-        self, outputs: Union[dict[str, Any], Any], **kwargs: Any
+        self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
     ) -> None:
         """Run when traced chain group ends.

@@ -2196,14 +2202,14 @@ H = TypeVar("H", bound=BaseCallbackHandler, covariant=True)


 def _configure(
-    callback_manager_cls: type[T],
+    callback_manager_cls: Type[T],
     inheritable_callbacks: Callbacks = None,
     local_callbacks: Callbacks = None,
     verbose: bool = False,
-    inheritable_tags: Optional[list[str]] = None,
-    local_tags: Optional[list[str]] = None,
-    inheritable_metadata: Optional[dict[str, Any]] = None,
-    local_metadata: Optional[dict[str, Any]] = None,
+    inheritable_tags: Optional[List[str]] = None,
+    local_tags: Optional[List[str]] = None,
+    inheritable_metadata: Optional[Dict[str, Any]] = None,
+    local_metadata: Optional[Dict[str, Any]] = None,
 ) -> T:
     """Configure the callback manager.

@@ -2348,7 +2354,7 @@ def _configure(
         and handler_class is not None
     )
     if var.get() is not None or create_one:
-        var_handler = var.get() or cast(type[BaseCallbackHandler], handler_class)()
+        var_handler = var.get() or cast(Type[BaseCallbackHandler], handler_class)()
         if handler_class is None:
             if not any(
                 handler is var_handler  # direct pointer comparison
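For context on the `trace_as_chain_group` signature being retyped above: it is a context manager that yields a callback manager so several calls are traced as children of one grouped run. A minimal usage sketch, with an invented group name and payloads:

from langchain_core.callbacks.manager import trace_as_chain_group

with trace_as_chain_group(
    "qa_round",                            # hypothetical group name
    inputs={"question": "What is 2 + 2?"},
    tags=["demo"],
) as manager:
    # Pass `manager` as the `callbacks` argument of chain/LLM invocations
    # here so they are recorded under the group.
    manager.on_chain_end({"answer": "4"})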
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional

 from langchain_core.callbacks.base import BaseCallbackHandler
 from langchain_core.utils import print_text
@@ -23,7 +23,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         self.color = color

     def on_chain_start(
-        self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
+        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
     ) -> None:
         """Print out that we are entering a chain.

@@ -35,7 +35,7 @@ class StdOutCallbackHandler(BaseCallbackHandler):
         class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
         print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m")  # noqa: T201

-    def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
+    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
         """Print out that we finished a chain.

         Args:
@@ -3,7 +3,7 @@
 from __future__ import annotations

 import sys
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Dict, List

 from langchain_core.callbacks.base import BaseCallbackHandler

@@ -17,7 +17,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
     """Callback handler for streaming. Only works with LLMs that support streaming."""

     def on_llm_start(
-        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
+        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
     ) -> None:
         """Run when LLM starts running.

@@ -29,8 +29,8 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):

     def on_chat_model_start(
         self,
-        serialized: dict[str, Any],
-        messages: list[list[BaseMessage]],
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],
         **kwargs: Any,
     ) -> None:
         """Run when LLM starts running.
@@ -68,7 +68,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """

     def on_chain_start(
-        self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
+        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
     ) -> None:
         """Run when a chain starts running.

@@ -78,7 +78,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
             **kwargs (Any): Additional keyword arguments.
         """

-    def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
+    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
         """Run when a chain ends running.

         Args:
@@ -95,7 +95,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """

     def on_tool_start(
-        self, serialized: dict[str, Any], input_str: str, **kwargs: Any
+        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
     ) -> None:
         """Run when the tool starts running.
@@ -18,8 +18,7 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from collections.abc import Sequence
-from typing import Union
+from typing import List, Sequence, Union

 from pydantic import BaseModel, Field

@@ -88,7 +87,7 @@ class BaseChatMessageHistory(ABC):
                 f.write("[]")
     """

-    messages: list[BaseMessage]
+    messages: List[BaseMessage]
     """A property or attribute that returns a list of messages.

     In general, getting the messages may involve IO to the underlying
@@ -96,7 +95,7 @@ class BaseChatMessageHistory(ABC):
     latency.
     """

-    async def aget_messages(self) -> list[BaseMessage]:
+    async def aget_messages(self) -> List[BaseMessage]:
         """Async version of getting messages.

         Can over-ride this method to provide an efficient async implementation.
@@ -205,10 +204,10 @@ class InMemoryChatMessageHistory(BaseChatMessageHistory, BaseModel):
     Stores messages in a memory list.
     """

-    messages: list[BaseMessage] = Field(default_factory=list)
+    messages: List[BaseMessage] = Field(default_factory=list)
     """A list of messages stored in memory."""

-    async def aget_messages(self) -> list[BaseMessage]:
+    async def aget_messages(self) -> List[BaseMessage]:
         """Async version of getting messages.

         Can over-ride this method to provide an efficient async implementation.
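A short sketch of the `InMemoryChatMessageHistory` surface this hunk retypes; the message contents are invented:

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()
history.add_message(HumanMessage(content="Hi there"))
history.add_message(AIMessage(content="Hello!"))

# `messages` is the List[BaseMessage] field retyped in the diff above.
for message in history.messages:
    print(type(message).__name__, message.content)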
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from collections.abc import Iterator
+from typing import Iterator, List

 from langchain_core.chat_sessions import ChatSession

@@ -15,7 +15,7 @@ class BaseChatLoader(ABC):
             An iterator of chat sessions.
         """

-    def load(self) -> list[ChatSession]:
+    def load(self) -> List[ChatSession]:
         """Eagerly load the chat sessions into memory.

         Returns:
@@ -1,7 +1,6 @@
 """**Chat Sessions** are a collection of messages and function calls."""

-from collections.abc import Sequence
-from typing import TypedDict
+from typing import Sequence, TypedDict

 from langchain_core.messages import BaseMessage

@@ -3,8 +3,7 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterator, Iterator
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, AsyncIterator, Iterator, List, Optional

 from langchain_core.documents import Document
 from langchain_core.runnables import run_in_executor
@@ -26,17 +25,17 @@ class BaseLoader(ABC): # noqa: B024

     # Sub-classes should not implement this method directly. Instead, they
     # should implement the lazy load method.
-    def load(self) -> list[Document]:
+    def load(self) -> List[Document]:
         """Load data into Document objects."""
         return list(self.lazy_load())

-    async def aload(self) -> list[Document]:
+    async def aload(self) -> List[Document]:
         """Load data into Document objects."""
         return [document async for document in self.alazy_load()]

     def load_and_split(
         self, text_splitter: Optional[TextSplitter] = None
-    ) -> list[Document]:
+    ) -> List[Document]:
         """Load Documents and split into chunks. Chunks are returned as Documents.

         Do not override this method. It should be considered to be deprecated!
@@ -109,7 +108,7 @@ class BaseBlobParser(ABC):
             Generator of documents
         """

-    def parse(self, blob: Blob) -> list[Document]:
+    def parse(self, blob: Blob) -> List[Document]:
         """Eagerly parse the blob into a document or documents.

         This is a convenience method for interactive development environment.
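The `load`/`lazy_load` relationship visible in the context lines above (eager `load()` simply materializes the lazy iterator) can be illustrated with a minimal custom loader; the class name and file handling are invented for this sketch:

from typing import Iterator, List

from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document


class LineLoader(BaseLoader):
    """Hypothetical loader that emits one Document per line of a file."""

    def __init__(self, path: str) -> None:
        self.path = path

    def lazy_load(self) -> Iterator[Document]:
        with open(self.path) as f:
            for i, line in enumerate(f):
                yield Document(page_content=line.strip(), metadata={"line": i})


# `load()` is inherited: it just returns list(self.lazy_load()).
docs: List[Document] = LineLoader("notes.txt").load()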
@@ -8,7 +8,7 @@ In addition, content loading code should provide a lazy loading interface by def
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from collections.abc import Iterable
+from typing import Iterable

 # Re-export Blob and PathLike for backwards compatibility
 from langchain_core.documents.base import Blob as Blob
@@ -1,8 +1,7 @@
 import datetime
 import json
 import uuid
-from collections.abc import Iterator, Sequence
-from typing import Any, Callable, Optional, Union
+from typing import Any, Callable, Iterator, Optional, Sequence, Union

 from langsmith import Client as LangSmithClient

@@ -2,10 +2,9 @@ from __future__ import annotations

 import contextlib
 import mimetypes
-from collections.abc import Generator
 from io import BufferedReader, BytesIO
 from pathlib import PurePath
-from typing import Any, Literal, Optional, Union, cast
+from typing import Any, Dict, Generator, List, Literal, Optional, Union, cast

 from pydantic import ConfigDict, Field, field_validator, model_validator

@@ -139,7 +138,7 @@ class Blob(BaseMedia):

     @model_validator(mode="before")
     @classmethod
-    def check_blob_is_valid(cls, values: dict[str, Any]) -> Any:
+    def check_blob_is_valid(cls, values: Dict[str, Any]) -> Any:
         """Verify that either data or path is provided."""
         if "data" not in values and "path" not in values:
             raise ValueError("Either data or path must be provided")
@@ -286,7 +285,7 @@ class Document(BaseMedia):
         return True

     @classmethod
-    def get_lc_namespace(cls) -> list[str]:
+    def get_lc_namespace(cls) -> List[str]:
         """Get the namespace of the langchain object."""
         return ["langchain", "schema", "document"]
@@ -1,8 +1,7 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from collections.abc import Sequence
-from typing import Optional
+from typing import Optional, Sequence

 from pydantic import BaseModel

@@ -1,8 +1,7 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Sequence

 from langchain_core.runnables.config import run_in_executor

@@ -1,6 +1,7 @@
 """**Embeddings** interface."""

 from abc import ABC, abstractmethod
+from typing import List

 from langchain_core.runnables.config import run_in_executor

@@ -34,7 +35,7 @@ class Embeddings(ABC):
     """

     @abstractmethod
-    def embed_documents(self, texts: list[str]) -> list[list[float]]:
+    def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs.

         Args:
@@ -45,7 +46,7 @@ class Embeddings(ABC):
         """

     @abstractmethod
-    def embed_query(self, text: str) -> list[float]:
+    def embed_query(self, text: str) -> List[float]:
         """Embed query text.

         Args:
@@ -55,7 +56,7 @@ class Embeddings(ABC):
             Embedding.
         """

-    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
+    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
         """Asynchronous Embed search docs.

         Args:
@@ -66,7 +67,7 @@ class Embeddings(ABC):
         """
         return await run_in_executor(None, self.embed_documents, texts)

-    async def aembed_query(self, text: str) -> list[float]:
+    async def aembed_query(self, text: str) -> List[float]:
         """Asynchronous Embed query text.

         Args:
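To make the retyped `Embeddings` interface concrete, here is a minimal sketch of an implementation; only the two abstract methods need overriding, since the async variants default to running the sync ones in an executor. The hashing scheme is invented purely to return something deterministic:

import hashlib
from typing import List

from langchain_core.embeddings import Embeddings


class FakeHashEmbeddings(Embeddings):
    """Hypothetical embeddings that hash text into a tiny fixed vector."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self.embed_query(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        digest = hashlib.sha256(text.encode()).digest()
        # Map the first 4 bytes to floats in [0, 1).
        return [b / 255 for b in digest[:4]]


vectors = FakeHashEmbeddings().embed_documents(["hello", "world"])
print(len(vectors), len(vectors[0]))  # 2 4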
Some files were not shown because too many files have changed in this diff.