Compare commits


2 Commits

| Author | SHA1 | Message | Date |
| :--- | :--- | :--- | :--- |
| Eugene Yurtsev | aeffbca058 | qweqwe | 2024-09-13 18:20:48 -04:00 |
| Eugene Yurtsev | 3f6057df0d | x | 2024-09-13 18:02:48 -04:00 |
396 changed files with 14100 additions and 13571 deletions

View File

@@ -96,21 +96,25 @@ body:
attributes:
label: System Info
description: |
Please share your system info with us. Do NOT skip this step and please don't trim
the output. Most users don't include enough information here and it makes it harder
for us to help you.
Please share your system info with us.
Run the following command in your terminal and paste the output here:
"pip freeze | grep langchain"
platform (windows / linux / mac)
python version
OR if you're on a recent version of langchain-core you can paste the output of:
python -m langchain_core.sys_info
or if you have an existing python interpreter running:
from langchain_core import sys_info
sys_info.print_sys_info()
alternatively, put the entire output of `pip freeze` here.
placeholder: |
"pip freeze | grep langchain"
platform
python version
Alternatively, if you're on a recent version of langchain-core you can paste the output of:
python -m langchain_core.sys_info
These will only surface LangChain packages, don't forget to include any other relevant
packages you're using (if you're not sure what's relevant, you can paste the entire output of `pip freeze`).
validations:
required: true

View File

@@ -19,16 +19,10 @@ MIN_VERSION_LIBS = [
"langchain",
"langchain-text-splitters",
"SQLAlchemy",
"pydantic",
]
# some libs only get checked on release because of simultaneous changes in
# multiple libs
SKIP_IF_PULL_REQUEST = [
"langchain-core",
"langchain-text-splitters",
"langchain",
"langchain-community",
]
SKIP_IF_PULL_REQUEST = ["langchain-core"]
def get_min_version(version: str) -> str:
@@ -74,10 +68,10 @@ def get_min_version_from_toml(
min_versions = {}
# Iterate over the libs in MIN_VERSION_LIBS
for lib in set(MIN_VERSION_LIBS + (include or [])):
for lib in MIN_VERSION_LIBS:
if versions_for == "pull_request" and lib in SKIP_IF_PULL_REQUEST:
# some libs only get checked on release because of simultaneous
# changes in multiple libs
# changes
continue
# Check if the lib is present in the dependencies
if lib in dependencies:
@@ -95,6 +89,7 @@ def get_min_version_from_toml(
if check_python_version(python_version, vs["python"])
][0]["version"]
# Use parse_version to get the minimum supported version from version_string
min_version = get_min_version(version_string)
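For context, a rough sketch (not the repo's exact code) of what the two helpers these hunks call into plausibly do, assuming Poetry-style version specifiers and PEP 440 Python constraints:

```python
# Hedged sketch only; the real implementations live in
# .github/scripts/get_min_versions.py.
from packaging.specifiers import SpecifierSet
from packaging.version import Version


def get_min_version(version: str) -> str:
    """Reduce a specifier like '^0.2.8' or '>=0.2.8,<0.3' to its lower bound."""
    if version[0] in ("^", "~"):
        return version[1:]
    if version.startswith(">="):
        return version[2:].split(",")[0]
    return version


def check_python_version(python_version: str, constraint: str) -> bool:
    """Check a Python version against a constraint such as '>=3.9,<4.0'."""
    return Version(python_version) in SpecifierSet(constraint)
```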

View File

@@ -85,7 +85,7 @@ jobs:
path: langchain
sparse-checkout: | # this only grabs files for relevant dir
${{ inputs.working-directory }}
ref: ${{ github.ref }} # this scopes to just ref'd branch
ref: master # this scopes to just master branch
fetch-depth: 0 # this fetches entire commit history
- name: Check Tags
id: check-tags
@@ -232,8 +232,7 @@ jobs:
id: min-version
run: |
poetry run pip install packaging
python_version="$(poetry run python --version | awk '{print $2}')"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release $python_version)"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml release ${{ steps.setup-python.outputs.installed-python-version }})"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
echo "min-versions=$min_versions"

View File

@@ -48,6 +48,7 @@ jobs:
shell: bash
run: |
poetry run pip install packaging tomli
echo "Python version ${{ steps.setup-python.outputs.installed-python-version }}"
python_version="$(poetry run python --version | awk '{print $2}')"
min_versions="$(poetry run python $GITHUB_WORKSPACE/.github/scripts/get_min_versions.py pyproject.toml pull_request $python_version)"
echo "min-versions=$min_versions" >> "$GITHUB_OUTPUT"
@@ -58,7 +59,7 @@ jobs:
env:
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
run: |
poetry run pip install $MIN_VERSIONS
poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
make tests
working-directory: ${{ inputs.working-directory }}

View File

@@ -39,7 +39,7 @@ conda install langchain -c conda-forge
For these applications, LangChain simplifies the entire application lifecycle:
- **Open-source libraries**: Build your applications using LangChain's open-source [building blocks](https://python.langchain.com/v0.2/docs/concepts#langchain-expression-language-lcel), [components](https://python.langchain.com/v0.2/docs/concepts), and [third-party integrations](https://python.langchain.com/v0.2/docs/integrations/platforms/).
Use [LangGraph](https://langchain-ai.github.io/langgraph/) to build stateful agents with first-class streaming and human-in-the-loop support.
Use [LangGraph](/docs/concepts/#langgraph) to build stateful agents with first-class streaming and human-in-the-loop support.
- **Productionization**: Inspect, monitor, and evaluate your apps with [LangSmith](https://docs.smith.langchain.com/) so that you can constantly optimize and deploy with confidence.
- **Deployment**: Turn your LangGraph applications into production-ready APIs and Assistants with [LangGraph Cloud](https://langchain-ai.github.io/langgraph/cloud/).

View File

@@ -15,7 +15,7 @@
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, assign, as_tool, get_config_jsonschema, get_input_jsonschema, get_output_jsonschema, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, to_json, model_extra, model_fields_set, model_json_schema, predict, apredict, predict_messages, apredict_messages, generate, generate_prompt, agenerate, agenerate_prompt, call_as_llm
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, astream_log, transform, atransform, get_output_schema, get_prompts, config_schema, map, pick, pipe, with_listeners, with_alisteners, with_config, with_fallbacks, with_types, with_retry, InputType, OutputType, config_specs, output_schema, get_input_schema, get_graph, get_name, input_schema, name, bind, assign, as_tool, get_config_jsonschema, get_input_jsonschema, get_output_jsonschema, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, to_json, model_extra, model_fields_set, model_json_schema
.. NOTE:: {{objname}} implements the standard :py:class:`Runnable Interface <langchain_core.runnables.base.Runnable>`. 🏃

View File

@@ -595,10 +595,10 @@ tool_call = ai_msg.tool_calls[0]
# -> ToolCall(args={...}, id=..., ...)
tool_message = tool.invoke(tool_call)
# -> ToolMessage(
# content="tool result foobar...",
# tool_call_id=...,
# name="tool_name"
# )
content="tool result foobar...",
tool_call_id=...,
name="tool_name"
)
```
If you are invoking the tool this way and want to include an [artifact](/docs/concepts/#toolmessage) for the ToolMessage, you will need to have the tool return two things.
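For reference, a minimal sketch of that two-part return, assuming the standard `@tool` decorator with `response_format="content_and_artifact"` (the tool itself is illustrative):

```python
from typing import List, Tuple

from langchain_core.tools import tool


@tool(response_format="content_and_artifact")
def generate_numbers(n: int) -> Tuple[str, List[int]]:
    """Generate the first n integers."""
    numbers = list(range(n))
    # The first element becomes the ToolMessage content shown to the model;
    # the second is attached to the ToolMessage as its artifact.
    return f"Generated {n} integers", numbers
```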
@@ -717,6 +717,8 @@ During run-time LangChain configures an appropriate callback manager (e.g., [Cal
The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
The callbacks are available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
- **Request time callbacks**: Passed at the time of the request in addition to the input data.
Available on all standard `Runnable` objects. These callbacks are INHERITED by all children
of the object they are defined on. For example, `chain.invoke({"number": 25}, {"callbacks": [handler]})`.

View File

@@ -206,7 +206,7 @@
" ) -> List[Document]:\n",
" \"\"\"Get docs, adding score information.\"\"\"\n",
" docs, scores = zip(\n",
" *self.vectorstore.similarity_search_with_score(query, **search_kwargs)\n",
" *vectorstore.similarity_search_with_score(query, **search_kwargs)\n",
" )\n",
" for doc, score in zip(docs, scores):\n",
" doc.metadata[\"score\"] = score\n",

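Pieced together, the pattern this cell belongs to looks roughly like the sketch below (`vectorstore` is assumed to be defined earlier in the notebook):

```python
from typing import List

from langchain_core.documents import Document
from langchain_core.runnables import chain


@chain
def retriever(query: str) -> List[Document]:
    """Get docs, storing each similarity score in the doc's metadata."""
    # vectorstore is assumed to be instantiated earlier in the notebook.
    docs, scores = zip(*vectorstore.similarity_search_with_score(query))
    for doc, score in zip(docs, scores):
        doc.metadata["score"] = score
    return list(docs)


# docs = retriever.invoke("dinosaurs")  # each doc carries metadata["score"]
```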
View File

@@ -15,15 +15,43 @@
"\n",
"Make sure you have the integration packages installed for any model providers you want to support. E.g. you should have `langchain-openai` installed to init an OpenAI model.\n",
"\n",
":::\n",
"\n",
":::info Requires ``langchain >= 0.2.8``\n",
"\n",
"This functionality was added in ``langchain-core == 0.2.8``. Please make sure your package is up to date.\n",
"\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "165b0de6-9ae3-4e3d-aa98-4fc8a97c4a06",
"metadata": {},
"outputs": [],
"metadata": {
"execution": {
"iopub.execute_input": "2024-09-10T20:22:32.858670Z",
"iopub.status.busy": "2024-09-10T20:22:32.858278Z",
"iopub.status.idle": "2024-09-10T20:22:33.009452Z",
"shell.execute_reply": "2024-09-10T20:22:33.007022Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zsh:1: 0.2.8 not found\r\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain>=0.2.8 langchain-openai langchain-anthropic langchain-google-vertexai"
]
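For orientation, a minimal usage sketch of the `init_chat_model` helper this page covers (the model name and provider are illustrative, and an `OPENAI_API_KEY` is assumed to be set):

```python
from langchain.chat_models import init_chat_model

# Resolves the matching integration package (here langchain-openai) at runtime.
gpt_4o = init_chat_model("gpt-4o", model_provider="openai", temperature=0)
print(gpt_4o.invoke("what's your name").content)
```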

File diff suppressed because one or more lines are too long

View File

@@ -179,7 +179,7 @@
" b: Annotated[int, ..., \"Second integer\"]\n",
"\n",
"\n",
"class multiply(TypedDict):\n",
"class multiply(BaseModel):\n",
" \"\"\"Multiply two integers.\"\"\"\n",
"\n",
" a: Annotated[int, ..., \"First integer\"]\n",

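A hedged sketch of how the `BaseModel` variant from this hunk might be bound as a tool (the chat model is illustrative):

```python
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class multiply(BaseModel):
    """Multiply two integers."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


llm_with_tools = ChatOpenAI(model="gpt-4o-mini").bind_tools([multiply])
# llm_with_tools.invoke("What is 3 * 12?").tool_calls would include a
# `multiply` call with a=3, b=12.
```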
View File

@@ -13,7 +13,7 @@
"\n",
"This sample demonstrates the use of `Amazon Textract` in combination with LangChain as a DocumentLoader.\n",
"\n",
"`Textract` supports`PDF`, `TIFF`, `PNG` and `JPEG` format.\n",
"`Textract` supports`PDF`, `TIF`F, `PNG` and `JPEG` format.\n",
"\n",
"`Textract` supports these [document sizes, languages and characters](https://docs.aws.amazon.com/textract/latest/dg/limits-document.html)."
]
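A minimal loader sketch for this integration (the file path is illustrative, and configured AWS credentials are assumed):

```python
from langchain_community.document_loaders import AmazonTextractPDFLoader

# Works with local files or s3:// URIs in the supported formats above.
loader = AmazonTextractPDFLoader("example_data/document.png")
documents = loader.load()
print(documents[0].page_content[:100])
```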

View File

@@ -61,7 +61,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU firecrawl-py==0.0.20 langchain_community"
"%pip install -qU firecrawl-py langchain_community"
]
},
{

View File

@@ -6,7 +6,7 @@
"source": [
"# Google Speech-to-Text Audio Transcripts\n",
"\n",
"The `SpeechToTextLoader` allows to transcribe audio files with the [Google Cloud Speech-to-Text API](https://cloud.google.com/speech-to-text) and loads the transcribed text into documents.\n",
"The `GoogleSpeechToTextLoader` allows to transcribe audio files with the [Google Cloud Speech-to-Text API](https://cloud.google.com/speech-to-text) and loads the transcribed text into documents.\n",
"\n",
"To use it, you should have the `google-cloud-speech` python package installed, and a Google Cloud project with the [Speech-to-Text API enabled](https://cloud.google.com/speech-to-text/v2/docs/transcribe-client-libraries#before_you_begin).\n",
"\n",
@@ -41,7 +41,7 @@
"source": [
"## Example\n",
"\n",
"The `SpeechToTextLoader` must include the `project_id` and `file_path` arguments. Audio files can be specified as a Google Cloud Storage URI (`gs://...`) or a local file path.\n",
"The `GoogleSpeechToTextLoader` must include the `project_id` and `file_path` arguments. Audio files can be specified as a Google Cloud Storage URI (`gs://...`) or a local file path.\n",
"\n",
"Only synchronous requests are supported by the loader, which has a [limit of 60 seconds or 10MB](https://cloud.google.com/speech-to-text/v2/docs/sync-recognize#:~:text=60%20seconds%20and/or%2010%20MB) per audio file."
]
@@ -52,13 +52,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_community import SpeechToTextLoader\n",
"from langchain_google_community import GoogleSpeechToTextLoader\n",
"\n",
"project_id = \"<PROJECT_ID>\"\n",
"file_path = \"gs://cloud-samples-data/speech/audio.flac\"\n",
"# or a local file path: file_path = \"./audio.wav\"\n",
"\n",
"loader = SpeechToTextLoader(project_id=project_id, file_path=file_path)\n",
"loader = GoogleSpeechToTextLoader(project_id=project_id, file_path=file_path)\n",
"\n",
"docs = loader.load()"
]
@@ -152,7 +152,7 @@
" RecognitionConfig,\n",
" RecognitionFeatures,\n",
")\n",
"from langchain_google_community import SpeechToTextLoader\n",
"from langchain_google_community import GoogleSpeechToTextLoader\n",
"\n",
"project_id = \"<PROJECT_ID>\"\n",
"location = \"global\"\n",
@@ -171,7 +171,7 @@
" ),\n",
")\n",
"\n",
"loader = SpeechToTextLoader(\n",
"loader = GoogleSpeechToTextLoader(\n",
" project_id=project_id,\n",
" location=location,\n",
" recognizer_id=recognizer_id,\n",

View File

@@ -31,8 +31,6 @@ The below document loaders allow you to load webpages.
The below document loaders allow you to load PDF documents.
See this guide for a starting point: [How to: load PDF files](/docs/how_to/document_loader_pdf).
<CategoryTable category="pdf_loaders" />
## Cloud Providers

View File

@@ -16,7 +16,7 @@
"\n",
"| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/document_loaders/file_loaders/unstructured/)|\n",
"| :--- | :--- | :---: | :---: | :---: |\n",
"| [UnstructuredLoader](https://python.langchain.com/api_reference/unstructured/document_loaders/langchain_unstructured.document_loaders.UnstructuredLoader.html) | [langchain_unstructured](https://python.langchain.com/api_reference/unstructured/index.html) | ✅ | ❌ | ✅ | \n",
"| [UnstructuredLoader](https://python.langchain.com/api_reference/unstructured/document_loaders/langchain_unstructured.document_loaders.UnstructuredLoader.html) | [langchain_community](https://python.langchain.com/api_reference/unstructured/index.html) | ✅ | ❌ | ✅ | \n",
"### Loader features\n",
"| Source | Document Lazy Loading | Native Async Support\n",
"| :---: | :---: | :---: | \n",
@@ -519,47 +519,6 @@
"print(\"Length of text in the document:\", len(docs[0].page_content))"
]
},
{
"cell_type": "markdown",
"id": "3ec3c22d-02cd-498b-921f-b839d1404f32",
"metadata": {},
"source": [
"## Loading web pages\n",
"\n",
"`UnstructuredLoader` accepts a `web_url` kwarg when run locally that populates the `url` parameter of the underlying Unstructured [partition](https://docs.unstructured.io/open-source/core-functionality/partitioning). This allows for the parsing of remotely hosted documents, such as HTML web pages.\n",
"\n",
"Example usage:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "bf9a8546-659d-4861-bff2-fdf1ad93ac65",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"page_content='Example Domain' metadata={'category_depth': 0, 'languages': ['eng'], 'filetype': 'text/html', 'url': 'https://www.example.com', 'category': 'Title', 'element_id': 'fdaa78d856f9d143aeeed85bf23f58f8'}\n",
"\n",
"page_content='This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission.' metadata={'languages': ['eng'], 'parent_id': 'fdaa78d856f9d143aeeed85bf23f58f8', 'filetype': 'text/html', 'url': 'https://www.example.com', 'category': 'NarrativeText', 'element_id': '3652b8458b0688639f973fe36253c992'}\n",
"\n",
"page_content='More information...' metadata={'category_depth': 0, 'link_texts': ['More information...'], 'link_urls': ['https://www.iana.org/domains/example'], 'languages': ['eng'], 'filetype': 'text/html', 'url': 'https://www.example.com', 'category': 'Title', 'element_id': '793ab98565d6f6d6f3a6d614e3ace2a9'}\n",
"\n"
]
}
],
"source": [
"from langchain_unstructured import UnstructuredLoader\n",
"\n",
"loader = UnstructuredLoader(web_url=\"https://www.example.com\")\n",
"docs = loader.load()\n",
"\n",
"for doc in docs:\n",
" print(f\"{doc}\\n\")"
]
},
{
"cell_type": "markdown",
"id": "ce01aa40",
@@ -587,7 +546,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.10.13"
}
},
"nbformat": 4,

View File

@@ -6,11 +6,129 @@
"source": [
"# SambaNova\n",
"\n",
"**[SambaNova](https://sambanova.ai/)'s** [Sambastudio](https://sambanova.ai/technology/full-stack-ai-platform) is a platform for running your own open-source models\n",
"**[SambaNova](https://sambanova.ai/)'s** [Sambaverse](https://sambaverse.sambanova.ai/) and [Sambastudio](https://sambanova.ai/technology/full-stack-ai-platform) are platforms for running your own open-source models\n",
"\n",
"This example goes over how to use LangChain to interact with SambaNova models"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sambaverse"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Sambaverse** allows you to interact with multiple open-source models. You can view the list of available models and interact with them in the [playground](https://sambaverse.sambanova.ai/playground).\n",
" **Please note that Sambaverse's free offering is performance-limited.** Companies that are ready to evaluate the production tokens-per-second performance, volume throughput, and 10x lower total cost of ownership (TCO) of SambaNova should [contact us](https://sambaverse.sambanova.ai/contact-us) for a non-limited evaluation instance."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"An API key is required to access Sambaverse models. To get a key, create an account at [sambaverse.sambanova.ai](https://sambaverse.sambanova.ai/)\n",
"\n",
"The [sseclient-py](https://pypi.org/project/sseclient-py/) package is required to run streaming predictions "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet sseclient-py==1.8.0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Register your API key as an environment variable:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"sambaverse_api_key = \"<Your sambaverse API key>\"\n",
"\n",
"# Set the environment variables\n",
"os.environ[\"SAMBAVERSE_API_KEY\"] = sambaverse_api_key"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call Sambaverse models directly from LangChain!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms.sambanova import Sambaverse\n",
"\n",
"llm = Sambaverse(\n",
" sambaverse_model_name=\"Meta/llama-2-7b-chat-hf\",\n",
" streaming=False,\n",
" model_kwargs={\n",
" \"do_sample\": True,\n",
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
" \"process_prompt\": False,\n",
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
" # \"top_p\": 1.0\n",
" },\n",
")\n",
"\n",
"print(llm.invoke(\"Why should I use open source models?\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Streaming response\n",
"\n",
"from langchain_community.llms.sambanova import Sambaverse\n",
"\n",
"llm = Sambaverse(\n",
" sambaverse_model_name=\"Meta/llama-2-7b-chat-hf\",\n",
" streaming=True,\n",
" model_kwargs={\n",
" \"do_sample\": True,\n",
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
" \"process_prompt\": False,\n",
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
" # \"top_p\": 1.0\n",
" },\n",
")\n",
"\n",
"for chunk in llm.stream(\"Why should I use open source models?\"):\n",
" print(chunk, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},

View File

@@ -10,7 +10,7 @@
Install the python SDK:
```bash
pip install firecrawl-py==0.0.20
pip install firecrawl-py
```
## Document loader

View File

@@ -1,12 +1,12 @@
# MLflow AI Gateway for LLMs
# MLflow Deployments for LLMs
>[The MLflow AI Gateway for LLMs](https://www.mlflow.org/docs/latest/llms/deployments/index.html) is a powerful tool designed to streamline the usage and management of various large
>[The MLflow Deployments for LLMs](https://www.mlflow.org/docs/latest/llms/deployments/index.html) is a powerful tool designed to streamline the usage and management of various large
> language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface
> that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests.
## Installation and Setup
Install `mlflow` with MLflow GenAI dependencies:
Install `mlflow` with MLflow Deployments dependencies:
```sh
pip install 'mlflow[genai]'
@@ -39,10 +39,10 @@ endpoints:
openai_api_key: $OPENAI_API_KEY
```
Start the gateway server:
Start the deployments server:
```sh
mlflow gateway start --config-path /path/to/config.yaml
mlflow deployments start-server --config-path /path/to/config.yaml
```
## Example provided by `MLflow`

View File

@@ -0,0 +1,160 @@
# MLflow AI Gateway
:::warning
MLflow AI Gateway has been deprecated. Please use [MLflow Deployments for LLMs](/docs/integrations/providers/mlflow/) instead.
:::
>[The MLflow AI Gateway](https://www.mlflow.org/docs/latest/index.html) service is a powerful tool designed to streamline the usage and management of various large
> language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface
> that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests.
## Installation and Setup
Install `mlflow` with MLflow AI Gateway dependencies:
```sh
pip install 'mlflow[gateway]'
```
Set the OpenAI API key as an environment variable:
```sh
export OPENAI_API_KEY=...
```
Create a configuration file:
```yaml
routes:
- name: completions
route_type: llm/v1/completions
model:
provider: openai
name: text-davinci-003
config:
openai_api_key: $OPENAI_API_KEY
- name: embeddings
route_type: llm/v1/embeddings
model:
provider: openai
name: text-embedding-ada-002
config:
openai_api_key: $OPENAI_API_KEY
```
Start the Gateway server:
```sh
mlflow gateway start --config-path /path/to/config.yaml
```
## Example provided by `MLflow`
>The `mlflow.langchain` module provides an API for logging and loading `LangChain` models.
> This module exports multivariate LangChain models in the langchain flavor and univariate LangChain
> models in the pyfunc flavor.
See the [API documentation and examples](https://www.mlflow.org/docs/latest/python_api/mlflow.langchain.html?highlight=langchain#module-mlflow.langchain).
## Completions Example
```python
import mlflow
from langchain.chains import LLMChain, PromptTemplate
from langchain_community.llms import MlflowAIGateway
gateway = MlflowAIGateway(
gateway_uri="http://127.0.0.1:5000",
route="completions",
params={
"temperature": 0.0,
"top_p": 0.1,
},
)
llm_chain = LLMChain(
llm=gateway,
prompt=PromptTemplate(
input_variables=["adjective"],
template="Tell me a {adjective} joke",
),
)
result = llm_chain.run(adjective="funny")
print(result)
with mlflow.start_run():
model_info = mlflow.langchain.log_model(llm_chain, "model")
model = mlflow.pyfunc.load_model(model_info.model_uri)
print(model.predict([{"adjective": "funny"}]))
```
## Embeddings Example
```python
from langchain_community.embeddings import MlflowAIGatewayEmbeddings
embeddings = MlflowAIGatewayEmbeddings(
gateway_uri="http://127.0.0.1:5000",
route="embeddings",
)
print(embeddings.embed_query("hello"))
print(embeddings.embed_documents(["hello"]))
```
## Chat Example
```python
from langchain_community.chat_models import ChatMLflowAIGateway
from langchain_core.messages import HumanMessage, SystemMessage
chat = ChatMLflowAIGateway(
gateway_uri="http://127.0.0.1:5000",
route="chat",
params={
"temperature": 0.1
}
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
HumanMessage(
content="Translate this sentence from English to French: I love programming."
),
]
print(chat(messages))
```
## Databricks MLflow AI Gateway
Databricks MLflow AI Gateway is in private preview.
Please contact a Databricks representative to enroll in the preview.
```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import MlflowAIGateway
gateway = MlflowAIGateway(
gateway_uri="databricks",
route="completions",
)
llm_chain = LLMChain(
llm=gateway,
prompt=PromptTemplate(
input_variables=["adjective"],
template="Tell me a {adjective} joke",
),
)
result = llm_chain.run(adjective="funny")
print(result)
```

File diff suppressed because one or more lines are too long

View File

@@ -400,29 +400,18 @@
"def hybrid_query(search_query: str) -> Dict:\n",
" vector = embeddings.embed_query(search_query) # same embeddings as for indexing\n",
" return {\n",
" \"retriever\": {\n",
" \"rrf\": {\n",
" \"retrievers\": [\n",
" {\n",
" \"standard\": {\n",
" \"query\": {\n",
" \"match\": {\n",
" text_field: search_query,\n",
" }\n",
" }\n",
" }\n",
" },\n",
" {\n",
" \"knn\": {\n",
" \"field\": dense_vector_field,\n",
" \"query_vector\": vector,\n",
" \"k\": 5,\n",
" \"num_candidates\": 10,\n",
" }\n",
" },\n",
" ]\n",
" }\n",
" }\n",
" \"query\": {\n",
" \"match\": {\n",
" text_field: search_query,\n",
" },\n",
" },\n",
" \"knn\": {\n",
" \"field\": dense_vector_field,\n",
" \"query_vector\": vector,\n",
" \"k\": 5,\n",
" \"num_candidates\": 10,\n",
" },\n",
" \"rank\": {\"rrf\": {}},\n",
" }\n",
"\n",
"\n",

View File

@@ -21,7 +21,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-google-community"
"%pip install --upgrade --quiet langchain_google_community"
]
},
{

View File

@@ -99,7 +99,7 @@
"vector_store = Chroma(\n",
" collection_name=\"example_collection\",\n",
" embedding_function=embeddings,\n",
" persist_directory=\"./chroma_langchain_db\", # Where to save data locally, remove if not necessary\n",
" persist_directory=\"./chroma_langchain_db\", # Where to save data locally, remove if not neccesary\n",
")"
]
},
@@ -179,7 +179,7 @@
"from langchain_core.documents import Document\n",
"\n",
"document_1 = Document(\n",
" page_content=\"I had chocolate chip pancakes and scrambled eggs for breakfast this morning.\",\n",
" page_content=\"I had chocalate chip pancakes and scrambled eggs for breakfast this morning.\",\n",
" metadata={\"source\": \"tweet\"},\n",
" id=1,\n",
")\n",
@@ -273,7 +273,7 @@
"outputs": [],
"source": [
"updated_document_1 = Document(\n",
" page_content=\"I had chocolate chip pancakes and fried eggs for breakfast this morning.\",\n",
" page_content=\"I had chocalate chip pancakes and fried eggs for breakfast this morning.\",\n",
" metadata={\"source\": \"tweet\"},\n",
" id=1,\n",
")\n",
@@ -287,7 +287,7 @@
"vector_store.update_document(document_id=uuids[0], document=updated_document_1)\n",
"# You can also update multiple documents at once\n",
"vector_store.update_documents(\n",
" ids=uuids[:2], documents=[updated_document_1, updated_document_2]\n",
" ids=uuids[:2], documents=[updated_document_1, updated_document_1]\n",
")"
]
},

View File

@@ -380,7 +380,7 @@
"source": [
"## API reference\n",
"\n",
"For detailed documentation of all `Clickhouse` features and configurations head to the API reference:https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.clickhouse.Clickhouse.html"
"For detailed documentation of all `AstraDBVectorStore` features and configurations head to the API reference:https://python.langchain.com/api_reference/community/vectorstores/langchain_community.vectorstores.clickhouse.Clickhouse.html"
]
}
],

View File

@@ -85,8 +85,8 @@ Build stateful, multi-actor applications with LLMs. Integrates smoothly with Lan
## Additional resources
### [Versions](/docs/versions/v0_3/)
See what changed in v0.3, learn how to migrate legacy code, read up on our versioning policies, and more.
### [Versions](/docs/versions/overview/)
See what changed in v0.2, learn how to migrate legacy code, and read up on our release/versioning policies, and more.
### [Security](/docs/security)
Read up on [security](/docs/security) best practices to make sure you're developing safely with LangChain.

View File

@@ -90,7 +90,7 @@
"source": [
" </TabItem>\n",
" <TabItem value=\"conda\" label=\"Conda\">\n",
" <CodeBlock language=\"bash\">conda install langchain langchain-community langchain-chroma -c conda-forge</CodeBlock>\n",
" <CodeBlock language=\"bash\">conda install langchain langchain_community langchain_chroma -c conda-forge</CodeBlock>\n",
" </TabItem>\n",
"</Tabs>\n",
"\n",

View File

@@ -1,554 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
"metadata": {},
"source": [
"# Migrating off ConversationBufferMemory or ConversationStringBufferMemory\n",
"\n",
"[ConversationBufferMemory](https://python.langchain.com/api_reference/langchain/memory/langchain.memory.buffer.ConversationBufferMemory.html)\n",
"and [ConversationStringBufferMemory](https://python.langchain.com/api_reference/langchain/memory/langchain.memory.buffer.ConversationStringBufferMemory.html)\n",
" were used to keep track of a conversation between a human and an ai asstistant without any additional processing. \n",
"\n",
"\n",
":::note\n",
"The `ConversationStringBufferMemory` is equivalent to `ConversationBufferMemory` but was targeting LLMs that were not chat models.\n",
":::\n",
"\n",
"The methods for handling conversation history using existing modern primitives are:\n",
"\n",
"1. Using [LangGraph persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/) along with appropriate processing of the message history\n",
"2. Using LCEL with [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#) combined with appropriate processing of the message history.\n",
"\n",
"Most users will find [LangGraph persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/) both easier to use and configure than the equivalent LCEL, especially for more complex use cases."
]
},
{
"cell_type": "markdown",
"id": "d07f9459-9fb6-4942-99c9-64558aedd7d4",
"metadata": {},
"source": [
"## Set up"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b99b47ec",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install --upgrade --quiet langchain-openai langchain"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "717c8673",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "e3621b62-a037-42b8-8faa-59575608bb8b",
"metadata": {},
"source": [
"## Usage with LLMChain / ConversationChain\n",
"\n",
"This section shows how to migrate off `ConversationBufferMemory` or `ConversationStringBufferMemory` that's used together with either an `LLMChain` or a `ConversationChain`.\n",
"\n",
"### Legacy\n",
"\n",
"Below is example usage of `ConversationBufferMemory` with an `LLMChain` or an equivalent `ConversationChain`.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8b6e1063-cf3a-456a-bf7d-830e5c1d2864",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'text': 'Hello Bob! How can I assist you today?', 'chat_history': [HumanMessage(content='my name is bob', additional_kwargs={}, response_metadata={}), AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, response_metadata={})]}\n"
]
}
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" MessagesPlaceholder,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
" HumanMessagePromptTemplate.from_template(\"{text}\"),\n",
" ]\n",
")\n",
"\n",
"# highlight-start\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"# highlight-end\n",
"\n",
"legacy_chain = LLMChain(\n",
" llm=ChatOpenAI(),\n",
" prompt=prompt,\n",
" # highlight-next-line\n",
" memory=memory,\n",
")\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"my name is bob\"})\n",
"print(legacy_result)\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"what was my name\"})"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "c7fa1618",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Your name is Bob. How can I assist you today, Bob?'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"legacy_result[\"text\"]"
]
},
{
"cell_type": "markdown",
"id": "3599774f-b56e-4ba3-876c-624f0270b8ac",
"metadata": {},
"source": [
":::note\n",
"Note that there is no support for separating conversation threads in a single memory object\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LangGraph\n",
"\n",
"The example below shows how to use LangGraph to implement a `ConversationChain` or `LLMChain` with `ConversationBufferMemory`.\n",
"\n",
"This example assumes that you're already somewhat familiar with `LangGraph`. If you're not, then please see the [LangGraph Quickstart Guide](https://langchain-ai.github.io/langgraph/tutorials/introduction/) for more details.\n",
"\n",
"`LangGraph` offers a lot of additional functionality (e.g., time-travel and interrupts) and will work well for other more complex (and realistic) architectures.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e591965c-c4d7-4df7-966d-4d14bd46e157",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! I'm bob\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello Bob! How can I assist you today?\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"what was my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Your name is Bob. How can I help you today, Bob?\n"
]
}
],
"source": [
"import uuid\n",
"\n",
"from IPython.display import Image, display\n",
"from langchain_core.messages import HumanMessage\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import START, MessagesState, StateGraph\n",
"\n",
"# Define a new graph\n",
"workflow = StateGraph(state_schema=MessagesState)\n",
"\n",
"# Define a chat model\n",
"model = ChatOpenAI()\n",
"\n",
"\n",
"# Define the function that calls the model\n",
"def call_model(state: MessagesState):\n",
" response = model.invoke(state[\"messages\"])\n",
" # We return a list, because this will get added to the existing list\n",
" return {\"messages\": response}\n",
"\n",
"\n",
"# Define the two nodes we will cycle between\n",
"workflow.add_edge(START, \"model\")\n",
"workflow.add_node(\"model\", call_model)\n",
"\n",
"\n",
"# Adding memory is straight forward in langgraph!\n",
"# highlight-next-line\n",
"memory = MemorySaver()\n",
"\n",
"app = workflow.compile(\n",
" # highlight-next-line\n",
" checkpointer=memory\n",
")\n",
"\n",
"\n",
"# The thread id is a unique key that identifies\n",
"# this particular conversation.\n",
"# We'll just generate a random uuid here.\n",
"# This enables a single application to manage conversations among multiple users.\n",
"thread_id = uuid.uuid4()\n",
"# highlight-next-line\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"\n",
"\n",
"input_message = HumanMessage(content=\"hi! I'm bob\")\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()\n",
"\n",
"# Here, let's confirm that the AI remembers our name!\n",
"input_message = HumanMessage(content=\"what was my name?\")\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "9893029f-43f3-4703-89bf-e0e8fa18aff3",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LCEL RunnableWithMessageHistory\n",
"\n",
"Alternatively, if you have a simple chain, you can wrap the chat model of the chain within a [RunnableWithMessageHistory](https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html).\n",
"\n",
"Please refer to the following [migration guide](/docs/versions/migrating_chains/conversation_chain/) for more information.\n",
"\n",
"\n",
"## Usasge with a pre-built agent\n",
"\n",
"This example shows usage of an Agent Executor with a pre-built agent constructed using the [create_tool_calling_agent](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) function.\n",
"\n",
"If you are using one of the [old LangChain pre-built agents](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/), you should be able\n",
"to replace that code with the new [langgraph pre-built agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/) which leverages\n",
"native tool calling capabilities of chat models and will likely work better out of the box.\n",
"\n",
"### Legacy Usage\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "dc2928de-d7a4-4f87-ab96-59bde9a3829f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'input': 'hi! my name is bob what is my age?', 'chat_history': [HumanMessage(content='hi! my name is bob what is my age?', additional_kwargs={}, response_metadata={}), AIMessage(content='Bob, you are 42 years old.', additional_kwargs={}, response_metadata={})], 'output': 'Bob, you are 42 years old.'}\n",
"\n",
"{'input': 'do you remember my name?', 'chat_history': [HumanMessage(content='hi! my name is bob what is my age?', additional_kwargs={}, response_metadata={}), AIMessage(content='Bob, you are 42 years old.', additional_kwargs={}, response_metadata={}), HumanMessage(content='do you remember my name?', additional_kwargs={}, response_metadata={}), AIMessage(content='Yes, your name is Bob.', additional_kwargs={}, response_metadata={})], 'output': 'Yes, your name is Bob.'}\n"
]
}
],
"source": [
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(temperature=0)\n",
"\n",
"\n",
"@tool\n",
"def get_user_age(name: str) -> str:\n",
" \"\"\"Use this tool to find the user's age.\"\"\"\n",
" # This is a placeholder for the actual implementation\n",
" if \"bob\" in name.lower():\n",
" return \"42 years old\"\n",
" return \"41 years old\"\n",
"\n",
"\n",
"tools = [get_user_age]\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{input}\"),\n",
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
" ]\n",
")\n",
"\n",
"# Construct the Tools agent\n",
"agent = create_tool_calling_agent(model, tools, prompt)\n",
"# Instantiate memory\n",
"# highlight-start\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"# highlight-end\n",
"\n",
"# Create an agent\n",
"agent = create_tool_calling_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(\n",
" agent=agent,\n",
" tools=tools,\n",
" # highlight-next-line\n",
" memory=memory, # Pass the memory to the executor\n",
")\n",
"\n",
"# Verify that the agent can use tools\n",
"print(agent_executor.invoke({\"input\": \"hi! my name is bob what is my age?\"}))\n",
"print()\n",
"# Verify that the agent has access to conversation history.\n",
"# The agent should be able to answer that the user's name is bob.\n",
"print(agent_executor.invoke({\"input\": \"do you remember my name?\"}))"
]
},
{
"cell_type": "markdown",
"id": "a4866ae9-e683-44dc-a77b-da1737d3a645",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LangGraph\n",
"\n",
"You can follow the standard LangChain tutorial for [building an agent](/docs/tutorials/agents/) an in depth explanation of how this works.\n",
"\n",
"This example is shown here explicitly to make it easier for users to compare the legacy implementation vs. the corresponding langgraph implementation.\n",
"\n",
"This example shows how to add memory to the [pre-built react agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) in langgraph.\n",
"\n",
"For more details, please see the [how to add memory to the prebuilt ReAct agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent-memory/) guide in langgraph.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "bdb29c9b-bc57-4512-9430-c5d5e3f91e3c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! I'm bob. What is my age?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
" get_user_age (call_oEDwEbIDNdokwqhAV6Azn47c)\n",
" Call ID: call_oEDwEbIDNdokwqhAV6Azn47c\n",
" Args:\n",
" name: bob\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: get_user_age\n",
"\n",
"42 years old\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Bob, you are 42 years old! If you need any more assistance or information, feel free to ask.\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"do you remember my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Yes, your name is Bob. If you have any other questions or need assistance, feel free to ask!\n"
]
}
],
"source": [
"import uuid\n",
"\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"\n",
"@tool\n",
"def get_user_age(name: str) -> str:\n",
" \"\"\"Use this tool to find the user's age.\"\"\"\n",
" # This is a placeholder for the actual implementation\n",
" if \"bob\" in name.lower():\n",
" return \"42 years old\"\n",
" return \"41 years old\"\n",
"\n",
"\n",
"# highlight-next-line\n",
"memory = MemorySaver()\n",
"model = ChatOpenAI()\n",
"app = create_react_agent(\n",
" model,\n",
" tools=[get_user_age],\n",
" # highlight-next-line\n",
" checkpointer=memory,\n",
")\n",
"\n",
"# highlight-start\n",
"# The thread id is a unique key that identifies\n",
"# this particular conversation.\n",
"# We'll just generate a random uuid here.\n",
"# This enables a single application to manage conversations among multiple users.\n",
"thread_id = uuid.uuid4()\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"# highlight-end\n",
"\n",
"# Tell the AI that our name is Bob, and ask it to use a tool to confirm\n",
"# that it's capable of working like an agent.\n",
"input_message = HumanMessage(content=\"hi! I'm bob. What is my age?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()\n",
"\n",
"# Confirm that the chat bot has access to previous conversation\n",
"# and can respond to the user saying that the user's name is Bob.\n",
"input_message = HumanMessage(content=\"do you remember my name?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "87d14cef-a51e-44be-b376-f31b723caaf8",
"metadata": {},
"source": [
"If we use a different thread ID, it'll start a new conversation and the bot will not know our name!"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "fe63e424-1111-4f6a-a9c9-0887eb150ab0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! do you remember my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello! Yes, I remember your name. It's great to see you again! How can I assist you today?\n"
]
}
],
"source": [
"config = {\"configurable\": {\"thread_id\": \"123456789\"}}\n",
"\n",
"input_message = HumanMessage(content=\"hi! do you remember my name?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "b2717810",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"Explore persistence with LangGraph:\n",
"\n",
"* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)\n",
"* [How to add persistence (\"memory\") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)\n",
"* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)\n",
"* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)\n",
"\n",
"Add persistence with simple LCEL (favor langgraph for more complex use cases):\n",
"\n",
"* [How to add message history](/docs/how_to/message_history/)\n",
"\n",
"Working with message history:\n",
"\n",
"* [How to trim messages](/docs/how_to/trim_messages)\n",
"* [How to filter messages](/docs/how_to/filter_messages/)\n",
"* [How to merge message runs](/docs/how_to/merge_message_runs/)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce4c48e1-b613-4aab-bc2b-617c811fad1d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,728 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
"metadata": {},
"source": [
"# Migrating off ConversationBufferWindowMemory or ConversationTokenBufferMemory\n",
"\n",
"Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n",
"\n",
"\n",
"| Memory Type | Description |\n",
"|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
"| `ConversationBufferWindowMemory` | Keeps the last `n` messages of the conversation. Drops the oldest messages when there are more than `n` messages. |\n",
"| `ConversationTokenBufferMemory` | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |\n",
"\n",
"`ConversationBufferWindowMemory` and `ConversationTokenBufferMemory` apply additional processing on top of the raw conversation history to trim the conversation history to a size that fits inside the context window of a chat model. \n",
"\n",
"This processing functionality can be accomplished using LangChain's built-in [trim_messages](https://python.langchain.com/api_reference/core/messages/langchain_core.messages.utils.trim_messages.html) function."
]
},
{
"cell_type": "markdown",
"id": "79935247-acc7-4a05-a387-5d72c9c8c8cb",
"metadata": {},
"source": [
":::important\n",
"\n",
"Well begin by exploring a straightforward method that involves applying processing logic to the entire conversation history.\n",
"\n",
"While this approach is easy to implement, it has a downside: as the conversation grows, so does the latency, since the logic is re-applied to all previous exchanges in the conversation at each turn.\n",
"\n",
"More advanced strategies focus on incrementally updating the conversation history to avoid redundant processing.\n",
"\n",
"For instance, the langgraph [how-to guide on summarization](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/) demonstrates\n",
"how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "d07f9459-9fb6-4942-99c9-64558aedd7d4",
"metadata": {},
"source": [
"## Set up"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b99b47ec",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install --upgrade --quiet langchain-openai langchain"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7127478f-4413-48be-bfec-d0cd91b8cf70",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"if \"OPENAI_API_KEY\" not in os.environ:\n",
" os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "d6a7bc93-21a9-44c8-842e-9cc82f1ada7c",
"metadata": {},
"source": [
"## Legacy usage with LLMChain / Conversation Chain\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "371616e1-ca41-4a57-99e0-5fbf7d63f2ad",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'text': 'Nice to meet you, Bob! How can I assist you today?', 'chat_history': []}\n",
"{'text': 'Your name is Bob. How can I assist you further, Bob?', 'chat_history': [HumanMessage(content='my name is bob', additional_kwargs={}, response_metadata={}), AIMessage(content='Nice to meet you, Bob! How can I assist you today?', additional_kwargs={}, response_metadata={})]}\n"
]
}
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" MessagesPlaceholder,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate(\n",
" [\n",
" SystemMessage(content=\"You are a helpful assistant.\"),\n",
" MessagesPlaceholder(variable_name=\"chat_history\"),\n",
" HumanMessagePromptTemplate.from_template(\"{text}\"),\n",
" ]\n",
")\n",
"\n",
"# highlight-start\n",
"memory = ConversationBufferWindowMemory(memory_key=\"chat_history\", return_messages=True)\n",
"# highlight-end\n",
"\n",
"legacy_chain = LLMChain(\n",
" llm=ChatOpenAI(),\n",
" prompt=prompt,\n",
" # highlight-next-line\n",
" memory=memory,\n",
")\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"my name is bob\"})\n",
"print(legacy_result)\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"what was my name\"})\n",
"print(legacy_result)"
]
},
{
"cell_type": "markdown",
"id": "f48cac47-c8b6-444c-8e1b-f7115c0b2d8d",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Reimplementing ConversationBufferWindowMemory logic\n",
"\n",
"Let's first create appropriate logic to process the conversation history, and then we'll see how to integrate it into an application. You can later replace this basic setup with more advanced logic tailored to your specific needs.\n",
"\n",
"We'll use `trim_messages` to implement logic that keeps the last `n` messages of the conversation. It will drop the oldest messages when the number of messages exceeds `n`.\n",
"\n",
"In addition, we will also keep the system message if it's present -- when present, it's the first message in a conversation that includes instructions for the chat model."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0a92b3f3-0315-46ac-bb28-d07398dd23ea",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import (\n",
" AIMessage,\n",
" BaseMessage,\n",
" HumanMessage,\n",
" SystemMessage,\n",
" trim_messages,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"messages = [\n",
" SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n",
" HumanMessage(\"i wonder why it's called langchain\"),\n",
" AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" ),\n",
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
" AIMessage(\n",
" \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n",
" ),\n",
" HumanMessage(\"why is 42 always the answer?\"),\n",
" AIMessage(\n",
" \"Because its the only number thats constantly right, even when it doesnt add up!\"\n",
" ),\n",
" HumanMessage(\"What did the cow say?\"),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e7ddf8dc-ea27-43e2-8800-9f7c1d4abdc1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m System Message \u001b[0m================================\n",
"\n",
"you're a good assistant, you always respond with a joke.\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hmmm let me think.\n",
"\n",
"Why, he's probably chasing after the last cup of coffee in the office!\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"why is 42 always the answer?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Because its the only number thats constantly right, even when it doesnt add up!\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"What did the cow say?\n"
]
}
],
"source": [
"from langchain_core.messages import trim_messages\n",
"\n",
"selected_messages = trim_messages(\n",
" messages,\n",
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
")\n",
"\n",
"for msg in selected_messages:\n",
" msg.pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "18f73819-05e0-41f3-a0e7-a5fd6701d9ef",
"metadata": {},
"source": [
"## Reimplementing ConversationTokenBufferMemory logic\n",
"\n",
"Here, we'll use `trim_messages` to keeps the system message and the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. \n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6442f74b-2c36-48fd-a3d1-c7c5d18c050f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m System Message \u001b[0m================================\n",
"\n",
"you're a good assistant, you always respond with a joke.\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"why is 42 always the answer?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Because its the only number thats constantly right, even when it doesnt add up!\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"What did the cow say?\n"
]
}
],
"source": [
"from langchain_core.messages import trim_messages\n",
"\n",
"selected_messages = trim_messages(\n",
" messages,\n",
" # Please see API reference for trim_messages for other ways to specify a token counter.\n",
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
" max_tokens=80, # <-- token limit\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" strategy=\"last\",\n",
" include_system=True, # <-- Keep the system message\n",
")\n",
"\n",
"for msg in selected_messages:\n",
" msg.pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "0f05d272-2d22-44b7-9fa6-e617a48584b4",
"metadata": {},
"source": [
"## Modern usage with LangGraph\n",
"\n",
"The example below shows how to use LangGraph to add simple conversation pre-processing logic.\n",
"\n",
":::note\n",
"\n",
"If you want to avoid running the computation on the entire conversation history each time, you can follow\n",
"the [how-to guide on summarization](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/) that demonstrates\n",
"how to discard older messages, ensuring they aren't re-processed during later turns.\n",
"\n",
":::\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "7d6f79a3-fda7-48fd-9128-bbe4aad84199",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! I'm bob\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Hello Bob! How can I assist you today?\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"what was my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Your name is Bob. How can I help you, Bob?\n"
]
}
],
"source": [
"import uuid\n",
"\n",
"from IPython.display import Image, display\n",
"from langchain_core.messages import HumanMessage\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import START, MessagesState, StateGraph\n",
"\n",
"# Define a new graph\n",
"workflow = StateGraph(state_schema=MessagesState)\n",
"\n",
"# Define a chat model\n",
"model = ChatOpenAI()\n",
"\n",
"\n",
"# Define the function that calls the model\n",
"def call_model(state: MessagesState):\n",
" # highlight-start\n",
" selected_messages = trim_messages(\n",
" state[\"messages\"],\n",
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
" )\n",
"\n",
" # highlight-end\n",
" response = model.invoke(selected_messages)\n",
" # We return a list, because this will get added to the existing list\n",
" return {\"messages\": response}\n",
"\n",
"\n",
"# Define the two nodes we will cycle between\n",
"workflow.add_edge(START, \"model\")\n",
"workflow.add_node(\"model\", call_model)\n",
"\n",
"\n",
"# Adding memory is straight forward in langgraph!\n",
"# highlight-next-line\n",
"memory = MemorySaver()\n",
"\n",
"app = workflow.compile(\n",
" # highlight-next-line\n",
" checkpointer=memory\n",
")\n",
"\n",
"\n",
"# The thread id is a unique key that identifies\n",
"# this particular conversation.\n",
"# We'll just generate a random uuid here.\n",
"thread_id = uuid.uuid4()\n",
"# highlight-next-line\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"\n",
"input_message = HumanMessage(content=\"hi! I'm bob\")\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()\n",
"\n",
"# Here, let's confirm that the AI remembers our name!\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"input_message = HumanMessage(content=\"what was my name?\")\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"cell_type": "markdown",
"id": "84229e2e-a578-4b21-840a-814223406402",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Usage with a pre-built langgraph agent\n",
"\n",
"This example shows usage of an Agent Executor with a pre-built agent constructed using the [create_tool_calling_agent](https://api.python.langchain.com/en/latest/agents/langchain.agents.tool_calling_agent.base.create_tool_calling_agent.html) function.\n",
"\n",
"If you are using one of the [old LangChain pre-built agents](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/), you should be able\n",
"to replace that code with the new [langgraph pre-built agent](https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/) which leverages\n",
"native tool calling capabilities of chat models and will likely work better out of the box.\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "f671db87-8f01-453e-81fd-4e603140a512",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"hi! I'm bob. What is my age?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
" get_user_age (call_jsMvoIFv970DhqqLCJDzPKsp)\n",
" Call ID: call_jsMvoIFv970DhqqLCJDzPKsp\n",
" Args:\n",
" name: bob\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
"Name: get_user_age\n",
"\n",
"42 years old\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Bob, you are 42 years old.\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
"do you remember my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Yes, your name is Bob.\n"
]
}
],
"source": [
"import uuid\n",
"\n",
"from langchain_core.messages import (\n",
" AIMessage,\n",
" BaseMessage,\n",
" HumanMessage,\n",
" SystemMessage,\n",
" trim_messages,\n",
")\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"\n",
"@tool\n",
"def get_user_age(name: str) -> str:\n",
" \"\"\"Use this tool to find the user's age.\"\"\"\n",
" # This is a placeholder for the actual implementation\n",
" if \"bob\" in name.lower():\n",
" return \"42 years old\"\n",
" return \"41 years old\"\n",
"\n",
"\n",
"memory = MemorySaver()\n",
"model = ChatOpenAI()\n",
"\n",
"\n",
"# highlight-start\n",
"def state_modifier(state) -> list[BaseMessage]:\n",
" \"\"\"Given the agent state, return a list of messages for the chat model.\"\"\"\n",
" # We're using the message processor defined above.\n",
" return trim_messages(\n",
" state[\"messages\"],\n",
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
" )\n",
"\n",
"\n",
"# highlight-end\n",
"\n",
"app = create_react_agent(\n",
" model,\n",
" tools=[get_user_age],\n",
" checkpointer=memory,\n",
" # highlight-next-line\n",
" state_modifier=state_modifier,\n",
")\n",
"\n",
"# The thread id is a unique key that identifies\n",
"# this particular conversation.\n",
"# We'll just generate a random uuid here.\n",
"thread_id = uuid.uuid4()\n",
"config = {\"configurable\": {\"thread_id\": thread_id}}\n",
"\n",
"# Tell the AI that our name is Bob, and ask it to use a tool to confirm\n",
"# that it's capable of working like an agent.\n",
"input_message = HumanMessage(content=\"hi! I'm bob. What is my age?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()\n",
"\n",
"# Confirm that the chat bot has access to previous conversation\n",
"# and can respond to the user saying that the user's name is Bob.\n",
"input_message = HumanMessage(content=\"do you remember my name?\")\n",
"\n",
"for event in app.stream({\"messages\": [input_message]}, config, stream_mode=\"values\"):\n",
" event[\"messages\"][-1].pretty_print()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "f4d16e09-1d90-4153-8576-6d3996cb5a6c",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## LCEL: Add a preprocessing step\n",
"\n",
"The simplest way to add complex conversation management is by introducing a pre-processing step in front of the chat model and pass the full conversation history to the pre-processing step.\n",
"\n",
"This approach is conceptually simple and will work in many situations; for example, if using a [RunnableWithMessageHistory](/docs/how_to/message_history/) instead of wrapping the chat model, wrap the chat model with the pre-processor.\n",
"\n",
"The obvious downside of this approach is that latency starts to increase as the conversation history grows because of two reasons:\n",
"\n",
"1. As the conversation gets longer, more data may need to be fetched from whatever store your'e using to store the conversation history (if not storing it in memory).\n",
"2. The pre-processing logic will end up doing a lot of redundant computation, repeating computation from previous steps of the conversation.\n",
"\n",
":::caution\n",
"\n",
"If you want to use a chat model's tool calling capabilities, remember to bind the tools to the model before adding the history pre-processing step to it!\n",
"\n",
":::\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "072046bb-3892-4206-8ae5-025e93110dcc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
" what_did_the_cow_say (call_urHTB5CShhcKz37QiVzNBlIS)\n",
" Call ID: call_urHTB5CShhcKz37QiVzNBlIS\n",
" Args:\n"
]
}
],
"source": [
"from langchain_core.messages import (\n",
" AIMessage,\n",
" BaseMessage,\n",
" HumanMessage,\n",
" SystemMessage,\n",
" trim_messages,\n",
")\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"\n",
"\n",
"@tool\n",
"def what_did_the_cow_say() -> str:\n",
" \"\"\"Check to see what the cow said.\"\"\"\n",
" return \"foo\"\n",
"\n",
"\n",
"# highlight-start\n",
"message_processor = trim_messages( # Returns a Runnable if no messages are provided\n",
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
")\n",
"# highlight-end\n",
"\n",
"# Note that we bind tools to the model first!\n",
"model_with_tools = model.bind_tools([what_did_the_cow_say])\n",
"\n",
"# highlight-next-line\n",
"model_with_preprocessor = message_processor | model_with_tools\n",
"\n",
"full_history = [\n",
" SystemMessage(\"you're a good assistant, you always respond with a joke.\"),\n",
" HumanMessage(\"i wonder why it's called langchain\"),\n",
" AIMessage(\n",
" 'Well, I guess they thought \"WordRope\" and \"SentenceString\" just didn\\'t have the same ring to it!'\n",
" ),\n",
" HumanMessage(\"and who is harrison chasing anyways\"),\n",
" AIMessage(\n",
" \"Hmmm let me think.\\n\\nWhy, he's probably chasing after the last cup of coffee in the office!\"\n",
" ),\n",
" HumanMessage(\"why is 42 always the answer?\"),\n",
" AIMessage(\n",
" \"Because its the only number thats constantly right, even when it doesnt add up!\"\n",
" ),\n",
" HumanMessage(\"What did the cow say?\"),\n",
"]\n",
"\n",
"\n",
"# We pass it explicity to the model_with_preprocesor for illustrative purposes.\n",
"# If you're using `RunnableWithMessageHistory` the history will be automatically\n",
"# read from the source the you configure.\n",
"model_with_preprocessor.invoke(full_history).pretty_print()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "5da7225a-5e94-4f53-bb0d-86b6b528d150",
"metadata": {},
"source": [
"</details>\n",
"\n",
"If you need to implement more efficient logic and want to use `RunnableWithMessageHistory` for now the way to achieve this\n",
"is to subclass from [BaseChatMessageHistory](https://api.python.langchain.com/en/latest/chat_history/langchain_core.chat_history.BaseChatMessageHistory.html) and\n",
"define appropriate logic for `add_messages` (that doesn't simply append the history, but instead re-writes it).\n",
"\n",
"Unless you have a good reason to implement this solution, you should instead use LangGraph."
]
},
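{
"cell_type": "markdown",
"id": "a1b2c3d4-5e6f-4a7b-8c9d-0e1f2a3b4c5d",
"metadata": {},
"source": [
"Below is a minimal sketch of that approach. The class name and the in-memory list are assumptions for illustration only; the key idea is that `add_messages` re-writes the stored history instead of appending to it without bound:\n",
"\n",
"```python\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.messages import BaseMessage\n",
"\n",
"\n",
"class TrimmingChatMessageHistory(BaseChatMessageHistory):  # hypothetical name\n",
"    def __init__(self, max_messages: int = 5) -> None:\n",
"        self.messages: list[BaseMessage] = []\n",
"        self.max_messages = max_messages\n",
"\n",
"    def add_messages(self, messages: list[BaseMessage]) -> None:\n",
"        # Re-write the history: keep only the most recent messages.\n",
"        self.messages = (self.messages + list(messages))[-self.max_messages :]\n",
"\n",
"    def clear(self) -> None:\n",
"        self.messages = []\n",
"```"
]
},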
{
"cell_type": "markdown",
"id": "b2717810",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Explore persistence with LangGraph:\n",
"\n",
"* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)\n",
"* [How to add persistence (\"memory\") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)\n",
"* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)\n",
"* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)\n",
"\n",
"Add persistence with simple LCEL (favor langgraph for more complex use cases):\n",
"\n",
"* [How to add message history](/docs/how_to/message_history/)\n",
"\n",
"Working with message history:\n",
"\n",
"* [How to trim messages](/docs/how_to/trim_messages)\n",
"* [How to filter messages](/docs/how_to/filter_messages/)\n",
"* [How to merge message runs](/docs/how_to/merge_message_runs/)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4adad0b-3e25-47d9-a8e6-6a9c6c616f14",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,45 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ce8457ed-c0b1-4a74-abbd-9d3d2211270f",
"metadata": {},
"source": [
"# Migrating off ConversationSummaryMemory or ConversationSummaryBufferMemory\n",
"\n",
"Follow this guide if you're trying to migrate off one of the old memory classes listed below:\n",
"\n",
"\n",
"| Memory Type | Description |\n",
"|---------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------|\n",
"| `ConversationSummaryMemory` | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. |\n",
"| `ConversationSummaryBufferMemory` | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |\n",
"\n",
"Please follow the following [how-to guide on summarization](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/) in LangGraph. \n",
"\n",
"This guide shows how to maintain a running summary of the conversation while discarding older messages, ensuring they aren't re-processed during later turns."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -1,118 +0,0 @@
---
sidebar_position: 1
---
# How to migrate from v0.0 memory
The concept of memory has evolved significantly in LangChain since its initial release.
Broadly speaking, LangChain 0.0.x memory was used to handle three main use cases:
| Use Case | Example |
|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|
| Managing conversation history | Keep only the last `n` turns of the conversation between the user and the AI. |
| Extraction of structured information | Extract structured information from the conversation history, such as a list of facts learned about the user. |
| Composite memory implementations | Combine multiple memory sources, e.g., a list of known facts about the user along with facts learned during a given conversation. |
While the LangChain 0.0.x memory abstractions were useful, they were limited in their capabilities and not well suited for real-world conversational AI applications. These memory abstractions lacked built-in support for multi-user, multi-conversation scenarios, which are essential for practical conversational AI systems.
This guide will help you migrate your usage of memory implementations from LangChain v0.0.x to the persistence implementations of LangGraph.
## Why use LangGraph for memory?
The main advantages of persistence implementation in LangGraph are:
- Built-in support for multi-user, multi-conversation scenarios which is often a requirement for real-world conversational AI applications.
- Ability to save and resume complex state at any time for error recovery, human-in-the-loop workflows, time travel interactions, and more.
- Full support for both [LLM](/docs/concepts/#llms) and [chat models](/docs/concepts/#chat-models). In contrast, the v0.0.x memory abstractions were created prior to the existence and widespread adoption of chat model APIs, and so they do not work well with chat models (e.g., they fail with tool-calling chat models).
- Offers a high degree of customization and control over the memory implementation, including the ability to use different backends.
## Migrations
:::info Prerequisites
These guides assume some familiarity with the following concepts:
- [LangGraph](https://langchain-ai.github.io/langgraph/)
- [v0.0.x Memory](https://python.langchain.com/v0.1/docs/modules/memory/)
- [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)
:::
### 1. Managing conversation history
The goal of managing conversation history is to store and retrieve the history in a way that is optimal for a chat model to use.
Often this involves trimming and / or summarizing the conversation history to keep the most relevant parts of the conversation while having the conversation fit inside the context window of the chat model.
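For example, trimming can be done with `trim_messages` (a rough sketch; here `token_counter=len` counts whole messages rather than tokens):
```python
from langchain_core.messages import AIMessage, HumanMessage, trim_messages

history = [HumanMessage("hi"), AIMessage("hello!"), HumanMessage("tell me a joke")]
# Keep only the 2 most recent messages.
recent = trim_messages(history, token_counter=len, max_tokens=2, strategy="last")
```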
Memory classes that fall into this category include:
| Memory Type | How to Migrate | Description |
|-----------------------------------|:-------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `ConversationBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A basic memory implementation that simply stores the conversation history. |
| `ConversationStringBufferMemory` | [Link to Migration Guide](conversation_buffer_memory) | A special case of `ConversationBufferMemory` designed for LLMs and no longer relevant. |
| `ConversationBufferWindowMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps the last `n` turns of the conversation. Drops the oldest turn when the buffer is full. |
| `ConversationTokenBufferMemory` | [Link to Migration Guide](conversation_buffer_window_memory) | Keeps only the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
| `ConversationSummaryMemory` | [Link to Migration Guide](conversation_summary_memory) | Continually summarizes the conversation history. The summary is updated after each conversation turn. The abstraction returns the summary of the conversation history. |
| `ConversationSummaryBufferMemory` | [Link to Migration Guide](conversation_summary_memory) | Provides a running summary of the conversation together with the most recent messages in the conversation under the constraint that the total number of tokens in the conversation does not exceed a certain limit. |
| `VectorStoreRetrieverMemory` | No migration guide yet | Stores the conversation history in a vector store and retrieves the most relevant parts of past conversation based on the input. |
### 2. Extraction of structured information from the conversation history
Memory classes that fall into this category include:
| Memory Type | Description |
|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `BaseEntityStore` | An abstract interface that resembles a key-value store. It was used for storing structured information learned during the conversation. The information had to be represented as a dictionary of key-value pairs. |
| `ConversationEntityMemory` | Combines the ability to summarize the conversation while extracting structured information from the conversation history. |
And specific backend implementations of abstractions:
| Memory Type | Description |
|---------------------------|----------------------------------------------------------------------------------------------------------|
| `InMemoryEntityStore` | An implementation of `BaseEntityStore` that stores the information in the literal computer memory (RAM). |
| `RedisEntityStore` | A specific implementation of `BaseEntityStore` that uses Redis as the backend. |
| `SQLiteEntityStore` | A specific implementation of `BaseEntityStore` that uses SQLite as the backend. |
| `UpstashRedisEntityStore` | A specific implementation of `BaseEntityStore` that uses Upstash as the backend. |
These abstractions have not received much development since their initial release. The reason
is that for these abstractions to be useful they typically require a lot of specialization for a particular application, so these
abstractions are not as widely used as the conversation history management abstractions.
For this reason, there are no migration guides for these abstractions. If you're struggling to migrate an application
that relies on these abstractions, please open an issue on the LangChain GitHub repository and we'll try to prioritize providing
more guidance on how to migrate these abstractions.
The general strategy is to use a chat model with tool-calling capabilities to extract the structured information from the conversation history.
The extracted information can then be saved into an appropriate data structure (e.g., a dictionary), and information from it can be retrieved and added into the prompt as needed.
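A rough sketch of that strategy (the schema and the choice of model here are illustrative assumptions, not part of the original abstractions):
```python
from typing import Optional

from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class UserFacts(BaseModel):
    """Structured facts learned about the user during the conversation."""

    name: Optional[str] = Field(default=None, description="The user's name, if stated")
    age: Optional[int] = Field(default=None, description="The user's age, if stated")


model = ChatOpenAI(model="gpt-4o")
extractor = model.with_structured_output(UserFacts)

facts = extractor.invoke("hi! I'm bob and I'm 42 years old")
known_facts = facts.model_dump(exclude_none=True)  # e.g., {'name': 'bob', 'age': 42}
```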
### 3. Implementations that provide composite logic on top of one or more memory implementations
Memory classes that fall into this category include:
| Memory Type | Description |
|------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `CombinedMemory` | This abstraction accepted a list of `BaseMemory` and fetched relevant memory information from each of them based on the input. |
| `SimpleMemory` | Used to add read-only hard-coded context. Users can simply write this information into the prompt. |
| `ReadOnlySharedMemory` | Provided a read-only view of an existing `BaseMemory` implementation. |
These implementations did not seem to be used widely or provide significant value. Users should be able
to re-implement these without too much difficulty in custom code.
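For example, the read-only context that `SimpleMemory` provided can simply be written into the prompt (a minimal sketch; the facts and the choice of model are placeholders):
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant. Known facts about the user: {facts}"),
        ("human", "{question}"),
    ]
)
chain = prompt | ChatOpenAI()

chain.invoke({"facts": "The user's name is Bob.", "question": "what's my name?"})
```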
## Related Resources
Explore persistence with LangGraph:
* [LangGraph quickstart tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/)
* [How to add persistence ("memory") to your graph](https://langchain-ai.github.io/langgraph/how-tos/persistence/)
* [How to manage conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/manage-conversation-history/)
* [How to add summary of the conversation history](https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/)
Add persistence with simple LCEL (favor langgraph for more complex use cases):
* [How to add message history](https://python.langchain.com/docs/how_to/message_history/)
Working with message history:
* [How to trim messages](https://python.langchain.com/docs/how_to/trim_messages)
* [How to filter messages](https://python.langchain.com/docs/how_to/filter_messages/)
* [How to merge message runs](https://python.langchain.com/docs/how_to/merge_message_runs/)

View File

@@ -1,8 +1,9 @@
---
sidebar_position: 0
sidebar_label: Overview of v0.2
---
# Overview
# Overview of LangChain v0.2
## What's new in LangChain?

View File

@@ -8,7 +8,7 @@ keywords: [retrievalqa, llmchain, conversationalretrievalchain]
This code contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.
New features and improvements are not listed here. See the [overview](/docs/versions/v0_2/overview/) for a summary of what's new in this release.
New features and improvements are not listed here. See the [overview](/docs/versions/overview/) for a summary of what's new in this release.
## Breaking changes

View File

@@ -2,7 +2,7 @@
sidebar_position: 1
---
# Migration
# Migrating to LangChain v0.2

View File

@@ -3,7 +3,7 @@ sidebar_position: 2
sidebar_label: astream_events v2
---
# Migrating to astream_events(..., version="v2")
# Migrating to Astream Events v2
We've added a `v2` of the astream_events API with the release of `0.2.x`. You can see this [PR](https://github.com/langchain-ai/langchain/pull/21638) for more details.

View File

@@ -1,271 +0,0 @@
# LangChain v0.3
*Last updated: 09.16.24*
## What's changed
* All packages have been upgraded from Pydantic 1 to Pydantic 2 internally. Use of Pydantic 2 in user code is fully supported with all packages without the need for bridges like `langchain_core.pydantic_v1` or `pydantic.v1`.
* Pydantic 1 will no longer be supported as it reached its end-of-life in June 2024.
* Python 3.8 will no longer be supported as its end-of-life is October 2024.
**These are the only breaking changes.**
## What's new
The following features have been added during the development of 0.2.x:
- Moved more integrations from `langchain-community` to their own `langchain-x` packages. This is a non-breaking change, as the legacy implementations are left in `langchain-community` and marked as deprecated. This allows us to better manage the dependencies of, test, and version these integrations. You can see all the latest integration packages in the [API reference](https://python.langchain.com/v0.2/api_reference/reference.html#integrations).
- Simplified tool definition and usage. Read more [here](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/).
- Added utilities for interacting with chat models: [universal model constructor](https://python.langchain.com/v0.2/docs/how_to/chat_models_universal_init/), [rate limiter](https://python.langchain.com/v0.2/docs/how_to/chat_model_rate_limiting/), and [message utilities](https://python.langchain.com/v0.2/docs/how_to/#messages).
- Added the ability to [dispatch custom events](https://python.langchain.com/v0.2/docs/how_to/callbacks_custom_events/).
- Revamped integration docs and API reference. Read more [here](https://blog.langchain.dev/langchain-integration-docs-revamped/).
- Marked as deprecated a number of legacy chains and added migration guides for all of them. These are slated for removal in `langchain` 1.0.0. See the deprecated chains and associated [migration guides here](https://python.langchain.com/v0.2/docs/versions/migrating_chains/).
## How to update your code
If you're using `langchain` / `langchain-community` / `langchain-core` 0.0 or 0.1, we recommend that you first [upgrade to 0.2](https://python.langchain.com/v0.2/docs/versions/v0_2/).
If you're using `langgraph`, upgrade to `langgraph>=0.2.20,<0.3`. This will work with either 0.2 or 0.3 versions of all the base packages.
Here is a complete list of all packages that have been released and what we recommend upgrading your version constraints to.
Any package that now requires `langchain-core` 0.3 had a minor version bump.
Any package that is now compatible with both `langchain-core` 0.2 and 0.3 had a patch version bump.
You can use the `langchain-cli` to update deprecated imports automatically.
The CLI will handle updating deprecated imports that were introduced in LangChain 0.0.x and LangChain 0.1, as
well as updating the `langchain_core.pydantic_v1` and `langchain.pydantic_v1` imports.
### Base packages
| Package | Latest | Recommended constraint |
|--------------------------|--------|------------------------|
| langchain | 0.3.0 | >=0.3,<0.4 |
| langchain-community | 0.3.0 | >=0.3,<0.4 |
| langchain-text-splitters | 0.3.0 | >=0.3,<0.4 |
| langchain-core | 0.3.0 | >=0.3,<0.4 |
| langchain-experimental | 0.3.0 | >=0.3,<0.4 |
### Downstream packages
| Package | Latest | Recommended constraint |
|-----------|--------|------------------------|
| langgraph | 0.2.20 | >=0.2.20,<0.3 |
| langserve | 0.3.0 | >=0.3,<0.4 |
### Integration packages
| Package | Latest | Recommended constraint |
| -------------------------------------- | ------- | -------------------------- |
| langchain-ai21 | 0.2.0 | >=0.2,<0.3 |
| langchain-aws | 0.2.0 | >=0.2,<0.3 |
| langchain-anthropic | 0.2.0 | >=0.2,<0.3 |
| langchain-astradb | 0.4.1 | >=0.4.1,<0.5 |
| langchain-azure-dynamic-sessions | 0.2.0 | >=0.2,<0.3 |
| langchain-box | 0.2.0 | >=0.2,<0.3 |
| langchain-chroma | 0.1.4 | >=0.1.4,<0.2 |
| langchain-cohere | 0.3.0 | >=0.3,<0.4 |
| langchain-elasticsearch | 0.3.0 | >=0.3,<0.4 |
| langchain-exa | 0.2.0 | >=0.2,<0.3 |
| langchain-fireworks | 0.2.0 | >=0.2,<0.3 |
| langchain-groq | 0.2.0 | >=0.2,<0.3 |
| langchain-google-community | 2.0.0 | >=2,<3 |
| langchain-google-genai | 2.0.0 | >=2,<3 |
| langchain-google-vertexai | 2.0.0 | >=2,<3 |
| langchain-huggingface | 0.1.0 | >=0.1,<0.2 |
| langchain-ibm | 0.2.0 | >=0.2,<0.3 |
| langchain-milvus | 0.1.6 | >=0.1.6,<0.2 |
| langchain-mistralai | 0.2.0 | >=0.2,<0.3 |
| langchain-mongodb | 0.2.0 | >=0.2,<0.3 |
| langchain-nomic | 0.1.3 | >=0.1.3,<0.2 |
| langchain-ollama | 0.2.0 | >=0.2,<0.3 |
| langchain-openai | 0.2.0 | >=0.2,<0.3 |
| langchain-pinecone | 0.2.0 | >=0.2,<0.3 |
| langchain-postgres | 0.0.13 | >=0.0.13,<0.1 |
| langchain-prompty | 0.1.0 | >=0.1,<0.2 |
| langchain-qdrant | 0.1.4 | >=0.1.4,<0.2 |
| langchain-redis | 0.1.0 | >=0.1,<0.2 |
| langchain-sema4 | 0.2.0 | >=0.2,<0.3 |
| langchain-together | 0.2.0 | >=0.2,<0.3 |
| langchain-unstructured | 0.1.4 | >=0.1.4,<0.2 |
| langchain-upstage | 0.3.0 | >=0.3,<0.4 |
| langchain-voyageai | 0.2.0 | >=0.2,<0.3 |
| langchain-weaviate | 0.0.3 | >=0.0.3,<0.1 |
Once you've updated to recent versions of the packages, you may need to address the following issues stemming from the internal switch from Pydantic v1 to Pydantic v2:
- If your code depends on Pydantic aside from LangChain, you will need to upgrade your pydantic version constraints to be `pydantic>=2,<3`. See [Pydantic's migration guide](https://docs.pydantic.dev/latest/migration/) for help migrating your non-LangChain code to Pydantic v2 if you use pydantic v1.
- There are a number of side effects to LangChain components caused by the internal switch from Pydantic v1 to v2. We have listed some of the common cases below together with the recommended solutions.
## Common issues when transitioning to Pydantic 2
### 1. Do not use the `langchain_core.pydantic_v1` namespace
Replace any usage of `langchain_core.pydantic_v1` or `langchain.pydantic_v1` with
direct imports from `pydantic`.
For example,
```python
from langchain_core.pydantic_v1 import BaseModel
```
to:
```python
from pydantic import BaseModel
```
This may require you to make additional updates to your Pydantic code given that there are a number of breaking changes in Pydantic 2. See the [Pydantic Migration](https://docs.pydantic.dev/latest/migration/) for how to upgrade your code from Pydantic 1 to 2.
### 2. Passing Pydantic objects to LangChain APIs
Users using the following APIs:
* `BaseChatModel.bind_tools`
* `BaseChatModel.with_structured_output`
* `Tool.from_function`
* `StructuredTool.from_function`
should ensure that they are passing Pydantic 2 objects to these APIs rather than
Pydantic 1 objects (created via the `pydantic.v1` namespace of pydantic 2).
:::caution
While `v1` objects may be accepted by some of these APIs, users are advised to
use Pydantic 2 objects to avoid future issues.
:::
### 3. Sub-classing LangChain models
Any sub-classing from existing LangChain models (e.g., `BaseTool`, `BaseChatModel`, `LLM`)
should upgrade to use Pydantic 2 features.
For example, any user code that's relying on Pydantic 1 features (e.g., `validator`) should
be updated to the Pydantic 2 equivalent (e.g., `field_validator`), and any references to
`pydantic.v1`, `langchain_core.pydantic_v1`, `langchain.pydantic_v1` should be replaced
with imports from `pydantic`.
```python
from pydantic.v1 import validator, Field  # if pydantic 2 is installed
# from pydantic import validator, Field  # if pydantic 1 is installed
# from langchain_core.pydantic_v1 import validator, Field
# from langchain.pydantic_v1 import validator, Field


class CustomTool(BaseTool):  # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @validator('x')  # v1 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1
```
Should change to:
```python
from pydantic import Field, field_validator  # pydantic v2
from langchain_core.tools import BaseTool


class CustomTool(BaseTool):  # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @field_validator('x')  # v2 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1


CustomTool(
    name='custom_tool',
    description="hello",
    x=1,
)
```
### 4. model_rebuild()
When sub-classing from LangChain models, users may need to add relevant imports
to the file and rebuild the model.
You can read more about `model_rebuild` [here](https://docs.pydantic.dev/latest/concepts/models/#rebuilding-model-schema).
```python
from langchain_core.output_parsers import BaseOutputParser


class FooParser(BaseOutputParser):
    ...
```
New code:
```python
from typing import Optional as Optional

from langchain_core.output_parsers import BaseOutputParser


class FooParser(BaseOutputParser):
    ...


FooParser.model_rebuild()
```
## Migrate using langchain-cli
The `langchain-cli` can help update deprecated LangChain imports in your code automatically.
Please note that the `langchain-cli` only handles deprecated LangChain imports and cannot
help to upgrade your code from pydantic 1 to pydantic 2.
For help with the Pydantic 1 to 2 migration itself please refer to the [Pydantic Migration Guidelines](https://docs.pydantic.dev/latest/migration/).
As of 0.0.31, the `langchain-cli` relies on [gritql](https://about.grit.io/) for applying code mods.
### Installation
```bash
pip install -U langchain-cli
langchain-cli --version # <-- Make sure the version is at least 0.0.31
```
### Usage
Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
The `langchain-cli` will handle the `langchain_core.pydantic_v1` deprecation introduced in LangChain 0.3 as well
as older deprecations (e.g., `from langchain.chat_models import ChatOpenAI`, which should be `from langchain_openai import ChatOpenAI`).
You will need to run the migration script **twice** as it only applies one import replacement per run.
For example, say that your code is still using the old import `from langchain.chat_models import ChatOpenAI`:
After the first run, you'll get: `from langchain_community.chat_models import ChatOpenAI`
After the second run, you'll get: `from langchain_openai import ChatOpenAI`
```bash
# Run a first time
# Will replace from langchain.chat_models import ChatOpenAI
langchain-cli migrate --diff [path to code] # Preview
langchain-cli migrate [path to code] # Apply
# Run a second time to apply more import replacements
langchain-cli migrate --diff [path to code] # Preview
langchain-cli migrate [path to code] # Apply
```
### Other options
```bash
# See help menu
langchain-cli migrate --help
# Preview Changes without applying
langchain-cli migrate --diff [path to code]
# Approve changes interactively
langchain-cli migrate --interactive [path to code]
```

View File

@@ -0,0 +1,187 @@
---
sidebar_label: Overview of v0.3
---
# Overview of LangChain v0.3
## What's new in LangChain?
The following features have been added during the development of 0.2.x:
- We've recently revamped our integration docs and API reference. Read more [here](https://blog.langchain.dev/langchain-integration-docs-revamped/).
- We've continued to migrate key integrations to their own `langchain-x` packages outside of `langchain-community`. This allows us to better manage the dependencies of, test, and version these integrations. You can see all the latest integration packages in the [API reference](https://python.langchain.com/v0.2/api_reference/reference.html#integrations).
- We've simplified how to define and use tools. Read more [here](https://blog.langchain.dev/improving-core-tool-interfaces-and-docs-in-langchain/).
- We've added a number of key utilities for interacting with chat models: [universal model constructor](https://python.langchain.com/v0.2/docs/how_to/chat_models_universal_init/), [rate limiter](https://python.langchain.com/v0.2/docs/how_to/chat_model_rate_limiting/), and [message utilities](https://python.langchain.com/v0.2/docs/how_to/#messages).
- We've added the ability to [dispatch custom events](https://python.langchain.com/v0.2/docs/how_to/callbacks_custom_events/).
- We have marked as deprecated a number of legacy chains and added migration guides for all of them. These are slated for removal in langchain 1.0. See the deprecated chains and associated [migration guides here](https://python.langchain.com/v0.2/docs/versions/migrating_chains/).
## What's changed
* As of the 0.3 release, LangChain has been upgraded to use Pydantic 2 internally. Pydantic v2 will be fully supported across new packages without the need for any bridges like `langchain_core.pydantic_v1`.
* Pydantic 1 will no longer be supported as it reached its end-of-life in June 2024.
* Python 3.8 will no longer be supported as its end-of-life is October 2024.
## How to update your code
If you're using LangChain 0.0, or 0.1, we recommend that you first [upgrade to 0.2](https://python.langchain.com/v0.2/docs/versions/v0_2/). The langchain-cli will help you to migrate many imports automatically.
If you're using LangChain 0.2, update your packages to use `langchain-core>=0.3`. We've released 0.3 versions of langchain-core, langchain, langchain-community and langserve. `langgraph>=0.2.20` will work with either langchain-core 0.2 or 0.3.
The breaking changes in this release were:
1. The internal switch from Pydantic v1 to Pydantic v2.
2. The removal of the automatic addition of the suffix `Schema` to the names of tools.
Once you've updated to recent versions of the packages, you may need to address the following issues stemming from the internal switch from Pydantic v1 to Pydantic v2:
- If your code depends on Pydantic aside from LangChain, you will need to use `pydantic>=2,<3`. See [Pydantic's migration guide](https://docs.pydantic.dev/latest/migration/) for help migrating your non-LangChain code to Pydantic v2 if you use pydantic v1.
- There are a number of side effects to LangChain components caused by the internal switch from Pydantic v1 to v2. We have listed some of the common cases below together with the recommended solutions.
If you're still using deprecated LangChain chains, please follow the [migration guides here](https://python.langchain.com/v0.2/docs/versions/migrating_chains/).
## Common issues when transitioning to Pydantic 2
### 1. Do not use the `langchain_core.pydantic_v1` namespace
Replace any usage of `langchain_core.pydantic_v1` or `langchain.pydantic_v1` with
direct imports from `pydantic`.
For example,
```python
from langchain_core.pydantic_v1 import BaseModel
```
to:
```python
from pydantic import BaseModel
```
### 2. Passing Pydantic objects to LangChain APIs
Users using the following APIs:
* `BaseChatModel.bind_tools`
* `BaseChatModel.with_structured_output`
* `Tool.from_function`
* `StructuredTool.from_function`
should ensure that they are passing Pydantic 2 objects to these APIs rather than
Pydantic 1 objects (created via the `pydantic.v1` namespace of pydantic 2).
:::caution
While `v1` objects may be accepted by some of these APIs, users are advised to
use Pydantic 2 objects to avoid future issues.
:::
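For example, a Pydantic 2 model can be passed directly to `bind_tools` (a minimal sketch; the schema and model choice are illustrative):
```python
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class Multiply(BaseModel):
    """Multiply two integers."""

    x: int = Field(..., description="First factor")
    y: int = Field(..., description="Second factor")


model = ChatOpenAI(model="gpt-4o")
model_with_tools = model.bind_tools([Multiply])  # pass the Pydantic 2 class itself
```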
### 3. Sub-classing LangChain models
Any sub-classing from existing LangChain models (e.g., `BaseTool`, `BaseChatModel`, `LLM`)
should upgrade to use Pydantic 2 features.
For example, any user code that's relying on Pydantic 1 features (e.g., `validator`) should
be updated to the Pydantic 2 equivalent (e.g., `field_validator`), and any references to
`pydantic.v1`, `langchain_core.pydantic_v1`, `langchain.pydantic_v1` should be replaced
with imports from `pydantic`.
```python
from pydantic.v1 import validator, Field  # if pydantic 2 is installed
# from pydantic import validator, Field  # if pydantic 1 is installed
# from langchain_core.pydantic_v1 import validator, Field
# from langchain.pydantic_v1 import validator, Field


class CustomTool(BaseTool):  # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @validator('x')  # v1 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1
```
Should change to:
```python
from pydantic import Field, field_validator  # pydantic v2
from langchain_core.tools import BaseTool


class CustomTool(BaseTool):  # BaseTool is v1 code
    x: int = Field(default=1)

    def _run(*args, **kwargs):
        return "hello"

    @field_validator('x')  # v2 code
    @classmethod
    def validate_x(cls, x: int) -> int:
        return 1


CustomTool(
    name='custom_tool',
    description="hello",
    x=1,
)
```
### 4. model_rebuild()
When sub-classing from LangChain models, users may need to add relevant imports
to the file and rebuild the model.
```python
from langchain_core.output_parsers import BaseOutputParser


class FooParser(BaseOutputParser):
    ...
```
New code:
```python
from typing import Optional as Optional

from langchain_core.output_parsers import BaseOutputParser


class FooParser(BaseOutputParser):
    ...


FooParser.model_rebuild()
```
## `Schema` suffix removal
In previous versions of LangChain, the suffix `Schema` was automatically added to the names of tools if a tool name was not
specified. This name was used to generate the schema for the tool which was sent to chat models as the JSON Schema for the tool.
We do not expect most users to be affected by this change.
For example, the tool
```python
from langchain_core.tools import tool


@tool
def add(x: int, y: int) -> int:
    """Add x and y."""
    return x + y
```
would have been named `addSchema` in previous versions of LangChain. In 0.3, the name of the tool will be `add`.
```python
add.args_schema.model_json_schema()
```
```
{'description': 'Add x and y.',
 'properties': {'x': {'title': 'X', 'type': 'integer'},
  'y': {'title': 'Y', 'type': 'integer'}},
 'required': ['x', 'y'],
 'title': 'add',
 'type': 'object'}
```

View File

@@ -168,43 +168,52 @@ const config = {
label: "Integrations",
},
{
label: "API Reference",
to: "https://python.langchain.com/api_reference/",
type: "dropdown",
label: "API reference",
position: "left",
items: [
{
label: "Latest",
to: "https://python.langchain.com/api_reference/reference.html",
},
{
label: "Legacy",
href: "https://api.python.langchain.com/"
}
]
},
{
type: "dropdown",
label: "More",
position: "left",
items: [
{
type: "doc",
docId: "contributing/index",
label: "Contributing",
},
{
type: "doc",
docId: "people",
label: "People",
},
{
type: 'html',
value: '<hr class="dropdown-separator" style="margin-top: 0.5rem; margin-bottom: 0.5rem">',
type: "doc",
docId: "contributing/index",
label: "Contributing",
},
{
href: "https://docs.smith.langchain.com",
label: "LangSmith",
label: "Cookbooks",
href: "https://github.com/langchain-ai/langchain/blob/master/cookbook/README.md"
},
{
href: "https://langchain-ai.github.io/langgraph/",
label: "LangGraph",
type: "doc",
docId: "additional_resources/tutorials",
label: "3rd party tutorials"
},
{
href: "https://smith.langchain.com/hub",
label: "LangChain Hub",
type: "doc",
docId: "additional_resources/youtube",
label: "YouTube"
},
{
href: "https://js.langchain.com",
label: "LangChain JS/TS",
to: "/docs/additional_resources/arxiv_references",
label: "arXiv"
},
]
},
@@ -228,7 +237,30 @@ const config = {
]
},
{
to: "https://chat.langchain.com",
type: "dropdown",
label: "🦜️🔗",
position: "right",
items: [
{
href: "https://smith.langchain.com",
label: "LangSmith",
},
{
href: "https://docs.smith.langchain.com/",
label: "LangSmith Docs",
},
{
href: "https://smith.langchain.com/hub",
label: "LangChain Hub",
},
{
href: "https://js.langchain.com",
label: "JS/TS Docs",
},
]
},
{
href: "https://chat.langchain.com",
label: "💬",
position: "right",
},
@@ -298,7 +330,7 @@ const config = {
// this is linked to erick@langchain.dev currently
apiKey: "6c01842d6a88772ed2236b9c85806441",
indexName: "python-langchain-latest",
indexName: "python-langchain-0.2",
contextualSearch: false,
},

View File

@@ -72,24 +72,25 @@ module.exports = {
collapsed: false,
collapsible: false,
items: [
{
type: 'doc',
id: 'versions/v0_3/index',
label: "v0.3",
},
{
type: "category",
label: "v0.2",
items: [{
type: 'autogenerated',
dirName: 'versions/v0_2',
}],
},
"versions/v0_3/overview",
"versions/overview",
{
type: 'doc',
id: "how_to/pydantic_compatibility",
label: "Pydantic compatibility",
},
{
type: "category",
label: "Migrating to v0.2",
link: {type: 'doc', id: 'versions/v0_2/index'},
collapsible: false,
collapsed: false,
items: [{
type: 'autogenerated',
dirName: 'versions/v0_2',
className: 'hidden',
}],
},
{
type: "category",
label: "Migrating from v0.0 chains",

View File

@@ -38,9 +38,6 @@
--ifm-menu-link-padding-horizontal: 0.5rem;
--ifm-menu-link-padding-vertical: 0.5rem;
--doc-sidebar-width: 275px !important;
/* Code block syntax highlighting */
--docusaurus-highlighted-code-line-bg: rgb(176, 227, 199);
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
@@ -52,9 +49,6 @@
--ifm-color-primary-light: #29d5b0;
--ifm-color-primary-lighter: #32d8b4;
--ifm-color-primary-lightest: #4fddbf;
/* Code block syntax highlighting */
--docusaurus-highlighted-code-line-bg: rgb(14, 73, 60);
}
nav, h1, h2, h3, h4 {

View File

@@ -354,7 +354,7 @@ const FEATURE_TABLES = {
},
{
name: "Nomic",
link: "nomic",
link: "cohere",
package: "langchain-nomic",
apiLink: "https://python.langchain.com/api_reference/nomic/embeddings/langchain_nomic.embeddings.NomicEmbeddings.html"
},
@@ -886,7 +886,7 @@ const FEATURE_TABLES = {
apiLink: "https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.html_bs.BSHTMLLoader.html"
},
{
name: "UnstructuredXMLLoader",
name: "UnstrucutredXMLLoader",
link: "xml",
source: "XML files",
apiLink: "https://python.langchain.com/api_reference/community/document_loaders/langchain_community.document_loaders.xml.UnstructuredXMLLoader.html"

View File

@@ -2538,389 +2538,5 @@ const suggestedLinks = {
"alternative": [
"/v0.1/docs/use_cases/web_scraping/"
]
},
// below are new
"/docs/modules/data_connection/document_transformers/text_splitters/": {"canonical": "/docs/how_to/#text-splitters", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter/": {"canonical": "/docs/how_to/character_text_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/character_text_splitter/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/code_splitter/": {"canonical": "/docs/how_to/code_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/code_splitter/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/HTML_header_metadata/": {"canonical": "/docs/how_to/HTML_header_metadata_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/HTML_header_metadata/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/HTML_section_aware_splitter/": {"canonical": "/docs/how_to/HTML_section_aware_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/markdown_header_metadata/": {"canonical": "/docs/how_to/markdown_header_metadata_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/markdown_header_metadata/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/recursive_json_splitter/": {"canonical": "/docs/how_to/recursive_json_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/recursive_json_splitter/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter/": {"canonical": "/docs/how_to/recursive_text_splitter/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/recursive_text_splitter/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/semantic-chunker/": {"canonical": "/docs/how_to/semantic-chunker/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/semantic-chunker/"]},
"/docs/modules/data_connection/document_transformers/text_splitters/split_by_token/": {"canonical": "/docs/how_to/split_by_token/", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/split_by_token/"]},
"/docs/modules/model_io/prompts/prompt_templates/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/"]},
"/docs/modules/model_io/prompts/prompt_templates/composition/": {"canonical": "/docs/how_to/prompts_composition/", "alternative": ["/v0.1/docs/modules/model_io/prompts/composition/"]},
"/docs/modules/model_io/prompts/prompt_templates/example_selectors/": {"canonical": "/docs/how_to/example_selectors/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/"]},
"/docs/modules/model_io/prompts/prompt_templates/example_selectors/length_based/": {"canonical": "/docs/how_to/example_selectors_length_based/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/length_based/"]},
"/docs/modules/model_io/prompts/prompt_templates/example_selectors/mmr/": {"canonical": "/docs/how_to/example_selectors_mmr/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/"]},
"/docs/modules/model_io/prompts/prompt_templates/example_selectors/ngram_overlap/": {"canonical": "/docs/how_to/example_selectors_ngram/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/ngram_overlap/"]},
"/docs/modules/model_io/prompts/prompt_templates/example_selectors/similarity/": {"canonical": "/docs/how_to/example_selectors_similarity/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/similarity/"]},
"/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat/": {"canonical": "/docs/how_to/few_shot_examples_chat/", "alternative": ["/v0.1/docs/modules/model_io/prompts/few_shot_examples_chat/"]},
"/docs/modules/model_io/prompts/prompt_templates/few_shot_examples/": {"canonical": "/docs/how_to/few_shot_examples/", "alternative": ["/v0.1/docs/modules/model_io/prompts/few_shot_examples/"]},
"/docs/modules/model_io/prompts/prompt_templates/partial/": {"canonical": "/docs/how_to/prompts_partial/", "alternative": ["/v0.1/docs/modules/model_io/prompts/partial/"]},
"/docs/modules/model_io/prompts/prompt_templates/quick_start/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/quick_start/"]},
"/docs/modules/model_io/models/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/"]},
"/docs/modules/model_io/models/chat/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/chat/"]},
"/docs/modules/model_io/models/chat/chat_model_caching/": {"canonical": "/docs/how_to/chat_model_caching/", "alternative": ["/v0.1/docs/modules/model_io/chat/chat_model_caching/"]},
"/docs/modules/model_io/models/chat/custom_chat_model/": {"canonical": "/docs/how_to/custom_chat_model/", "alternative": ["/v0.1/docs/modules/model_io/chat/custom_chat_model/"]},
"/docs/modules/model_io/models/chat/function_calling/": {"canonical": "/docs/how_to/tool_calling/", "alternative": ["/v0.1/docs/modules/model_io/chat/function_calling/"]},
"/docs/modules/model_io/models/chat/logprobs/": {"canonical": "/docs/how_to/logprobs/", "alternative": ["/v0.1/docs/modules/model_io/chat/logprobs/"]},
"/docs/modules/model_io/models/chat/message_types/": {"canonical": "/docs/concepts/#messages", "alternative": ["/v0.1/docs/modules/model_io/chat/message_types/"]},
"/docs/modules/model_io/models/chat/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/chat/quick_start/"]},
"/docs/modules/model_io/models/chat/response_metadata/": {"canonical": "/docs/how_to/response_metadata/", "alternative": ["/v0.1/docs/modules/model_io/chat/response_metadata/"]},
"/docs/modules/model_io/models/chat/streaming/": {"canonical": "/docs/how_to/streaming/", "alternative": ["/v0.1/docs/modules/model_io/chat/streaming/"]},
"/docs/modules/model_io/models/chat/structured_output/": {"canonical": "/docs/how_to/structured_output/", "alternative": ["/v0.1/docs/modules/model_io/chat/structured_output/"]},
"/docs/modules/model_io/models/chat/token_usage_tracking/": {"canonical": "/docs/how_to/chat_token_usage_tracking/", "alternative": ["/v0.1/docs/modules/model_io/chat/token_usage_tracking/"]},
"/docs/modules/model_io/models/concepts/": {"canonical": "/docs/concepts/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/concepts/"]},
"/docs/modules/model_io/models/llms/": {"canonical": "/docs/concepts/#llms", "alternative": ["/v0.1/docs/modules/model_io/llms/"]},
"/docs/modules/model_io/models/llms/custom_llm/": {"canonical": "/docs/how_to/custom_llm/", "alternative": ["/v0.1/docs/modules/model_io/llms/custom_llm/"]},
"/docs/modules/model_io/models/llms/llm_caching/": {"canonical": "/docs/how_to/llm_caching/", "alternative": ["/v0.1/docs/modules/model_io/llms/llm_caching/"]},
"/docs/modules/model_io/models/llms/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/llms/quick_start/"]},
"/docs/modules/model_io/models/llms/streaming_llm/": {"canonical": "/docs/how_to/streaming_llm/", "alternative": ["/v0.1/docs/modules/model_io/llms/streaming_llm/"]},
"/docs/modules/model_io/models/llms/token_usage_tracking/": {"canonical": "/docs/how_to/llm_token_usage_tracking/", "alternative": ["/v0.1/docs/modules/model_io/llms/token_usage_tracking/"]},
"/docs/modules/model_io/models/output_parsers/": {"canonical": "/docs/how_to/#output-parsers", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/"]},
"/docs/modules/model_io/models/output_parsers/custom/": {"canonical": "/docs/how_to/output_parser_custom/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/custom/"]},
"/docs/modules/model_io/models/output_parsers/quick_start/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/quick_start/"]},
"/docs/modules/model_io/models/output_parsers/types/csv/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/csv/"]},
"/docs/modules/model_io/models/output_parsers/types/datetime/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/datetime/"]},
"/docs/modules/model_io/models/output_parsers/types/enum/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/enum/"]},
"/docs/modules/model_io/models/output_parsers/types/json/": {"canonical": "/docs/how_to/output_parser_json/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/json/"]},
"/docs/modules/model_io/models/output_parsers/types/openai_functions/": {"canonical": "/docs/how_to/structured_output/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/openai_functions/"]},
"/docs/modules/model_io/models/output_parsers/types/openai_tools/": {"canonical": "/docs/how_to/tool_calling/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/openai_tools/"]},
"/docs/modules/model_io/models/output_parsers/types/output_fixing/": {"canonical": "/docs/how_to/output_parser_fixing/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/output_fixing/"]},
"/docs/modules/model_io/models/output_parsers/types/pandas_dataframe/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/pandas_dataframe/"]},
"/docs/modules/model_io/models/output_parsers/types/pydantic/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/pydantic/"]},
"/docs/modules/model_io/models/output_parsers/types/retry/": {"canonical": "/docs/how_to/output_parser_retry/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/retry/"]},
"/docs/modules/model_io/models/output_parsers/types/structured/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/structured/"]},
"/docs/modules/model_io/models/output_parsers/types/xml/": {"canonical": "/docs/how_to/output_parser_xml/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/xml/"]},
"/docs/modules/model_io/models/output_parsers/types/yaml/": {"canonical": "/docs/how_to/output_parser_yaml/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/yaml/"]},
"/docs/modules/model_io/models/prompts/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/"]},
"/docs/modules/model_io/models/prompts/composition/": {"canonical": "/docs/how_to/prompts_composition/", "alternative": ["/v0.1/docs/modules/model_io/prompts/composition/"]},
"/docs/modules/model_io/models/prompts/example_selectors/": {"canonical": "/docs/how_to/example_selectors/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/"]},
"/docs/modules/model_io/models/prompts/example_selectors/length_based/": {"canonical": "/docs/how_to/example_selectors_length_based/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/length_based/"]},
"/docs/modules/model_io/models/prompts/example_selectors/mmr/": {"canonical": "/docs/how_to/example_selectors_mmr/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/"]},
"/docs/modules/model_io/models/prompts/example_selectors/ngram_overlap/": {"canonical": "/docs/how_to/example_selectors_ngram/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/ngram_overlap/"]},
"/docs/modules/model_io/models/prompts/example_selectors/similarity/": {"canonical": "/docs/how_to/example_selectors_similarity/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/similarity/"]},
"/docs/modules/model_io/models/prompts/few_shot_examples_chat/": {"canonical": "/docs/how_to/few_shot_examples_chat/", "alternative": ["/v0.1/docs/modules/model_io/prompts/few_shot_examples_chat/"]},
"/docs/modules/model_io/models/prompts/few_shot_examples/": {"canonical": "/docs/how_to/few_shot_examples/", "alternative": ["/v0.1/docs/modules/model_io/prompts/few_shot_examples/"]},
"/docs/modules/model_io/models/prompts/partial/": {"canonical": "/docs/how_to/prompts_partial/", "alternative": ["/v0.1/docs/modules/model_io/prompts/partial/"]},
"/docs/modules/model_io/models/prompts/quick_start/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/quick_start/"]},
"/docs/modules/model_io/models/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/quick_start/"]},
"/docs/use_cases/more/graph/": {"canonical": "/docs/tutorials/graph/", "alternative": ["/v0.1/docs/use_cases/graph/"]},
"/docs/use_cases/more/graph/constructing/": {"canonical": "/docs/tutorials/graph/", "alternative": ["/v0.1/docs/use_cases/graph/constructing/"]},
"/docs/use_cases/more/graph/mapping/": {"canonical": "/docs/tutorials/graph/", "alternative": ["/v0.1/docs/use_cases/graph/mapping/"]},
"/docs/use_cases/more/graph/prompting/": {"canonical": "/docs/tutorials/graph/", "alternative": ["/v0.1/docs/use_cases/graph/prompting/"]},
"/docs/use_cases/more/graph/quickstart/": {"canonical": "/docs/tutorials/graph/", "alternative": ["/v0.1/docs/use_cases/graph/quickstart/"]},
"/docs/use_cases/more/graph/semantic/": {"canonical": "/docs/tutorials/graph/", "alternative": ["/v0.1/docs/use_cases/graph/semantic/"]},
"/docs/modules/model_io/chat/how_to/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/chat/"]},
"/docs/modules/model_io/chat/how_to/chat_model_caching/": {"canonical": "/docs/how_to/chat_model_caching/", "alternative": ["/v0.1/docs/modules/model_io/chat/chat_model_caching/"]},
"/docs/modules/model_io/chat/how_to/custom_chat_model/": {"canonical": "/docs/how_to/custom_chat_model/", "alternative": ["/v0.1/docs/modules/model_io/chat/custom_chat_model/"]},
"/docs/modules/model_io/chat/how_to/function_calling/": {"canonical": "/docs/how_to/tool_calling/", "alternative": ["/v0.1/docs/modules/model_io/chat/function_calling/"]},
"/docs/modules/model_io/chat/how_to/logprobs/": {"canonical": "/docs/how_to/logprobs/", "alternative": ["/v0.1/docs/modules/model_io/chat/logprobs/"]},
"/docs/modules/model_io/chat/how_to/message_types/": {"canonical": "/docs/concepts/#messages", "alternative": ["/v0.1/docs/modules/model_io/chat/message_types/"]},
"/docs/modules/model_io/chat/how_to/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/chat/quick_start/"]},
"/docs/modules/model_io/chat/how_to/response_metadata/": {"canonical": "/docs/how_to/response_metadata/", "alternative": ["/v0.1/docs/modules/model_io/chat/response_metadata/"]},
"/docs/modules/model_io/chat/how_to/streaming/": {"canonical": "/docs/how_to/streaming/", "alternative": ["/v0.1/docs/modules/model_io/chat/streaming/"]},
"/docs/modules/model_io/chat/how_to/structured_output/": {"canonical": "/docs/how_to/structured_output/", "alternative": ["/v0.1/docs/modules/model_io/chat/structured_output/"]},
"/docs/modules/model_io/chat/how_to/token_usage_tracking/": {"canonical": "/docs/how_to/chat_token_usage_tracking/", "alternative": ["/v0.1/docs/modules/model_io/chat/token_usage_tracking/"]},
"/docs/modules/model_io/llms/how_to/": {"canonical": "/docs/concepts/#llms", "alternative": ["/v0.1/docs/modules/model_io/llms/"]},
"/docs/modules/model_io/llms/how_to/custom_llm/": {"canonical": "/docs/how_to/custom_llm/", "alternative": ["/v0.1/docs/modules/model_io/llms/custom_llm/"]},
"/docs/modules/model_io/llms/how_to/llm_caching/": {"canonical": "/docs/how_to/llm_caching/", "alternative": ["/v0.1/docs/modules/model_io/llms/llm_caching/"]},
"/docs/modules/model_io/llms/how_to/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/llms/quick_start/"]},
"/docs/modules/model_io/llms/how_to/streaming_llm/": {"canonical": "/docs/how_to/streaming_llm/", "alternative": ["/v0.1/docs/modules/model_io/llms/streaming_llm/"]},
"/docs/modules/model_io/llms/how_to/token_usage_tracking/": {"canonical": "/docs/how_to/llm_token_usage_tracking/", "alternative": ["/v0.1/docs/modules/model_io/llms/token_usage_tracking/"]},
"/docs/modules/model_io/llms/integrations/llm_caching/": {"canonical": "/docs/how_to/llm_caching/", "alternative": ["/v0.1/docs/integrations/llms/llm_caching/"]},
"/docs/modules/model_io/chat/integrations/ollama_functions/": {"canonical": "/docs/integrations/chat/ollama/", "alternative": ["/v0.1/docs/integrations/chat/ollama_functions/"]},
"/en/latest/modules/models/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/"]},
"/en/latest/modules/models/chat/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/chat/"]},
"/en/latest/modules/models/chat/chat_model_caching/": {"canonical": "/docs/how_to/chat_model_caching/", "alternative": ["/v0.1/docs/modules/model_io/chat/chat_model_caching/"]},
"/en/latest/modules/models/chat/custom_chat_model/": {"canonical": "/docs/how_to/custom_chat_model/", "alternative": ["/v0.1/docs/modules/model_io/chat/custom_chat_model/"]},
"/en/latest/modules/models/chat/function_calling/": {"canonical": "/docs/how_to/tool_calling/", "alternative": ["/v0.1/docs/modules/model_io/chat/function_calling/"]},
"/en/latest/modules/models/chat/logprobs/": {"canonical": "/docs/how_to/logprobs/", "alternative": ["/v0.1/docs/modules/model_io/chat/logprobs/"]},
"/en/latest/modules/models/chat/message_types/": {"canonical": "/docs/concepts/#messages", "alternative": ["/v0.1/docs/modules/model_io/chat/message_types/"]},
"/en/latest/modules/models/chat/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/chat/quick_start/"]},
"/en/latest/modules/models/chat/response_metadata/": {"canonical": "/docs/how_to/response_metadata/", "alternative": ["/v0.1/docs/modules/model_io/chat/response_metadata/"]},
"/en/latest/modules/models/chat/streaming/": {"canonical": "/docs/how_to/streaming/", "alternative": ["/v0.1/docs/modules/model_io/chat/streaming/"]},
"/en/latest/modules/models/chat/structured_output/": {"canonical": "/docs/how_to/structured_output/", "alternative": ["/v0.1/docs/modules/model_io/chat/structured_output/"]},
"/en/latest/modules/models/chat/token_usage_tracking/": {"canonical": "/docs/how_to/chat_token_usage_tracking/", "alternative": ["/v0.1/docs/modules/model_io/chat/token_usage_tracking/"]},
"/en/latest/modules/models/concepts/": {"canonical": "/docs/concepts/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/concepts/"]},
"/en/latest/modules/models/llms/": {"canonical": "/docs/concepts/#llms", "alternative": ["/v0.1/docs/modules/model_io/llms/"]},
"/en/latest/modules/models/llms/custom_llm/": {"canonical": "/docs/how_to/custom_llm/", "alternative": ["/v0.1/docs/modules/model_io/llms/custom_llm/"]},
"/en/latest/modules/models/llms/llm_caching/": {"canonical": "/docs/how_to/llm_caching/", "alternative": ["/v0.1/docs/modules/model_io/llms/llm_caching/"]},
"/en/latest/modules/models/llms/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/llms/quick_start/"]},
"/en/latest/modules/models/llms/streaming_llm/": {"canonical": "/docs/how_to/streaming_llm/", "alternative": ["/v0.1/docs/modules/model_io/llms/streaming_llm/"]},
"/en/latest/modules/models/llms/token_usage_tracking/": {"canonical": "/docs/how_to/llm_token_usage_tracking/", "alternative": ["/v0.1/docs/modules/model_io/llms/token_usage_tracking/"]},
"/en/latest/modules/models/output_parsers/": {"canonical": "/docs/how_to/#output-parsers", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/"]},
"/en/latest/modules/models/output_parsers/custom/": {"canonical": "/docs/how_to/output_parser_custom/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/custom/"]},
"/en/latest/modules/models/output_parsers/quick_start/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/quick_start/"]},
"/en/latest/modules/models/output_parsers/types/csv/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/csv/"]},
"/en/latest/modules/models/output_parsers/types/datetime/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/datetime/"]},
"/en/latest/modules/models/output_parsers/types/enum/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/enum/"]},
"/en/latest/modules/models/output_parsers/types/json/": {"canonical": "/docs/how_to/output_parser_json/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/json/"]},
"/en/latest/modules/models/output_parsers/types/openai_functions/": {"canonical": "/docs/how_to/structured_output/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/openai_functions/"]},
"/en/latest/modules/models/output_parsers/types/openai_tools/": {"canonical": "/docs/how_to/tool_calling/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/openai_tools/"]},
"/en/latest/modules/models/output_parsers/types/output_fixing/": {"canonical": "/docs/how_to/output_parser_fixing/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/output_fixing/"]},
"/en/latest/modules/models/output_parsers/types/pandas_dataframe/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/pandas_dataframe/"]},
"/en/latest/modules/models/output_parsers/types/pydantic/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/pydantic/"]},
"/en/latest/modules/models/output_parsers/types/retry/": {"canonical": "/docs/how_to/output_parser_retry/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/retry/"]},
"/en/latest/modules/models/output_parsers/types/structured/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/structured/"]},
"/en/latest/modules/models/output_parsers/types/xml/": {"canonical": "/docs/how_to/output_parser_xml/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/xml/"]},
"/en/latest/modules/models/output_parsers/types/yaml/": {"canonical": "/docs/how_to/output_parser_yaml/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/yaml/"]},
"/en/latest/modules/models/prompts/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/"]},
"/en/latest/modules/models/prompts/composition/": {"canonical": "/docs/how_to/prompts_composition/", "alternative": ["/v0.1/docs/modules/model_io/prompts/composition/"]},
"/en/latest/modules/models/prompts/example_selectors/": {"canonical": "/docs/how_to/example_selectors/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/"]},
"/en/latest/modules/models/prompts/example_selectors/length_based/": {"canonical": "/docs/how_to/example_selectors_length_based/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/length_based/"]},
"/en/latest/modules/models/prompts/example_selectors/mmr/": {"canonical": "/docs/how_to/example_selectors_mmr/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/"]},
"/en/latest/modules/models/prompts/example_selectors/ngram_overlap/": {"canonical": "/docs/how_to/example_selectors_ngram/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/ngram_overlap/"]},
"/en/latest/modules/models/prompts/example_selectors/similarity/": {"canonical": "/docs/how_to/example_selectors_similarity/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/similarity/"]},
"/en/latest/modules/models/prompts/few_shot_examples_chat/": {"canonical": "/docs/how_to/few_shot_examples_chat/", "alternative": ["/v0.1/docs/modules/model_io/prompts/few_shot_examples_chat/"]},
"/en/latest/modules/models/prompts/few_shot_examples/": {"canonical": "/docs/how_to/few_shot_examples/", "alternative": ["/v0.1/docs/modules/model_io/prompts/few_shot_examples/"]},
"/en/latest/modules/models/prompts/partial/": {"canonical": "/docs/how_to/prompts_partial/", "alternative": ["/v0.1/docs/modules/model_io/prompts/partial/"]},
"/en/latest/modules/models/prompts/quick_start/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/quick_start/"]},
"/en/latest/modules/models/quick_start/": {"canonical": "/docs/tutorials/llm_chain/", "alternative": ["/v0.1/docs/modules/model_io/quick_start/"]},
"/docs/modules/model_io/prompts/example_selector_types/": {"canonical": "/docs/how_to/example_selectors/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/"]},
"/docs/modules/model_io/prompts/example_selector_types/length_based/": {"canonical": "/docs/how_to/example_selectors_length_based/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/length_based/"]},
"/docs/modules/model_io/prompts/example_selector_types/mmr/": {"canonical": "/docs/how_to/example_selectors_mmr/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/mmr/"]},
"/docs/modules/model_io/prompts/example_selector_types/ngram_overlap/": {"canonical": "/docs/how_to/example_selectors_ngram/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/ngram_overlap/"]},
"/docs/modules/model_io/prompts/example_selector_types/similarity/": {"canonical": "/docs/how_to/example_selectors_similarity/", "alternative": ["/v0.1/docs/modules/model_io/prompts/example_selectors/similarity/"]},
"/docs/modules/agents/tools/": {"canonical": "/docs/how_to/#tools", "alternative": ["/v0.1/docs/modules/tools/"]},
"/docs/modules/agents/tools/custom_tools/": {"canonical": "/docs/how_to/custom_tools/", "alternative": ["/v0.1/docs/modules/tools/custom_tools/"]},
"/docs/modules/agents/tools/toolkits/": {"canonical": "/docs/how_to/#tools", "alternative": ["/v0.1/docs/modules/tools/toolkits/"]},
"/docs/modules/agents/tools/tools_as_openai_functions/": {"canonical": "/docs/how_to/tool_calling/", "alternative": ["/v0.1/docs/modules/tools/tools_as_openai_functions/"]},
"/docs/guides/deployments/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/deployments/"]},
"/docs/guides/deployments/template_repos/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/deployments/template_repos/"]},
"/docs/guides/evaluation/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/"]},
"/docs/guides/evaluation/comparison/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/comparison/"]},
"/docs/guides/evaluation/comparison/custom/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/comparison/custom/"]},
"/docs/guides/evaluation/comparison/pairwise_embedding_distance/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/comparison/pairwise_embedding_distance/"]},
"/docs/guides/evaluation/comparison/pairwise_string/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/comparison/pairwise_string/"]},
"/docs/guides/evaluation/examples/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/examples/"]},
"/docs/guides/evaluation/examples/comparisons/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/examples/comparisons/"]},
"/docs/guides/evaluation/string/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/"]},
"/docs/guides/evaluation/string/criteria_eval_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/criteria_eval_chain/"]},
"/docs/guides/evaluation/string/custom/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/custom/"]},
"/docs/guides/evaluation/string/embedding_distance/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/embedding_distance/"]},
"/docs/guides/evaluation/string/exact_match/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/exact_match/"]},
"/docs/guides/evaluation/string/json/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/json/"]},
"/docs/guides/evaluation/string/regex_match/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/regex_match/"]},
"/docs/guides/evaluation/string/scoring_eval_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/scoring_eval_chain/"]},
"/docs/guides/evaluation/string/string_distance/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/string/string_distance/"]},
"/docs/guides/evaluation/trajectory/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/trajectory/"]},
"/docs/guides/evaluation/trajectory/custom/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/trajectory/custom/"]},
"/docs/guides/evaluation/trajectory/trajectory_eval/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/trajectory/trajectory_eval/"]},
"/docs/guides/privacy/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/"]},
"/docs/guides/privacy/amazon_comprehend_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/amazon_comprehend_chain/"]},
"/docs/guides/privacy/constitutional_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/constitutional_chain/"]},
"/docs/guides/privacy/hugging_face_prompt_injection/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/hugging_face_prompt_injection/"]},
"/docs/guides/privacy/layerup_security/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/layerup_security/"]},
"/docs/guides/privacy/logical_fallacy_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/logical_fallacy_chain/"]},
"/docs/guides/privacy/moderation/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/moderation/"]},
"/docs/guides/privacy/presidio_data_anonymization/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/"]},
"/docs/guides/privacy/presidio_data_anonymization/multi_language/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/multi_language/"]},
"/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/"]},
"/docs/guides/privacy/presidio_data_anonymization/reversible/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"]},
"/docs/guides/safety/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/"]},
"/docs/guides/safety/amazon_comprehend_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/amazon_comprehend_chain/"]},
"/docs/guides/safety/constitutional_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/constitutional_chain/"]},
"/docs/guides/safety/hugging_face_prompt_injection/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/hugging_face_prompt_injection/"]},
"/docs/guides/safety/layerup_security/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/layerup_security/"]},
"/docs/guides/safety/logical_fallacy_chain/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/logical_fallacy_chain/"]},
"/docs/guides/safety/moderation/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/moderation/"]},
"/docs/guides/safety/presidio_data_anonymization/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/"]},
"/docs/guides/safety/presidio_data_anonymization/multi_language/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/multi_language/"]},
"/docs/guides/safety/presidio_data_anonymization/qa_privacy_protection/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/qa_privacy_protection/"]},
"/docs/guides/safety/presidio_data_anonymization/reversible/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/presidio_data_anonymization/reversible/"]},
"/docs/integrations/llms/titan_takeoff_pro/": {"canonical": "/docs/integrations/llms/titan_takeoff/"},
"/docs/integrations/providers/optimum_intel/": {"canonical": "/docs/integrations/providers/intel/"},
"/docs/use_cases/graph/integrations/diffbot_graphtransformer/": {"canonical": "/docs/integrations/graphs/diffbot/"},
"/docs/use_cases/graph/integrations/graph_arangodb_qa/": {"canonical": "/docs/integrations/graphs/arangodb/"},
"/docs/use_cases/graph/integrations/graph_cypher_qa/": {"canonical": "/docs/integrations/graphs/neo4j_cypher/"},
"/docs/use_cases/graph/integrations/graph_falkordb_qa/": {"canonical": "/docs/integrations/graphs/falkordb/"},
"/docs/use_cases/graph/integrations/graph_gremlin_cosmosdb_qa/": {"canonical": "/docs/integrations/graphs/azure_cosmosdb_gremlin/"},
"/docs/use_cases/graph/integrations/graph_hugegraph_qa/": {"canonical": "/docs/integrations/graphs/hugegraph/"},
"/docs/use_cases/graph/integrations/graph_kuzu_qa/": {"canonical": "/docs/integrations/graphs/kuzu_db/"},
"/docs/use_cases/graph/integrations/graph_memgraph_qa/": {"canonical": "/docs/integrations/graphs/memgraph/"},
"/docs/use_cases/graph/integrations/graph_nebula_qa/": {"canonical": "/docs/integrations/graphs/nebula_graph/"},
"/docs/use_cases/graph/integrations/graph_networkx_qa/": {"canonical": "/docs/integrations/graphs/networkx/"},
"/docs/use_cases/graph/integrations/graph_ontotext_graphdb_qa/": {"canonical": "/docs/integrations/graphs/ontotext/"},
"/docs/use_cases/graph/integrations/graph_sparql_qa/": {"canonical": "/docs/integrations/graphs/rdflib_sparql/"},
"/docs/use_cases/graph/integrations/neptune_cypher_qa/": {"canonical": "/docs/integrations/graphs/amazon_neptune_open_cypher/"},
"/docs/use_cases/graph/integrations/neptune_sparql_qa/": {"canonical": "/docs/integrations/graphs/amazon_neptune_sparql/"},
"/docs/integrations/providers/facebook_chat/": {"canonical": "/docs/integrations/providers/facebook/"},
"/docs/integrations/providers/facebook_faiss/": {"canonical": "/docs/integrations/providers/facebook/"},
"/docs/integrations/memory/google_cloud_sql_mssql/": {"canonical": "/docs/integrations/memory/google_sql_mssql/"},
"/docs/integrations/memory/google_cloud_sql_mysql/": {"canonical": "/docs/integrations/memory/google_sql_mysql/"},
"/docs/integrations/memory/google_cloud_sql_pg/": {"canonical": "/docs/integrations/memory/google_sql_pg/"},
"/docs/integrations/memory/google_datastore/": {"canonical": "/docs/integrations/memory/google_firestore_datastore/"},
"/docs/integrations/llms/huggingface_textgen_inference/": {"canonical": "/docs/integrations/llms/huggingface_endpoint/"},
"/docs/integrations/llms/huggingface_hub/": {"canonical": "/docs/integrations/llms/huggingface_endpoint/"},
"/docs/integrations/llms/bigdl/": {"canonical": "/docs/integrations/llms/ipex_llm/"},
"/docs/integrations/llms/watsonxllm/": {"canonical": "/docs/integrations/llms/ibm_watsonx/"},
"/docs/integrations/llms/pai_eas_endpoint/": {"canonical": "/docs/integrations/llms/alibabacloud_pai_eas_endpoint/"},
"/docs/integrations/vectorstores/hanavector/": {"canonical": "/docs/integrations/vectorstores/sap_hanavector/"},
"/docs/use_cases/qa_structured/sql/": {"canonical": "/docs/tutorials/sql_qa/", "alternative": ["/v0.1/docs/use_cases/sql/"]},
"/docs/contributing/packages/": {"canonical": "/docs/versions/release_policy/", "alternative": ["/v0.1/docs/packages/"]},
"/docs/community/": {"canonical": "/docs/contributing/"},
"/docs/modules/chains/(.+)/": {"canonical": "/docs/versions/migrating_chains/", "alternative": ["/v0.1/docs/modules/chains/"]},
"/docs/modules/agents/how_to/custom_llm_agent/": {"canonical": "/docs/how_to/migrate_agent/", "alternative": ["/v0.1/docs/modules/agents/how_to/custom_agent/"]},
"/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent/": {"canonical": "/docs/how_to/migrate_agent/", "alternative": ["/v0.1/docs/modules/agents/how_to/custom_agent/"]},
"/docs/modules/agents/how_to/custom_llm_chat_agent/": {"canonical": "/docs/how_to/migrate_agent/", "alternative": ["/v0.1/docs/modules/agents/how_to/custom_agent/"]},
"/docs/modules/agents/how_to/custom_mrkl_agent/": {"canonical": "/docs/how_to/migrate_agent/", "alternative": ["/v0.1/docs/modules/agents/how_to/custom_agent/"]},
"/docs/modules/agents/how_to/streaming_stdout_final_only/": {"canonical": "/docs/how_to/migrate_agent/", "alternative": ["/v0.1/docs/modules/agents/how_to/streaming/"]},
"/docs/modules/model_io/prompts/prompts_pipelining/": {"canonical": "/docs/how_to/prompts_composition/", "alternative": ["/v0.1/docs/modules/model_io/prompts/composition/"]},
"/docs/modules/model_io/output_parsers/enum/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/enum/"]},
"/docs/modules/model_io/output_parsers/pandas_dataframe/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/pandas_dataframe/"]},
"/docs/modules/model_io/output_parsers/structured/": {"canonical": "/docs/how_to/output_parser_structured/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/structured/"]},
"/docs/modules/model_io/output_parsers/xml/": {"canonical": "/docs/how_to/output_parser_xml/", "alternative": ["/v0.1/docs/modules/model_io/output_parsers/types/xml/"]},
"/docs/use_cases/question_answering/code_understanding/": {"canonical": "https://langchain-ai.github.io/langgraph/tutorials/code_assistant/langgraph_code_assistant/", "alternative": ["/v0.1/docs/use_cases/code_understanding/"]},
"/docs/use_cases/question_answering/document-context-aware-QA/": {"canonical": "/docs/how_to/#text-splitters", "alternative": ["/v0.1/docs/modules/data_connection/document_transformers/"]},
"/docs/integrations/providers/alibabacloud_opensearch/": {"canonical": "/docs/integrations/providers/alibaba_cloud/"},
"/docs/integrations/chat/pai_eas_chat_endpoint/": {"canonical": "/docs/integrations/chat/alibaba_cloud_pai_eas/"},
"/docs/integrations/providers/tencentvectordb/": {"canonical": "/docs/integrations/providers/tencent/"},
"/docs/integrations/chat/hunyuan/": {"canonical": "/docs/integrations/chat/tencent_hunyuan/"},
"/docs/integrations/document_loaders/excel/": {"canonical": "/docs/integrations/document_loaders/microsoft_excel/"},
"/docs/integrations/document_loaders/onenote/": {"canonical": "/docs/integrations/document_loaders/microsoft_onenote/"},
"/docs/integrations/providers/aws_dynamodb/": {"canonical": "/docs/integrations/platforms/aws/"},
"/docs/integrations/providers/scann/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/toolkits/google_drive/": {"canonical": "/docs/integrations/tools/google_drive/"},
"/docs/use_cases/question_answering/chat_vector_db/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/in_memory_question_answering/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/multi_retrieval_qa_router/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/multiple_retrieval/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/vector_db_qa/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/vector_db_text_generation/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/guides/langsmith/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/langsmith/"]},
"/docs/guides/langsmith/walkthrough/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/langsmith/walkthrough/"]},
"/docs/use_cases/qa_structured/integrations/sqlite/": {"canonical": "/docs/tutorials/sql_qa/", "alternative": ["/v0.1/docs/use_cases/sql/"]},
"/docs/use_cases/more/data_generation/": {"canonical": "/docs/tutorials/data_generation/", "alternative": ["/v0.1/docs/use_cases/data_generation/"]},
"/docs/use_cases/question_answering/how_to/chat_vector_db/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/how_to/conversational_retrieval_agents/": {"canonical": "/docs/tutorials/qa_chat_history/", "alternative": ["/v0.1/docs/use_cases/question_answering/conversational_retrieval_agents/"]},
"/docs/use_cases/question_answering/question_answering/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/use_cases/question_answering/how_to/local_retrieval_qa/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/local_retrieval_qa/"]},
"/docs/use_cases/question_answering/how_to/question_answering/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/modules/agents/agents/examples/mrkl_chat(.html?)/": {"canonical": "/docs/how_to/#agents", "alternative": ["/v0.1/docs/modules/agents/"]},
"/docs/integrations/": {"canonical": "/docs/integrations/providers/"},
"/docs/expression_language/cookbook/routing/": {"canonical": "/docs/how_to/routing/", "alternative": ["/v0.1/docs/expression_language/how_to/routing/"]},
"/docs/guides/expression_language/": {"canonical": "/docs/how_to/#langchain-expression-language-lcel", "alternative": ["/v0.1/docs/expression_language/"]},
"/docs/integrations/providers/amazon_api_gateway/": {"canonical": "/docs/integrations/platforms/aws/"},
"/docs/integrations/providers/huggingface/": {"canonical": "/docs/integrations/platforms/huggingface/"},
"/docs/integrations/providers/azure_blob_storage/": {"canonical": "/docs/integrations/platforms/microsoft/"},
"/docs/integrations/providers/google_vertexai_matchingengine/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/providers/aws_s3/": {"canonical": "/docs/integrations/platforms/aws/"},
"/docs/integrations/providers/azure_openai/": {"canonical": "/docs/integrations/platforms/microsoft/"},
"/docs/integrations/providers/azure_cognitive_search_/": {"canonical": "/docs/integrations/platforms/microsoft/"},
"/docs/integrations/providers/bedrock/": {"canonical": "/docs/integrations/platforms/aws/"},
"/docs/integrations/providers/google_bigquery/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/providers/google_cloud_storage/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/providers/google_drive/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/providers/google_search/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/providers/microsoft_onedrive/": {"canonical": "/docs/integrations/platforms/microsoft/"},
"/docs/integrations/providers/microsoft_powerpoint/": {"canonical": "/docs/integrations/platforms/microsoft/"},
"/docs/integrations/providers/microsoft_word/": {"canonical": "/docs/integrations/platforms/microsoft/"},
"/docs/integrations/providers/sagemaker_endpoint/": {"canonical": "/docs/integrations/platforms/aws/"},
"/docs/integrations/providers/sagemaker_tracking/": {"canonical": "/docs/integrations/callbacks/sagemaker_tracking/"},
"/docs/integrations/providers/openai/": {"canonical": "/docs/integrations/platforms/openai/"},
"/docs/integrations/cassandra/": {"canonical": "/docs/integrations/providers/cassandra/"},
"/docs/integrations/providers/providers/semadb/": {"canonical": "/docs/integrations/providers/semadb/"},
"/docs/integrations/vectorstores/vectorstores/semadb/": {"canonical": "/docs/integrations/vectorstores/semadb/"},
"/docs/integrations/vectorstores/async_faiss/": {"canonical": "/docs/integrations/vectorstores/faiss_async/"},
"/docs/integrations/vectorstores/matchingengine/": {"canonical": "/docs/integrations/vectorstores/google_vertex_ai_vector_search/"},
"/docs/integrations/tools/sqlite/": {"canonical": "/docs/tutorials/sql_qa/", "alternative": ["/v0.1/docs/use_cases/sql/"]},
"/docs/integrations/document_loaders/pdf-amazonTextractPDFLoader/": {"canonical": "/docs/integrations/document_loaders/amazon_textract/"},
"/docs/integrations/document_loaders/Etherscan/": {"canonical": "/docs/integrations/document_loaders/etherscan/"},
"/docs/integrations/document_loaders/merge_doc_loader/": {"canonical": "/docs/integrations/document_loaders/merge_doc/"},
"/docs/integrations/document_loaders/recursive_url_loader/": {"canonical": "/docs/integrations/document_loaders/recursive_url/"},
"/docs/integrations/providers/google_document_ai/": {"canonical": "/docs/integrations/platforms/google/"},
"/docs/integrations/memory/motorhead_memory_managed/": {"canonical": "/docs/integrations/memory/motorhead_memory/"},
"/docs/integrations/memory/dynamodb_chat_message_history/": {"canonical": "/docs/integrations/memory/aws_dynamodb/"},
"/docs/integrations/memory/entity_memory_with_sqlite/": {"canonical": "/docs/integrations/memory/sqlite/"},
"/docs/modules/model_io/chat/integrations/anthropic/": {"canonical": "/docs/integrations/chat/anthropic/"},
"/docs/modules/model_io/chat/integrations/azure_chat_openai/": {"canonical": "/docs/integrations/chat/azure_chat_openai/"},
"/docs/modules/model_io/chat/integrations/google_vertex_ai_palm/": {"canonical": "/docs/integrations/chat/google_vertex_ai_palm/"},
"/docs/modules/model_io/chat/integrations/openai/": {"canonical": "/docs/integrations/chat/openai/"},
"/docs/modules/model_io/chat/integrations/promptlayer_chatopenai/": {"canonical": "/docs/integrations/chat/promptlayer_chatopenai/"},
"/docs/modules/model_io/llms/integrations/ai21/": {"canonical": "/docs/integrations/llms/ai21/"},
"/docs/modules/model_io/llms/integrations/aleph_alpha/": {"canonical": "/docs/integrations/llms/aleph_alpha/"},
"/docs/modules/model_io/llms/integrations/anyscale/": {"canonical": "/docs/integrations/llms/anyscale/"},
"/docs/modules/model_io/llms/integrations/banana/": {"canonical": "/docs/integrations/llms/banana/"},
"/docs/modules/model_io/llms/integrations/baseten/": {"canonical": "/docs/integrations/llms/baseten/"},
"/docs/modules/model_io/llms/integrations/beam/": {"canonical": "/docs/integrations/llms/beam/"},
"/docs/modules/model_io/llms/integrations/bedrock/": {"canonical": "/docs/integrations/llms/bedrock/"},
"/docs/modules/model_io/llms/integrations/cohere/": {"canonical": "/docs/integrations/llms/cohere/"},
"/docs/modules/model_io/llms/integrations/ctransformers/": {"canonical": "/docs/integrations/llms/ctransformers/"},
"/docs/modules/model_io/llms/integrations/databricks/": {"canonical": "/docs/integrations/llms/databricks/"},
"/docs/modules/model_io/llms/integrations/google_vertex_ai_palm/": {"canonical": "/docs/integrations/llms/google_vertex_ai_palm/"},
"/docs/modules/model_io/llms/integrations/huggingface_pipelines/": {"canonical": "/docs/integrations/llms/huggingface_pipelines/"},
"/docs/modules/model_io/llms/integrations/jsonformer_experimental/": {"canonical": "/docs/integrations/llms/jsonformer_experimental/"},
"/docs/modules/model_io/llms/integrations/llamacpp/": {"canonical": "/docs/integrations/llms/llamacpp/"},
"/docs/modules/model_io/llms/integrations/manifest/": {"canonical": "/docs/integrations/llms/manifest/"},
"/docs/modules/model_io/llms/integrations/modal/": {"canonical": "/docs/integrations/llms/modal/"},
"/docs/modules/model_io/llms/integrations/mosaicml/": {"canonical": "/docs/integrations/llms/mosaicml/"},
"/docs/modules/model_io/llms/integrations/nlpcloud/": {"canonical": "/docs/integrations/llms/nlpcloud/"},
"/docs/modules/model_io/llms/integrations/openai/": {"canonical": "/docs/integrations/llms/openai/"},
"/docs/modules/model_io/llms/integrations/openlm/": {"canonical": "/docs/integrations/llms/openlm/"},
"/docs/modules/model_io/llms/integrations/predictionguard/": {"canonical": "/docs/integrations/llms/predictionguard/"},
"/docs/modules/model_io/llms/integrations/promptlayer_openai/": {"canonical": "/docs/integrations/llms/promptlayer_openai/"},
"/docs/modules/model_io/llms/integrations/rellm_experimental/": {"canonical": "/docs/integrations/llms/rellm_experimental/"},
"/docs/modules/model_io/llms/integrations/replicate/": {"canonical": "/docs/integrations/llms/replicate/"},
"/docs/modules/model_io/llms/integrations/runhouse/": {"canonical": "/docs/integrations/llms/runhouse/"},
"/docs/modules/model_io/llms/integrations/sagemaker/": {"canonical": "/docs/integrations/llms/sagemaker/"},
"/docs/modules/model_io/llms/integrations/stochasticai/": {"canonical": "/docs/integrations/llms/stochasticai/"},
"/docs/modules/model_io/llms/integrations/writer/": {"canonical": "/docs/integrations/llms/writer/"},
"/en/latest/use_cases/apis.html/": {"canonical": null, "alternative": ["/v0.1/docs/use_cases/apis/"]},
"/en/latest/use_cases/extraction.html/": {"canonical": "/docs/tutorials/extraction/", "alternative": ["/v0.1/docs/use_cases/extraction/"]},
"/en/latest/use_cases/summarization.html/": {"canonical": "/docs/tutorials/summarization/", "alternative": ["/v0.1/docs/use_cases/summarization/"]},
"/en/latest/use_cases/tabular.html/": {"canonical": "/docs/tutorials/sql_qa/", "alternative": ["/v0.1/docs/use_cases/sql/"]},
"/en/latest/youtube.html/": {"canonical": "/docs/additional_resources/youtube/"},
"/docs/": {"canonical": "/"},
"/en/latest/": {"canonical": "/"},
"/en/latest/index.html/": {"canonical": "/"},
"/en/latest/modules/models.html/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/"]},
"/docs/integrations/retrievers/google_cloud_enterprise_search/": {"canonical": "/docs/integrations/retrievers/google_vertex_ai_search/"},
"/docs/integrations/tools/metaphor_search/": {"canonical": "/docs/integrations/tools/exa_search/"},
"/docs/expression_language/how_to/fallbacks/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/fallbacks/"]},
"/docs/expression_language/cookbook/retrieval/": {"canonical": "/docs/tutorials/rag/", "alternative": ["/v0.1/docs/use_cases/question_answering/"]},
"/docs/expression_language/cookbook/agent/": {"canonical": "/docs/how_to/migrate_agent/", "alternative": ["/v0.1/docs/modules/agents/agent_types/xml_agent/"]},
"/docs/modules/model_io/prompts/message_prompts/": {"canonical": "/docs/how_to/#prompt-templates", "alternative": ["/v0.1/docs/modules/model_io/prompts/quick_start/"]},
"/docs/modules/model_io/prompts/pipeline/": {"canonical": "/docs/how_to/prompts_composition/", "alternative": ["/v0.1/docs/modules/model_io/prompts/composition/"]},
"/docs/expression_language/cookbook/memory/": {"canonical": "/docs/how_to/chatbots_memory/", "alternative": ["/v0.1/docs/modules/memory/"]},
"/docs/expression_language/cookbook/tools/": {"canonical": "/docs/tutorials/agents/", "alternative": ["/v0.1/docs/use_cases/tool_use/quickstart/"]},
"/docs/expression_language/cookbook/sql_db/": {"canonical": "/docs/tutorials/sql_qa/", "alternative": ["/v0.1/docs/use_cases/sql/quickstart/"]},
"/docs/expression_language/cookbook/moderation/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/safety/moderation/"]},
"/docs/expression_language/cookbook/embedding_router/": {"canonical": "/docs/how_to/routing/", "alternative": ["/v0.1/docs/expression_language/how_to/routing/"]},
"/docs/guides/structured_output/": {"canonical": "/docs/how_to/structured_output/", "alternative": ["/v0.1/docs/modules/model_io/chat/structured_output/"]},
"/docs/modules/agents/how_to/structured_tools/": {"canonical": "/docs/how_to/#tools", "alternative": ["/v0.1/docs/modules/tools/"]},
"/docs/use_cases/csv/": {"canonical": "/docs/tutorials/sql_qa/", "alternative": ["/v0.1/docs/use_cases/sql/csv/"]},
"/docs/guides/debugging/": {"canonical": "/docs/how_to/debugging/", "alternative": ["/v0.1/docs/guides/development/debugging/"]},
"/docs/guides/extending_langchain/": {"canonical": "/docs/how_to/#custom", "alternative": ["/v0.1/docs/guides/development/extending_langchain/"]},
"/docs/guides/fallbacks/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/fallbacks/"]},
"/docs/guides/model_laboratory/": {"canonical": "https://docs.smith.langchain.com/", "alternative": ["/v0.1/docs/guides/productionization/evaluation/"]},
"/docs/guides/pydantic_compatibility/": {"canonical": "/docs/how_to/pydantic_compatibility/", "alternative": ["/v0.1/docs/guides/development/pydantic_compatibility/"]},
"/docs/guides/local_llms/": {"canonical": "/docs/how_to/local_llms/", "alternative": ["/v0.1/docs/guides/development/local_llms/"]},
"/docs/modules/model_io/quick_start/": {"canonical": "/docs/how_to/#chat-models", "alternative": ["/v0.1/docs/modules/model_io/"]},
"/docs/expression_language/how_to/generators/": {"canonical": "/docs/how_to/functions/", "alternative": ["/v0.1/docs/expression_language/primitives/functions/"]},
"/docs/expression_language/how_to/functions/": {"canonical": "/docs/how_to/functions/", "alternative": ["/v0.1/docs/expression_language/primitives/functions/"]},
"/docs/expression_language/how_to/passthrough/": {"canonical": "/docs/how_to/passthrough/", "alternative": ["/v0.1/docs/expression_language/primitives/passthrough/"]},
"/docs/expression_language/how_to/map/": {"canonical": "/docs/how_to/parallel/", "alternative": ["/v0.1/docs/expression_language/primitives/parallel/"]},
"/docs/expression_language/how_to/binding/": {"canonical": "/docs/how_to/binding/", "alternative": ["/v0.1/docs/expression_language/primitives/binding/"]},
"/docs/expression_language/how_to/configure/": {"canonical": "/docs/how_to/configure/", "alternative": ["/v0.1/docs/expression_language/primitives/configure/"]},
"/docs/expression_language/cookbook/prompt_llm_parser/": {"canonical": "/docs/how_to/sequence/", "alternative": ["/v0.1/docs/expression_language/get_started/"]},
"/docs/contributing/documentation/": {"canonical": "/docs/contributing/documentation/", "alternative": ["/v0.1/docs/contributing/documentation/technical_logistics/"]},
"/docs/expression_language/cookbook/": {"canonical": "/docs/how_to/#langchain-expression-language-lcel", "alternative": ["/v0.1/docs/expression_language/"]},
"/docs/integrations/text_embedding/solar/": {"canonical": "/docs/integrations/text_embedding/upstage/"},
"/docs/integrations/chat/solar/": {"canonical": "/docs/integrations/chat/upstage/"},
// custom ones
"/docs/modules/model_io/chat/llm_chain/": {
"canonical": "/docs/tutorials/llm_chain/"
},
"/docs/modules/agents/toolkits/": {
"canonical": "/docs/integrations/tools/",
"alternative": [
"/v0.1/docs/integrations/toolkits/"
]
}
}
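
Each entry above maps a legacy docs path to its canonical home, with optional v0.1 alternatives; a few keys (e.g. "/docs/modules/chains/(.+)/") are regex patterns rather than literal paths. A minimal sketch of a consumer, assuming the map is loaded as plain JSON with the "// custom ones" comment stripped — the resolve helper below is hypothetical, not part of the repo:

import json
import re

with open("redirects.json") as f:  # assumed filename
    redirects = json.load(f)

def resolve(path: str):
    """Return the canonical target for a legacy path, or None."""
    for pattern, targets in redirects.items():
        # Literal keys still match themselves under fullmatch; keys like
        # "/docs/modules/chains/(.+)/" behave as real regexes.
        if re.fullmatch(pattern, path):
            return targets["canonical"]
    return None

print(resolve("/docs/modules/chains/llm_chain/"))  # /docs/versions/migrating_chains/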

View File

@@ -26,26 +26,6 @@
}
],
"redirects": [
{
"source": "/v0.3/docs/:path(.*/?)*",
"destination": "/docs/:path*"
},
{
"source": "/docs/modules/agents/tools/custom_tools(/?)",
"destination": "/docs/how_to/custom_tools/"
},
{
"source": "/docs/expression_language(/?)",
"destination": "/docs/concepts/#langchain-expression-language-lcel"
},
{
"source": "/docs/expression_language/interface(/?)",
"destination": "/docs/concepts/#runnable-interface"
},
{
"source": "/docs/versions/overview(/?)",
"destination": "/docs/versions/v0_2/overview/"
},
{
"source": "/docs/how_to/tool_calls_multi_modal(/?)",
"destination": "/docs/how_to/multimodal_inputs/"
@@ -77,10 +57,6 @@
{
"source": "/v0.2/docs/templates/:path(.*/?)*",
"destination": "https://github.com/langchain-ai/langchain/tree/master/templates/:path*"
},
{
"source": "/docs/integrations/providers/mlflow_ai_gateway(/?)",
"destination": "/docs/integrations/providers/mlflow/"
}
]
}
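
For context on the removed entries: Vercel redirect sources use path-to-regexp syntax, so ":path(.*/?)*" captures the remainder of the URL and ":path*" splices it into the destination. A rough Python approximation (the regex below is illustrative, not Vercel's actual matcher):

import re
from typing import Optional

def rewrite(url: str) -> Optional[str]:
    # Approximates {"source": "/v0.3/docs/:path(.*/?)*", "destination": "/docs/:path*"}
    m = re.fullmatch(r"/v0\.3/docs/(?P<path>.*)", url)
    return f"/docs/{m.group('path')}" if m else None

print(rewrite("/v0.3/docs/how_to/custom_tools/"))  # /docs/how_to/custom_tools/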

View File

@@ -13,7 +13,7 @@ license = "MIT"
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
langchain-core = "^0.3.0"
langchain-core = "^0.3.0.dev"
[tool.poetry.group.test]
optional = true
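
The pin difference in this hunk is about prereleases: Poetry's caret expands "^0.3.0" to roughly ">=0.3.0,<0.4.0", which excludes ".dev" builds, while "^0.3.0.dev" admits them. A sketch of the distinction using packaging to emulate Poetry's ranges (an emulation, not Poetry itself; version strings are illustrative):

from packaging.specifiers import SpecifierSet

release_pin = SpecifierSet(">=0.3.0,<0.4.0")   # ~ "^0.3.0"
dev_pin = SpecifierSet(">=0.3.0.dev0,<0.4.0")  # ~ "^0.3.0.dev"

print(release_pin.contains("0.3.0.dev1"))  # False: prereleases excluded by default
print(dev_pin.contains("0.3.0.dev1"))      # True: a prerelease bound opts them in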

View File

@@ -5,7 +5,6 @@ from pathlib import Path
import rich
import typer
from gritql import run
from typer import Option
def get_gritdir_path() -> Path:
@@ -16,28 +15,15 @@ def get_gritdir_path() -> Path:
def migrate(
ctx: typer.Context,
# Using diff instead of dry-run for backwards compatibility with the old CLI
diff: bool = Option(
False,
"--diff",
help="Show the changes that would be made without applying them.",
),
interactive: bool = Option(
False,
"--interactive",
help="Prompt for confirmation before making each change",
),
) -> None:
"""Migrate langchain to the most recent version.
Any undocumented arguments will be passed to the Grit CLI.
"""
rich.print(
"✈️ This script will help you migrate to a LangChain 0.3. "
"✈️ This script will help you migrate to a recent version LangChain. "
"This migration script will attempt to replace old imports in the code "
"with new ones. "
"If you need to migrate to LangChain 0.2, please downgrade to version 0.0.29 "
"of the langchain-cli.\n\n"
"with new ones.\n\n"
"🔄 You will need to run the migration script TWICE to migrate (e.g., "
"to update llms import from langchain, the script will first move them to "
"corresponding imports from the community package, and on the second "
@@ -59,15 +45,9 @@ def migrate(
rich.print("-" * 10)
rich.print()
args = list(ctx.args)
if interactive:
args.append("--interactive")
if diff:
args.append("--dry-run")
final_code = run.apply_pattern(
"langchain_all_migrations()",
args,
ctx.args,
grit_dir=get_gritdir_path(),
)
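
The surrounding hunk forwards ctx.args to Grit, which is what makes "Any undocumented arguments will be passed to the Grit CLI" work. A stripped-down sketch of that pattern (command and flag names here are illustrative):

import typer

app = typer.Typer()

@app.command(context_settings={"allow_extra_args": True, "ignore_unknown_options": True})
def migrate(ctx: typer.Context) -> None:
    # Anything Typer itself did not parse lands in ctx.args unchanged.
    print("forwarding to grit:", list(ctx.args))

if __name__ == "__main__":
    app()  # e.g. `python cli.py --verbose` prints: forwarding to grit: ['--verbose']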

View File

@@ -7,7 +7,7 @@ readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
langchain-core = "^0.3.0"
langchain-core = "^0.3.0.dev"
langchain-openai = ">=0.0.1"

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-cli"
version = "0.0.31"
version = "0.0.30"
description = "CLI for interacting with LangChain"
authors = ["Erick Friis <erick@langchain.dev>"]
readme = "README.md"

View File

@@ -15,7 +15,7 @@ LangChain Community contains third-party integrations that implement the base in
For full documentation see the [API reference](https://api.python.langchain.com/en/stable/community_api_reference.html).
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](https://raw.githubusercontent.com/langchain-ai/langchain/e1d113ea84a2edcf4a7709fc5be0e972ea74a5d9/docs/static/svg/langchain_stack_062024.svg "LangChain Framework Overview")
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](../../docs/static/svg/langchain_stack_062024.svg "LangChain Framework Overview")
## 📕 Releases & Versioning

View File

@@ -301,7 +301,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable):
inheritable_metadata=config.get("metadata"),
)
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name") or self.get_name()
dumpd(self), input, name=config.get("run_name")
)
files = _convert_file_ids_into_attachments(kwargs.get("file_ids", []))
@@ -437,7 +437,7 @@ class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable):
inheritable_metadata=config.get("metadata"),
)
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name") or self.get_name()
dumpd(self), input, name=config.get("run_name")
)
files = _convert_file_ids_into_attachments(kwargs.get("file_ids", []))
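
The only change in both hunks is the "or self.get_name()" fallback, which keeps the traced run from being unnamed when the config omits run_name. In miniature (the config dict and name below are stand-ins):

config = {"metadata": {}}  # a RunnableConfig with no "run_name"
name = config.get("run_name") or "OpenAIAssistantV2Runnable"  # self.get_name() stand-in
print(name)  # OpenAIAssistantV2Runnable rather than None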

View File

@@ -8,18 +8,6 @@ from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatGeneration, LLMResult
MODEL_COST_PER_1K_TOKENS = {
# OpenAI o1-preview input
"o1-preview": 0.015,
"o1-preview-2024-09-12": 0.015,
# OpenAI o1-preview output
"o1-preview-completion": 0.06,
"o1-preview-2024-09-12-completion": 0.06,
# OpenAI o1-mini input
"o1-mini": 0.003,
"o1-mini-2024-09-12": 0.003,
# OpenAI o1-mini output
"o1-mini-completion": 0.012,
"o1-mini-2024-09-12-completion": 0.012,
# GPT-4o-mini input
"gpt-4o-mini": 0.00015,
"gpt-4o-mini-2024-07-18": 0.00015,
@@ -165,7 +153,6 @@ def standardize_model_name(
model_name.startswith("gpt-4")
or model_name.startswith("gpt-3.5")
or model_name.startswith("gpt-35")
or model_name.startswith("o1-")
or ("finetuned" in model_name and "legacy" not in model_name)
):
return model_name + "-completion"
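
The table keys come in pairs because standardize_model_name() appends "-completion" for completion-side lookups, so prompt and completion tokens are priced separately. A sketch of the consuming arithmetic, using the o1-preview rates from the hunk (the helper name and token counts are made up):

MODEL_COST_PER_1K_TOKENS = {
    "o1-preview": 0.015,            # prompt tokens, USD per 1K
    "o1-preview-completion": 0.06,  # completion tokens, USD per 1K
}

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    return (
        prompt_tokens * MODEL_COST_PER_1K_TOKENS[model]
        + completion_tokens * MODEL_COST_PER_1K_TOKENS[model + "-completion"]
    ) / 1000

print(estimate_cost("o1-preview", 1000, 500))  # 0.045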

View File

@@ -53,15 +53,13 @@ class LLMThoughtLabeler:
labeling logic.
"""
@staticmethod
def get_initial_label() -> str:
def get_initial_label(self) -> str:
"""Return the markdown label for a new LLMThought that doesn't have
an associated tool yet.
"""
return f"{THINKING_EMOJI} **Thinking...**"
@staticmethod
def get_tool_label(tool: ToolRecord, is_complete: bool) -> str:
def get_tool_label(self, tool: ToolRecord, is_complete: bool) -> str:
"""Return the label for an LLMThought that has an associated
tool.
@@ -93,15 +91,13 @@ class LLMThoughtLabeler:
label = f"{emoji} **{name}:** {input}"
return label
@staticmethod
def get_history_label() -> str:
def get_history_label(self) -> str:
"""Return a markdown label for the special 'history' container
that contains overflow thoughts.
"""
return f"{HISTORY_EMOJI} **History**"
@staticmethod
def get_final_agent_thought_label() -> str:
def get_final_agent_thought_label(self) -> str:
"""Return the markdown label for the agent's final thought -
the "Now I have the answer" thought, that doesn't involve
a tool.

View File

@@ -204,7 +204,7 @@ def _convert_delta_to_message_chunk(
role = dct.get("role")
content = dct.get("content", "")
additional_kwargs = {}
tool_calls = dct.get("tool_calls", None)
tool_calls = dct.get("tool_call", None)
if tool_calls is not None:
additional_kwargs["tool_calls"] = tool_calls

View File

@@ -359,7 +359,6 @@ if TYPE_CHECKING:
)
from langchain_community.document_loaders.pebblo import (
PebbloSafeLoader,
PebbloTextLoader,
)
from langchain_community.document_loaders.polars_dataframe import (
PolarsDataFrameLoader,
@@ -651,7 +650,6 @@ _module_lookup = {
"PDFPlumberLoader": "langchain_community.document_loaders.pdf",
"PagedPDFSplitter": "langchain_community.document_loaders.pdf",
"PebbloSafeLoader": "langchain_community.document_loaders.pebblo",
"PebbloTextLoader": "langchain_community.document_loaders.pebblo",
"PlaywrightURLLoader": "langchain_community.document_loaders.url_playwright",
"PolarsDataFrameLoader": "langchain_community.document_loaders.polars_dataframe",
"PsychicLoader": "langchain_community.document_loaders.psychic",
@@ -857,7 +855,6 @@ __all__ = [
"PDFPlumberLoader",
"PagedPDFSplitter",
"PebbloSafeLoader",
"PebbloTextLoader",
"PlaywrightURLLoader",
"PolarsDataFrameLoader",
"PsychicLoader",

View File

@@ -20,37 +20,13 @@ class MongodbLoader(BaseLoader):
*,
filter_criteria: Optional[Dict] = None,
field_names: Optional[Sequence[str]] = None,
metadata_names: Optional[Sequence[str]] = None,
include_db_collection_in_metadata: bool = True,
) -> None:
"""
Initializes the MongoDB loader with necessary database connection
details and configurations.
Args:
connection_string (str): MongoDB connection URI.
db_name (str): Name of the database to connect to.
collection_name (str): Name of the collection to fetch documents from.
filter_criteria (Optional[Dict]): MongoDB filter criteria for querying
documents.
field_names (Optional[Sequence[str]]): List of field names to retrieve
from documents.
metadata_names (Optional[Sequence[str]]): Additional metadata fields to
extract from documents.
include_db_collection_in_metadata (bool): Flag to include database and
collection names in metadata.
Raises:
ImportError: If the motor library is not installed.
ValueError: If any necessary argument is missing.
"""
try:
from motor.motor_asyncio import AsyncIOMotorClient
except ImportError as e:
raise ImportError(
"Cannot import from motor, please install with `pip install motor`."
) from e
if not connection_string:
raise ValueError("connection_string must be provided.")
@@ -63,10 +39,8 @@ class MongodbLoader(BaseLoader):
self.client = AsyncIOMotorClient(connection_string)
self.db_name = db_name
self.collection_name = collection_name
self.field_names = field_names or []
self.field_names = field_names
self.filter_criteria = filter_criteria or {}
self.metadata_names = metadata_names or []
self.include_db_collection_in_metadata = include_db_collection_in_metadata
self.db = self.client.get_database(db_name)
self.collection = self.db.get_collection(collection_name)
@@ -86,24 +60,36 @@ class MongodbLoader(BaseLoader):
return asyncio.run(self.aload())
async def aload(self) -> List[Document]:
"""Asynchronously loads data into Document objects."""
"""Load data into Document objects."""
result = []
total_docs = await self.collection.count_documents(self.filter_criteria)
projection = self._construct_projection()
# Construct the projection dictionary if field_names are specified
projection = (
{field: 1 for field in self.field_names} if self.field_names else None
)
async for doc in self.collection.find(self.filter_criteria, projection):
metadata = self._extract_fields(doc, self.metadata_names, default="")
# Optionally add database and collection names to metadata
if self.include_db_collection_in_metadata:
metadata.update(
{"database": self.db_name, "collection": self.collection_name}
)
metadata = {
"database": self.db_name,
"collection": self.collection_name,
}
# Extract text content from filtered fields or use the entire document
if self.field_names is not None:
fields = self._extract_fields(doc, self.field_names, default="")
fields = {}
for name in self.field_names:
# Split the field names to handle nested fields
keys = name.split(".")
value = doc
for key in keys:
if key in value:
value = value[key]
else:
value = ""
break
fields[name] = value
texts = [str(value) for value in fields.values()]
text = " ".join(texts)
else:
@@ -118,29 +104,3 @@ class MongodbLoader(BaseLoader):
)
return result
def _construct_projection(self) -> Optional[Dict]:
"""Constructs the projection dictionary for MongoDB query based
on the specified field names and metadata names."""
field_names = list(self.field_names) or []
metadata_names = list(self.metadata_names) or []
all_fields = field_names + metadata_names
return {field: 1 for field in all_fields} if all_fields else None
def _extract_fields(
self,
document: Dict,
fields: Sequence[str],
default: str = "",
) -> Dict:
"""Extracts and returns values for specified fields from a document."""
extracted = {}
for field in fields or []:
value = document
for key in field.split("."):
value = value.get(key, default)
if value == default:
break
new_field_name = field.replace(".", "_")
extracted[new_field_name] = value
return extracted
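For context, a self-contained sketch of the dotted-path lookup that `_extract_fields` performs; the function name and sample document here are illustrative:

def extract_fields(document: dict, fields: list, default: str = "") -> dict:
    # Walk dotted paths like "address.city" through nested dicts.
    extracted = {}
    for field in fields:
        value = document
        for key in field.split("."):
            value = value.get(key, default) if isinstance(value, dict) else default
            if value == default:
                break
        extracted[field.replace(".", "_")] = value
    return extracted

doc = {"name": "Ada", "address": {"city": "London"}}
print(extract_fields(doc, ["name", "address.city", "missing.key"]))
# -> {'name': 'Ada', 'address_city': 'London', 'missing_key': ''}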

View File

@@ -267,7 +267,6 @@ class PyMuPDFParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) -> Iterator[Document]: # type: ignore[valid-type]
"""Lazily parse the blob."""
import fitz
with blob.as_bytes_io() as file_path: # type: ignore[attr-defined]
@@ -278,49 +277,25 @@ class PyMuPDFParser(BaseBlobParser):
yield from [
Document(
page_content=self._get_page_content(doc, page, blob),
metadata=self._extract_metadata(doc, page, blob),
page_content=page.get_text(**self.text_kwargs)
+ self._extract_images_from_page(doc, page),
metadata=dict(
{
"source": blob.source, # type: ignore[attr-defined]
"file_path": blob.source, # type: ignore[attr-defined]
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc
]
def _get_page_content(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page, blob: Blob
) -> str:
"""
Get the text of the page using PyMuPDF and RapidOCR and issue a warning
if it is empty.
"""
content = page.get_text(**self.text_kwargs) + self._extract_images_from_page(
doc, page
)
if not content:
warnings.warn(
f"Warning: Empty content on page "
f"{page.number} of document {blob.source}"
)
return content
def _extract_metadata(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page, blob: Blob
) -> dict:
"""Extract metadata from the document and page."""
return dict(
{
"source": blob.source, # type: ignore[attr-defined]
"file_path": blob.source, # type: ignore[attr-defined]
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if isinstance(doc.metadata[k], (str, int))
},
)
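The refactor above keeps the behavior of warning on empty pages; a standalone sketch of that pattern using PyMuPDF directly (the input file name is illustrative):

import warnings

import fitz  # PyMuPDF

with fitz.open("example.pdf") as doc:  # hypothetical input file
    for page in doc:
        content = page.get_text()
        if not content:
            warnings.warn(f"Empty content on page {page.number} of example.pdf")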
def _extract_images_from_page(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page
) -> str:

View File

@@ -4,7 +4,7 @@ import logging
import os
import uuid
from importlib.metadata import version
from typing import Any, Dict, Iterable, Iterator, List, Optional
from typing import Dict, Iterator, List, Optional
from langchain_core.documents import Document
@@ -271,67 +271,3 @@ class PebbloSafeLoader(BaseLoader):
doc_metadata["pb_checksum"] = classified_docs.get(doc.pb_id, {}).get(
"pb_checksum", None
)
class PebbloTextLoader(BaseLoader):
"""
Loader for text data.
Since PebbloSafeLoader is a wrapper around document loaders, this loader is
used to load text data directly into Documents.
"""
def __init__(
self,
texts: Iterable[str],
*,
source: Optional[str] = None,
ids: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
metadatas: Optional[List[Dict[str, Any]]] = None,
) -> None:
"""
Args:
texts: Iterable of text data.
source: Source of the text data.
Optional. Defaults to None.
ids: List of unique identifiers for each text.
Optional. Defaults to None.
metadata: Metadata for all texts.
Optional. Defaults to None.
metadatas: List of metadata for each text.
Optional. Defaults to None.
"""
self.texts = texts
self.source = source
self.ids = ids
self.metadata = metadata
self.metadatas = metadatas
def lazy_load(self) -> Iterator[Document]:
"""
Lazy load text data into Documents.
Returns:
Iterator of Documents
"""
for i, text in enumerate(self.texts):
_id = None
metadata = dict(self.metadata or {})  # copy so the shared metadata dict is not mutated across texts
if self.metadatas and i < len(self.metadatas) and self.metadatas[i]:
metadata.update(self.metadatas[i])
if self.ids and i < len(self.ids):
_id = self.ids[i]
yield Document(id=_id, page_content=text, metadata=metadata)
def load(self) -> List[Document]:
"""
Load text data into Documents.
Returns:
List of Documents
"""
documents = []
for doc in self.lazy_load():
documents.append(doc)
return documents
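A minimal usage sketch of the loader defined above, on the side of this diff where `PebbloTextLoader` exists (texts, ids, and metadata are illustrative):

from langchain_community.document_loaders.pebblo import PebbloTextLoader

loader = PebbloTextLoader(
    ["first snippet", "second snippet"],
    source="notes.txt",
    ids=["1", "2"],
    metadatas=[{"topic": "a"}, {"topic": "b"}],
)
for doc in loader.lazy_load():
    print(doc.id, doc.page_content, doc.metadata)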

View File

@@ -227,7 +227,7 @@ class RecursiveUrlLoader(BaseLoader):
"https://docs.python.org/3.9/",
prevent_outside=True,
base_url="https://docs.python.org",
link_regex=r'<a\\s+(?:[^>]*?\\s+)?href="([^"]*(?=index)[^"]*)"',
link_regex=r'<a\s+(?:[^>]*?\s+)?href="([^"]*(?=index)[^"]*)"',
exclude_dirs=['https://docs.python.org/3.9/faq']
)
docs = loader.load()
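The docstring fix above is purely about escaping: in a raw string, `r"\\s"` is a literal backslash followed by `s`, while `r"\s"` is the regex whitespace class. A quick self-contained check:

import re

print(bool(re.search(r"\s", "a b")))   # True: matches the space
print(bool(re.search(r"\\s", "a b")))  # False: would need a literal backslash in the text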

View File

@@ -132,7 +132,6 @@ class BeautifulSoupTransformer(BaseDocumentTransformer):
Args:
html_content: The original HTML content string.
tags: A list of tags to be extracted from the HTML.
remove_comments: If set to True, the comments will be removed.
Returns:
A string combining the content of the extracted tags.
@@ -185,7 +184,6 @@ def get_navigable_strings(
Args:
element: A BeautifulSoup element.
remove_comments: If set to True, the comments will be removed.
Returns:
A generator of strings.

View File

@@ -213,7 +213,7 @@ class SambaStudioEmbeddings(BaseModel, Embeddings):
)
try:
if params.get("select_expert"):
embedding = response.json()["predictions"]
embedding = response.json()["predictions"][0]
else:
embedding = response.json()["predictions"]
embeddings.extend(embedding)
@@ -299,7 +299,7 @@ class SambaStudioEmbeddings(BaseModel, Embeddings):
)
try:
if params.get("select_expert"):
embedding = response.json()["predictions"][0]
embedding = response.json()["predictions"][0][0]
else:
embedding = response.json()["predictions"][0]
except KeyError:
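The hunk above only changes how deeply the `predictions` payload is indexed. A toy illustration of the two shapes this implies, with invented payloads (inferred from the indexing above, not from SambaStudio documentation):

# Without "select_expert": a flat list of embedding vectors.
resp_plain = {"predictions": [[0.1, 0.2]]}
vector = resp_plain["predictions"][0]          # -> [0.1, 0.2]

# With "select_expert": the vectors are wrapped one level deeper.
resp_expert = {"predictions": [[[0.1, 0.2]]]}
vector = resp_expert["predictions"][0][0]      # -> [0.1, 0.2]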

View File

@@ -1,840 +1,7 @@
from __future__ import annotations
from abc import abstractmethod
from collections.abc import AsyncIterable, Collection, Iterable, Iterator
from typing import (
Any,
ClassVar,
Optional,
from langchain_core.graph_vectorstores.base import (
GraphVectorStore,
GraphVectorStoreRetriever,
Node,
)
from langchain_core._api import beta
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.load import Serializable
from langchain_core.runnables import run_in_executor
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
from pydantic import Field
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link
def _has_next(iterator: Iterator) -> bool:
"""Checks if the iterator has more elements.
Warning: consumes an element from the iterator."""
sentinel = object()
return next(iterator, sentinel) is not sentinel
@beta()
class Node(Serializable):
"""Node in the GraphVectorStore.
Edges exist from nodes with an outgoing link to nodes with a matching incoming link.
For instance, two nodes `a` and `b` connected over a hyperlink ``https://some-url``
would look like:
.. code-block:: python
[
Node(
id="a",
text="some text a",
links= [
Link(kind="hyperlink", tag="https://some-url", direction="incoming")
],
),
Node(
id="b",
text="some text b",
links= [
Link(kind="hyperlink", tag="https://some-url", direction="outgoing")
],
)
]
"""
id: Optional[str] = None
"""Unique ID for the node. Will be generated by the GraphVectorStore if not set."""
text: str
"""Text contained by the node."""
metadata: dict = Field(default_factory=dict)
"""Metadata for the node."""
links: list[Link] = Field(default_factory=list)
"""Links associated with the node."""
def _texts_to_nodes(
texts: Iterable[str],
metadatas: Optional[Iterable[dict]],
ids: Optional[Iterable[str]],
) -> Iterator[Node]:
metadatas_it = iter(metadatas) if metadatas else None
ids_it = iter(ids) if ids else None
for text in texts:
try:
_metadata = next(metadatas_it).copy() if metadatas_it else {}
except StopIteration as e:
raise ValueError("texts iterable longer than metadatas") from e
try:
_id = next(ids_it) if ids_it else None
except StopIteration as e:
raise ValueError("texts iterable longer than ids") from e
links = _metadata.pop(METADATA_LINKS_KEY, [])
if not isinstance(links, list):
links = list(links)
yield Node(
id=_id,
metadata=_metadata,
text=text,
links=links,
)
if ids_it and _has_next(ids_it):
raise ValueError("ids iterable longer than texts")
if metadatas_it and _has_next(metadatas_it):
raise ValueError("metadatas iterable longer than texts")
def _documents_to_nodes(documents: Iterable[Document]) -> Iterator[Node]:
for doc in documents:
metadata = doc.metadata.copy()
links = metadata.pop(METADATA_LINKS_KEY, [])
if not isinstance(links, list):
links = list(links)
yield Node(
id=doc.id,
metadata=metadata,
text=doc.page_content,
links=links,
)
@beta()
def nodes_to_documents(nodes: Iterable[Node]) -> Iterator[Document]:
"""Convert nodes to documents.
Args:
nodes: The nodes to convert to documents.
Returns:
The documents generated from the nodes.
"""
for node in nodes:
metadata = node.metadata.copy()
metadata[METADATA_LINKS_KEY] = [
# Convert the core `Link` (from the node) back to the local `Link`.
Link(kind=link.kind, direction=link.direction, tag=link.tag)
for link in node.links
]
yield Document(
id=node.id,
page_content=node.text,
metadata=metadata,
)
@beta(message="Added in version 0.3.1 of langchain_community. API subject to change.")
class GraphVectorStore(VectorStore):
"""A hybrid vector-and-graph graph store.
Document chunks support vector-similarity search as well as edges linking
chunks based on structural and semantic properties.
.. versionadded:: 0.3.1
"""
@abstractmethod
def add_nodes(
self,
nodes: Iterable[Node],
**kwargs: Any,
) -> Iterable[str]:
"""Add nodes to the graph store.
Args:
nodes: the nodes to add.
"""
async def aadd_nodes(
self,
nodes: Iterable[Node],
**kwargs: Any,
) -> AsyncIterable[str]:
"""Add nodes to the graph store.
Args:
nodes: the nodes to add.
"""
iterator = iter(await run_in_executor(None, self.add_nodes, nodes, **kwargs))
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
*,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> list[str]:
"""Run more texts through the embeddings and add to the vectorstore.
The Links present in the metadata field `links` will be extracted to create
the `Node` links.
E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
store.add_texts(
ids=["a", "b"],
texts=["some text a", "some text b"],
metadatas=[
{
"links": [
Link.incoming(kind="hyperlink", tag="https://some-url")
]
},
{
"links": [
Link.outgoing(kind="hyperlink", tag="https://some-url")
]
},
],
)
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
The metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
ids: Optional list of IDs associated with the texts.
**kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
nodes = _texts_to_nodes(texts, metadatas, ids)
return list(self.add_nodes(nodes, **kwargs))
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
*,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> list[str]:
"""Run more texts through the embeddings and add to the vectorstore.
The Links present in the metadata field `links` will be extracted to create
the `Node` links.
E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
await store.aadd_texts(
ids=["a", "b"],
texts=["some text a", "some text b"],
metadatas=[
{
"links": [
Link.incoming(kind="hyperlink", tag="https://some-url")
]
},
{
"links": [
Link.outgoing(kind="hyperlink", tag="https://some-url")
]
},
],
)
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
The metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
ids: Optional list of IDs associated with the texts.
**kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
nodes = _texts_to_nodes(texts, metadatas, ids)
return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]
def add_documents(
self,
documents: Iterable[Document],
**kwargs: Any,
) -> list[str]:
"""Run more documents through the embeddings and add to the vectorstore.
The Links present in the document metadata field `links` will be extracted to
create the `Node` links.
E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
store.add_documents(
[
Document(
id="a",
page_content="some text a",
metadata={
"links": [
Link.incoming(kind="hyperlink", tag="http://some-url")
]
}
),
Document(
id="b",
page_content="some text b",
metadata={
"links": [
Link.outgoing(kind="hyperlink", tag="http://some-url")
]
}
),
]
)
Args:
documents: Documents to add to the vectorstore.
The document's metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
Returns:
List of IDs of the added texts.
"""
nodes = _documents_to_nodes(documents)
return list(self.add_nodes(nodes, **kwargs))
async def aadd_documents(
self,
documents: Iterable[Document],
**kwargs: Any,
) -> list[str]:
"""Run more documents through the embeddings and add to the vectorstore.
The Links present in the document metadata field `links` will be extracted to
create the `Node` links.
E.g., if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
store.add_documents(
[
Document(
id="a",
page_content="some text a",
metadata={
"links": [
Link.incoming(kind="hyperlink", tag="http://some-url")
]
}
),
Document(
id="b",
page_content="some text b",
metadata={
"links": [
Link.outgoing(kind="hyperlink", tag="http://some-url")
]
}
),
]
)
Args:
documents: Documents to add to the vectorstore.
The document's metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
Returns:
List of IDs of the added texts.
"""
nodes = _documents_to_nodes(documents)
return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]
@abstractmethod
def traversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 1,
**kwargs: Any,
) -> Iterable[Document]:
"""Retrieve documents from traversing this graph store.
First, `k` nodes are retrieved using a search for each `query` string.
Then, additional nodes are discovered up to the given `depth` from those
starting nodes.
Args:
query: The query string.
k: The number of Documents to return from the initial search.
Defaults to 4. Applies to each of the query strings.
depth: The maximum depth of edges to traverse. Defaults to 1.
Returns:
Retrieved documents.
"""
async def atraversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 1,
**kwargs: Any,
) -> AsyncIterable[Document]:
"""Retrieve documents from traversing this graph store.
First, `k` nodes are retrieved using a search for each `query` string.
Then, additional nodes are discovered up to the given `depth` from those
starting nodes.
Args:
query: The query string.
k: The number of Documents to return from the initial search.
Defaults to 4. Applies to each of the query strings.
depth: The maximum depth of edges to traverse. Defaults to 1.
Returns:
Retrieved documents.
"""
iterator = iter(
await run_in_executor(
None, self.traversal_search, query, k=k, depth=depth, **kwargs
)
)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
@abstractmethod
def mmr_traversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 2,
fetch_k: int = 100,
adjacent_k: int = 10,
lambda_mult: float = 0.5,
score_threshold: float = float("-inf"),
**kwargs: Any,
) -> Iterable[Document]:
"""Retrieve documents from this graph store using MMR-traversal.
This strategy first retrieves the top `fetch_k` results by similarity to
the question. It then selects the top `k` results based on
maximum-marginal relevance using the given `lambda_mult`.
At each step, it considers the (remaining) documents from `fetch_k` as
well as any documents connected by edges to a selected document
retrieved based on similarity (a "root").
Args:
query: The query string to search for.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch via similarity.
Defaults to 100.
adjacent_k: Number of adjacent Documents to fetch.
Defaults to 10.
depth: Maximum depth of a node (number of edges) from a node
retrieved via similarity. Defaults to 2.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding to maximum
diversity and 1 to minimum diversity. Defaults to 0.5.
score_threshold: Only documents with a score greater than or equal
to this threshold will be chosen. Defaults to negative infinity.
"""
async def ammr_traversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 2,
fetch_k: int = 100,
adjacent_k: int = 10,
lambda_mult: float = 0.5,
score_threshold: float = float("-inf"),
**kwargs: Any,
) -> AsyncIterable[Document]:
"""Retrieve documents from this graph store using MMR-traversal.
This strategy first retrieves the top `fetch_k` results by similarity to
the question. It then selects the top `k` results based on
maximum-marginal relevance using the given `lambda_mult`.
At each step, it considers the (remaining) documents from `fetch_k` as
well as any documents connected by edges to a selected document
retrieved based on similarity (a "root").
Args:
query: The query string to search for.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch via similarity.
Defaults to 100.
adjacent_k: Number of adjacent Documents to fetch.
Defaults to 10.
depth: Maximum depth of a node (number of edges) from a node
retrieved via similarity. Defaults to 2.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding to maximum
diversity and 1 to minimum diversity. Defaults to 0.5.
score_threshold: Only documents with a score greater than or equal
to this threshold will be chosen. Defaults to negative infinity.
"""
iterator = iter(
await run_in_executor(
None,
self.mmr_traversal_search,
query,
k=k,
fetch_k=fetch_k,
adjacent_k=adjacent_k,
depth=depth,
lambda_mult=lambda_mult,
score_threshold=score_threshold,
**kwargs,
)
)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
return list(self.traversal_search(query, k=k, depth=0))
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> list[Document]:
return list(
self.mmr_traversal_search(
query, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, depth=0
)
)
async def asimilarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
return [doc async for doc in self.atraversal_search(query, k=k, depth=0)]
def search(self, query: str, search_type: str, **kwargs: Any) -> list[Document]:
if search_type == "similarity":
return self.similarity_search(query, **kwargs)
elif search_type == "similarity_score_threshold":
docs_and_similarities = self.similarity_search_with_relevance_scores(
query, **kwargs
)
return [doc for doc, _ in docs_and_similarities]
elif search_type == "mmr":
return self.max_marginal_relevance_search(query, **kwargs)
elif search_type == "traversal":
return list(self.traversal_search(query, **kwargs))
elif search_type == "mmr_traversal":
return list(self.mmr_traversal_search(query, **kwargs))
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold', "
"'mmr' or 'traversal'."
)
async def asearch(
self, query: str, search_type: str, **kwargs: Any
) -> list[Document]:
if search_type == "similarity":
return await self.asimilarity_search(query, **kwargs)
elif search_type == "similarity_score_threshold":
docs_and_similarities = await self.asimilarity_search_with_relevance_scores(
query, **kwargs
)
return [doc for doc, _ in docs_and_similarities]
elif search_type == "mmr":
return await self.amax_marginal_relevance_search(query, **kwargs)
elif search_type == "traversal":
return [doc async for doc in self.atraversal_search(query, **kwargs)]
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold', "
"'mmr' or 'traversal'."
)
def as_retriever(self, **kwargs: Any) -> GraphVectorStoreRetriever:
"""Return GraphVectorStoreRetriever initialized from this GraphVectorStore.
Args:
**kwargs: Keyword arguments to pass to the search function.
Can include:
- search_type (Optional[str]): Defines the type of search that
the Retriever should perform.
Can be ``traversal`` (default), ``mmr_traversal``, ``similarity``,
``mmr``, or ``similarity_score_threshold``.
- search_kwargs (Optional[Dict]): Keyword arguments to pass to the
search function. Can include things like:
- k(int): Amount of documents to return (Default: 4).
- depth(int): The maximum depth of edges to traverse (Default: 1).
- score_threshold(float): Minimum relevance threshold
for similarity_score_threshold.
- fetch_k(int): Amount of documents to pass to MMR algorithm
(Default: 20).
- lambda_mult(float): Diversity of results returned by MMR;
1 for minimum diversity and 0 for maximum. (Default: 0.5).
Returns:
Retriever for this GraphVectorStore.
Examples:
.. code-block:: python
# Retrieve documents traversing edges
docsearch.as_retriever(
search_type="traversal",
search_kwargs={'k': 6, 'depth': 3}
)
# Retrieve more documents with higher diversity
# Useful if your dataset has many similar documents
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 6, 'lambda_mult': 0.25}
)
# Fetch more documents for the MMR algorithm to consider
# But only return the top 5
docsearch.as_retriever(
search_type="mmr",
search_kwargs={'k': 5, 'fetch_k': 50}
)
# Only retrieve documents that have a relevance score
# Above a certain threshold
docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={'score_threshold': 0.8}
)
# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})
"""
return GraphVectorStoreRetriever(vectorstore=self, **kwargs)
@beta(message="Added in version 0.3.1 of langchain_community. API subject to change.")
class GraphVectorStoreRetriever(VectorStoreRetriever):
"""Retriever for GraphVectorStore.
A graph vector store retriever is a retriever that uses a graph vector store to
retrieve documents.
It is similar to a vector store retriever, except that it uses both vector
similarity and graph connections to retrieve documents.
It uses the search methods implemented by a graph vector store, like traversal
search and MMR traversal search, to query the texts in the graph vector store.
Example::
store = CassandraGraphVectorStore(...)
retriever = store.as_retriever()
retriever.invoke("What is ...")
.. seealso::
:mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
How to use a graph vector store as a retriever
==============================================
Creating a retriever from a graph vector store
----------------------------------------------
You can build a retriever from a graph vector store using its
:meth:`~langchain_community.graph_vectorstores.base.GraphVectorStore.as_retriever`
method.
First we instantiate a graph vector store.
We will use a store backed by Cassandra
:class:`~langchain_community.graph_vectorstores.cassandra.CassandraGraphVectorStore`
graph vector store::
from langchain_community.document_loaders import TextLoader
from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.graph_vectorstores.extractors import (
KeybertLinkExtractor,
LinkExtractorTransformer,
)
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
pipeline = LinkExtractorTransformer([KeybertLinkExtractor()])
pipeline.transform_documents(texts)
embeddings = OpenAIEmbeddings()
graph_vectorstore = CassandraGraphVectorStore.from_documents(texts, embeddings)
We can then instantiate a retriever::
retriever = graph_vectorstore.as_retriever()
This creates a retriever (specifically a ``GraphVectorStoreRetriever``), which we
can use in the usual way::
docs = retriever.invoke("what did the president say about ketanji brown jackson?")
Maximum marginal relevance traversal retrieval
----------------------------------------------
By default, the graph vector store retriever uses similarity search, then expands
the retrieved set by following a fixed number of graph edges.
If the underlying graph vector store supports maximum marginal relevance traversal,
you can specify that as the search type.
MMR-traversal is a retrieval method combining MMR and graph traversal.
The strategy first retrieves the top fetch_k results by similarity to the question.
It then iteratively expands the set of fetched documents by following adjacent_k
graph edges and selects the top k results based on maximum-marginal relevance using
the given ``lambda_mult``::
retriever = graph_vectorstore.as_retriever(search_type="mmr_traversal")
Passing search parameters
-------------------------
We can pass parameters to the underlying graph vectorstore's search methods using
``search_kwargs``.
Specifying graph traversal depth
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For example, we can set the graph traversal depth to only return documents
reachable through a given number of graph edges::
retriever = graph_vectorstore.as_retriever(search_kwargs={"depth": 3})
Specifying MMR parameters
^^^^^^^^^^^^^^^^^^^^^^^^^
When using search type ``mmr_traversal``, several parameters of the MMR algorithm
can be configured.
The ``fetch_k`` parameter determines how many documents are fetched using vector
similarity and ``adjacent_k`` parameter determines how many documents are fetched
using graph edges.
The ``lambda_mult`` parameter controls how the MMR re-ranking weights similarity to
the query string vs diversity among the retrieved documents as fetched documents
are selected for the set of ``k`` final results::
retriever = graph_vectorstore.as_retriever(
search_type="mmr",
search_kwargs={"fetch_k": 20, "adjacent_k": 20, "lambda_mult": 0.25},
)
Specifying top k
^^^^^^^^^^^^^^^^
We can also limit the number of documents ``k`` returned by the retriever.
Note that if ``depth`` is greater than zero, the retriever may return more documents
than specified by ``k``, since both the original ``k`` documents retrieved using
vector similarity and any documents connected via graph edges will be returned::
retriever = graph_vectorstore.as_retriever(search_kwargs={"k": 1})
Similarity score threshold retrieval
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For example, we can set a similarity score threshold and only return documents with
a score above that threshold::
retriever = graph_vectorstore.as_retriever(search_kwargs={"score_threshold": 0.5})
""" # noqa: E501
vectorstore: GraphVectorStore
"""GraphVectorStore to use for retrieval."""
search_type: str = "traversal"
"""Type of search to perform. Defaults to "traversal"."""
allowed_search_types: ClassVar[Collection[str]] = (
"similarity",
"similarity_score_threshold",
"mmr",
"traversal",
"mmr_traversal",
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
if self.search_type == "traversal":
return list(self.vectorstore.traversal_search(query, **self.search_kwargs))
elif self.search_type == "mmr_traversal":
return list(
self.vectorstore.mmr_traversal_search(query, **self.search_kwargs)
)
else:
return super()._get_relevant_documents(query, run_manager=run_manager)
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
if self.search_type == "traversal":
return [
doc
async for doc in self.vectorstore.atraversal_search(
query, **self.search_kwargs
)
]
elif self.search_type == "mmr_traversal":
return [
doc
async for doc in self.vectorstore.ammr_traversal_search(
query, **self.search_kwargs
)
]
else:
return await super()._aget_relevant_documents(
query, run_manager=run_manager
)
__all__ = ["GraphVectorStore", "GraphVectorStoreRetriever", "Node"]

View File

@@ -12,12 +12,12 @@ from typing import (
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_community.graph_vectorstores.base import (
from langchain_core.graph_vectorstores.base import (
GraphVectorStore,
Node,
nodes_to_documents,
)
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:

View File

@@ -2,11 +2,11 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Union
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
LinkExtractor,
)
from langchain_community.graph_vectorstores.links import Link
# TypeAlias is not available in Python 3.9, so we can't use that or the newer `type`.
GLiNERInput = Union[str, Document]
@@ -34,7 +34,7 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):
.. seealso::
- :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
- :class:`How to create links between documents <langchain_community.graph_vectorstores.links.Link>`
- :class:`How to create links between documents <langchain_core.graph_vectorstores.links.Link>`
How to link Documents on common named entities
==============================================
@@ -59,12 +59,12 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):
We can use :meth:`extract_one` on a document to get the links and add the links
to the document metadata with
:meth:`~langchain_community.graph_vectorstores.links.add_links`::
:meth:`~langchain_core.graph_vectorstores.links.add_links`::
from langchain_community.document_loaders import TextLoader
from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.graph_vectorstores.extractors import GLiNERLinkExtractor
from langchain_community.graph_vectorstores.links import add_links
from langchain_core.graph_vectorstores.links import add_links
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
@@ -87,7 +87,7 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):
Using LinkExtractorTransformer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using the :class:`~langchain_community.graph_vectorstores.extractors.link_extractor_transformer.LinkExtractorTransformer`,
Using the :class:`~langchain_community.graph_vectorstores.extractors.keybert_link_extractor.LinkExtractorTransformer`,
we can simplify the link extraction::
from langchain_community.document_loaders import TextLoader
@@ -113,7 +113,7 @@ class GLiNERLinkExtractor(LinkExtractor[GLiNERInput]):
{'source': 'state_of_the_union.txt', 'links': [Link(kind='entity:Person', direction='bidir', tag='President Zelenskyy'), Link(kind='entity:Person', direction='bidir', tag='Vladimir Putin')]}
The documents with named entity links can then be added to a :class:`~langchain_community.graph_vectorstores.base.GraphVectorStore`::
The documents with named entity links can then be added to a :class:`~langchain_core.graph_vectorstores.base.GraphVectorStore`::
from langchain_community.graph_vectorstores import CassandraGraphVectorStore

View File

@@ -2,6 +2,7 @@ from typing import Callable, List, Set
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
LinkExtractor,
@@ -9,7 +10,6 @@ from langchain_community.graph_vectorstores.extractors.link_extractor import (
from langchain_community.graph_vectorstores.extractors.link_extractor_adapter import (
LinkExtractorAdapter,
)
from langchain_community.graph_vectorstores.links import Link
# TypeAlias is not available in Python 3.9, so we can't use that or the newer `type`.
HierarchyInput = List[str]

View File

@@ -6,8 +6,8 @@ from urllib.parse import urldefrag, urljoin, urlparse
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores import Link
from langchain_community.graph_vectorstores import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
LinkExtractor,
)
@@ -77,7 +77,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
.. seealso::
- :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
- :class:`How to create links between documents <langchain_community.graph_vectorstores.links.Link>`
- :class:`How to create links between documents <langchain_core.graph_vectorstores.links.Link>`
How to link Documents on hyperlinks in HTML
===========================================
@@ -103,7 +103,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
We can use :meth:`extract_one` on a document to get the links and add the links
to the document metadata with
:meth:`~langchain_community.graph_vectorstores.links.add_links`::
:meth:`~langchain_core.graph_vectorstores.links.add_links`::
from langchain_community.document_loaders import AsyncHtmlLoader
from langchain_community.graph_vectorstores.extractors import (
@@ -148,7 +148,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
from langchain_community.document_loaders import AsyncHtmlLoader
from langchain_community.graph_vectorstores.extractors import HtmlLinkExtractor
from langchain_community.graph_vectorstores.links import add_links
from langchain_core.graph_vectorstores.links import add_links
loader = AsyncHtmlLoader(
[
@@ -176,7 +176,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
Using LinkExtractorTransformer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using the :class:`~langchain_community.graph_vectorstores.extractors.link_extractor_transformer.LinkExtractorTransformer`,
Using the :class:`~langchain_community.graph_vectorstores.extractors.keybert_link_extractor.LinkExtractorTransformer`,
we can simplify the link extraction::
from langchain_community.document_loaders import AsyncHtmlLoader
@@ -227,7 +227,7 @@ class HtmlLinkExtractor(LinkExtractor[HtmlInput]):
Found link from https://python.langchain.com/v0.2/docs/integrations/providers/astradb/ to https://docs.datastax.com/en/astra/home/astra.html.
The documents with URL links can then be added to a :class:`~langchain_community.graph_vectorstores.base.GraphVectorStore`::
The documents with URL links can then be added to a :class:`~langchain_core.graph_vectorstores.base.GraphVectorStore`::
from langchain_community.graph_vectorstores import CassandraGraphVectorStore

View File

@@ -2,11 +2,11 @@ from typing import Any, Dict, Iterable, Optional, Set, Union
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
LinkExtractor,
)
from langchain_community.graph_vectorstores.links import Link
KeybertInput = Union[str, Document]
@@ -37,7 +37,7 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):
.. seealso::
- :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
- :class:`How to create links between documents <langchain_community.graph_vectorstores.links.Link>`
- :class:`How to create links between documents <langchain_core.graph_vectorstores.links.Link>`
How to link Documents on common keywords using Keybert
======================================================
@@ -62,12 +62,12 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):
We can use :meth:`extract_one` on a document to get the links and add the links
to the document metadata with
:meth:`~langchain_community.graph_vectorstores.links.add_links`::
:meth:`~langchain_core.graph_vectorstores.links.add_links`::
from langchain_community.document_loaders import TextLoader
from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.graph_vectorstores.extractors import KeybertLinkExtractor
from langchain_community.graph_vectorstores.links import add_links
from langchain_core.graph_vectorstores.links import add_links
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
@@ -91,7 +91,7 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):
Using LinkExtractorTransformer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using the :class:`~langchain_community.graph_vectorstores.extractors.link_extractor_transformer.LinkExtractorTransformer`,
Using the :class:`~langchain_community.graph_vectorstores.extractors.keybert_link_extractor.LinkExtractorTransformer`,
we can simplify the link extraction::
from langchain_community.document_loaders import TextLoader
@@ -116,7 +116,7 @@ class KeybertLinkExtractor(LinkExtractor[KeybertInput]):
{'source': 'state_of_the_union.txt', 'links': [Link(kind='kw', direction='bidir', tag='ukraine'), Link(kind='kw', direction='bidir', tag='ukrainian'), Link(kind='kw', direction='bidir', tag='putin'), Link(kind='kw', direction='bidir', tag='vladimir'), Link(kind='kw', direction='bidir', tag='russia')]}
The documents with keyword links can then be added to a :class:`~langchain_community.graph_vectorstores.base.GraphVectorStore`::
The documents with keyword links can then be added to a :class:`~langchain_core.graph_vectorstores.base.GraphVectorStore`::
from langchain_community.graph_vectorstores import CassandraGraphVectorStore

View File

@@ -4,8 +4,7 @@ from abc import ABC, abstractmethod
from typing import Generic, Iterable, Set, TypeVar
from langchain_core._api import beta
from langchain_community.graph_vectorstores import Link
from langchain_core.graph_vectorstores import Link
InputT = TypeVar("InputT")

View File

@@ -1,8 +1,8 @@
from typing import Callable, Iterable, Set, TypeVar
from langchain_core._api import beta
from langchain_core.graph_vectorstores import Link
from langchain_community.graph_vectorstores import Link
from langchain_community.graph_vectorstores.extractors.link_extractor import (
LinkExtractor,
)

View File

@@ -3,11 +3,11 @@ from typing import Any, Sequence
from langchain_core._api import beta
from langchain_core.documents import Document
from langchain_core.documents.transformers import BaseDocumentTransformer
from langchain_core.graph_vectorstores.links import copy_with_links
from langchain_community.graph_vectorstores.extractors.link_extractor import (
LinkExtractor,
)
from langchain_community.graph_vectorstores.links import copy_with_links
@beta()

View File

@@ -1,102 +1,8 @@
from collections.abc import Iterable
from dataclasses import dataclass
from typing import Literal, Union
from langchain_core.graph_vectorstores.links import (
Link,
add_links,
copy_with_links,
get_links,
)
from langchain_core._api import beta
from langchain_core.documents import Document
@beta()
@dataclass(frozen=True)
class Link:
"""A link to/from a tag of a given tag.
Edges exist from nodes with an outgoing link to nodes with a matching incoming link.
"""
kind: str
"""The kind of link. Allows different extractors to use the same tag name without
creating collisions between extractors. For example “keyword” vs “url”."""
direction: Literal["in", "out", "bidir"]
"""The direction of the link."""
tag: str
"""The tag of the link."""
@staticmethod
def incoming(kind: str, tag: str) -> "Link":
"""Create an incoming link."""
return Link(kind=kind, direction="in", tag=tag)
@staticmethod
def outgoing(kind: str, tag: str) -> "Link":
"""Create an outgoing link."""
return Link(kind=kind, direction="out", tag=tag)
@staticmethod
def bidir(kind: str, tag: str) -> "Link":
"""Create a bidirectional link."""
return Link(kind=kind, direction="bidir", tag=tag)
METADATA_LINKS_KEY = "links"
@beta()
def get_links(doc: Document) -> list[Link]:
"""Get the links from a document.
Args:
doc: The document to get the link tags from.
Returns:
The list of links from the document.
"""
links = doc.metadata.setdefault(METADATA_LINKS_KEY, [])
if not isinstance(links, list):
# Convert to a list and remember that.
links = list(links)
doc.metadata[METADATA_LINKS_KEY] = links
return links
@beta()
def add_links(doc: Document, *links: Union[Link, Iterable[Link]]) -> None:
"""Add links to the given metadata.
Args:
doc: The document to add the links to.
*links: The links to add to the document.
"""
links_in_metadata = get_links(doc)
for link in links:
if isinstance(link, Iterable):
links_in_metadata.extend(link)
else:
links_in_metadata.append(link)
@beta()
def copy_with_links(doc: Document, *links: Union[Link, Iterable[Link]]) -> Document:
"""Return a document with the given links added.
Args:
doc: The document to add the links to.
*links: The links to add to the document.
Returns:
A document with a shallow-copy of the metadata with the links added.
"""
new_links = set(get_links(doc))
for link in links:
if isinstance(link, Iterable):
new_links.update(link)
else:
new_links.add(link)
return Document(
page_content=doc.page_content,
metadata={
**doc.metadata,
METADATA_LINKS_KEY: list(new_links),
},
)
__all__ = ["Link", "add_links", "get_links", "copy_with_links"]

View File

@@ -411,9 +411,7 @@ class Neo4jGraph(GraphStore):
return self.structured_schema
def query(
self,
query: str,
params: dict = {},
self, query: str, params: dict = {}, retry_on_session_expired: bool = True
) -> List[Dict[str, Any]]:
"""Query Neo4j database.
@@ -425,44 +423,26 @@ class Neo4jGraph(GraphStore):
List[Dict[str, Any]]: The list of dictionaries containing the query results.
"""
from neo4j import Query
from neo4j.exceptions import Neo4jError
from neo4j.exceptions import CypherSyntaxError, SessionExpired
try:
data, _, _ = self._driver.execute_query(
Query(text=query, timeout=self.timeout),
database=self._database,
parameters_=params,
)
json_data = [r.data() for r in data]
if self.sanitize:
json_data = [value_sanitize(el) for el in json_data]
return json_data
except Neo4jError as e:
if not (
(
( # isCallInTransactionError
e.code == "Neo.DatabaseError.Statement.ExecutionFailed"
or e.code
== "Neo.DatabaseError.Transaction.TransactionStartFailed"
with self._driver.session(database=self._database) as session:
try:
data = session.run(Query(text=query, timeout=self.timeout), params)
json_data = [r.data() for r in data]
if self.sanitize:
json_data = [value_sanitize(el) for el in json_data]
return json_data
except CypherSyntaxError as e:
raise ValueError(f"Generated Cypher Statement is not valid\n{e}")
except (
SessionExpired
) as e: # Session expired is a transient error that can be retried
if retry_on_session_expired:
return self.query(
query, params=params, retry_on_session_expired=False
)
and "in an implicit transaction" in e.message
)
or ( # isPeriodicCommitError
e.code == "Neo.ClientError.Statement.SemanticError"
and (
"in an open transaction is not possible" in e.message
or "tried to execute in an explicit transaction" in e.message
)
)
):
raise
# fallback to allow implicit transactions
with self._driver.session() as session:
data = session.run(Query(text=query, timeout=self.timeout), params)
json_data = [r.data() for r in data]
if self.sanitize:
json_data = [value_sanitize(el) for el in json_data]
return json_data
else:
raise e
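One side of this hunk retries the query exactly once when the session has expired, by recursing with the retry flag cleared. A standalone sketch of that retry-once pattern; the exception class and transport function here are stand-ins, not the neo4j driver API:

class SessionExpired(Exception):
    """Stand-in for neo4j.exceptions.SessionExpired."""

calls = {"count": 0}

def _execute(query: str):
    # Toy transport: fail the first call to simulate an expired session.
    calls["count"] += 1
    if calls["count"] == 1:
        raise SessionExpired()
    return [{"ok": True}]

def run_query(query: str, retry_on_session_expired: bool = True):
    try:
        return _execute(query)
    except SessionExpired:
        if retry_on_session_expired:
            # Retry once; the second attempt must not retry again.
            return run_query(query, retry_on_session_expired=False)
        raise

print(run_query("RETURN 1"))  # -> [{'ok': True}] after one transparent retry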
def refresh_schema(self) -> None:
"""

View File

@@ -510,6 +510,12 @@ def _import_sagemaker_endpoint() -> Type[BaseLLM]:
return SagemakerEndpoint
def _import_sambaverse() -> Type[BaseLLM]:
from langchain_community.llms.sambanova import Sambaverse
return Sambaverse
def _import_sambastudio() -> Type[BaseLLM]:
from langchain_community.llms.sambanova import SambaStudio
@@ -811,6 +817,8 @@ def __getattr__(name: str) -> Any:
return _import_rwkv()
elif name == "SagemakerEndpoint":
return _import_sagemaker_endpoint()
elif name == "Sambaverse":
return _import_sambaverse()
elif name == "SambaStudio":
return _import_sambastudio()
elif name == "SelfHostedPipeline":
@@ -946,6 +954,7 @@ __all__ = [
"RWKV",
"Replicate",
"SagemakerEndpoint",
"Sambaverse",
"SambaStudio",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
@@ -1042,6 +1051,7 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"sambaverse": _import_sambaverse,
"sambastudio": _import_sambastudio,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,

View File

@@ -9,6 +9,464 @@ from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import ConfigDict
class SVEndpointHandler:
"""
SambaNova Systems Interface for Sambaverse endpoint.
:param str host_url: Base URL of the DaaS API service
"""
API_BASE_PATH: str = "/api/predict"
def __init__(self, host_url: str):
"""
Initialize the SVEndpointHandler.
:param str host_url: Base URL of the DaaS API service
"""
self.host_url = host_url
self.http_session = requests.Session()
@staticmethod
def _process_response(response: requests.Response) -> Dict:
"""
Processes the API response and returns the resulting dict.
All resulting dicts, regardless of success or failure, will contain the
`status_code` key with the API response status code.
If the API returned an error, the resulting dict will contain the key
`detail` with the error message.
If the API call was successful, the resulting dict will contain the key
`data` with the response data.
:param requests.Response response: the response object to process
:return: the response dict
:type: dict
"""
result: Dict[str, Any] = {}
try:
lines_result = response.text.strip().split("\n")
text_result = lines_result[-1]
if response.status_code == 200 and json.loads(text_result).get("error"):
completion = ""
for line in lines_result[:-1]:
completion += json.loads(line)["result"]["responses"][0][
"stream_token"
]
text_result = lines_result[-2]
result = json.loads(text_result)
result["result"]["responses"][0]["completion"] = completion
else:
result = json.loads(text_result)
except Exception as e:
result["detail"] = str(e)
if "status_code" not in result:
result["status_code"] = response.status_code
return result
@staticmethod
def _process_streaming_response(
response: requests.Response,
) -> Generator[Dict, None, None]:
"""Process the streaming response"""
try:
for line in response.iter_lines():
chunk = json.loads(line)
if "status_code" not in chunk:
chunk["status_code"] = response.status_code
if chunk["status_code"] == 200 and chunk.get("error"):
chunk["result"] = {"responses": [{"stream_token": ""}]}
return chunk
yield chunk
except Exception as e:
raise RuntimeError(f"Error processing streaming response: {e}")
def _get_full_url(self) -> str:
"""
Return the full API URL for a given path.
:returns: the full API URL for the sub-path
:type: str
"""
return f"{self.host_url}{self.API_BASE_PATH}"
def nlp_predict(
self,
key: str,
sambaverse_model_name: Optional[str],
input: Union[List[str], str],
params: Optional[str] = "",
stream: bool = False,
) -> Dict:
"""
NLP predict using inline input string.
:param str key: API key
:param str sambaverse_model_name: Optional name of the Sambaverse model to use
:param input: Input string (or list of strings)
:param str params: Input params string
:returns: Prediction results
:type: dict
"""
if params:
data = {"instance": input, "params": json.loads(params)}
else:
data = {"instance": input}
response = self.http_session.post(
self._get_full_url(),
headers={
"key": key,
"Content-Type": "application/json",
"modelName": sambaverse_model_name,
},
json=data,
)
return SVEndpointHandler._process_response(response)
def nlp_predict_stream(
self,
key: str,
sambaverse_model_name: Optional[str],
input: Union[List[str], str],
params: Optional[str] = "",
) -> Iterator[Dict]:
"""
NLP predict using inline input string.
:param str key: API key
:param str sambaverse_model_name: Optional name of the Sambaverse model to use
:param input: Input string (or list of strings)
:param str params: Input params string
:returns: An iterator of prediction result chunks
"""
if params:
data = {"instance": input, "params": json.loads(params)}
else:
data = {"instance": input}
# Streaming output
response = self.http_session.post(
self._get_full_url(),
headers={
"key": key,
"Content-Type": "application/json",
"modelName": sambaverse_model_name,
},
json=data,
stream=True,
)
for chunk in SVEndpointHandler._process_streaming_response(response):
yield chunk
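The streaming handler above consumes newline-delimited JSON: each line of the HTTP response body is one JSON chunk carrying a `stream_token`. A self-contained sketch of that parsing step, with an invented two-line payload:

import json

raw_stream = (
    b'{"result": {"responses": [{"stream_token": "Hel"}]}}\n'
    b'{"result": {"responses": [{"stream_token": "lo"}]}}'
)
for line in raw_stream.splitlines():
    chunk = json.loads(line)
    print(chunk["result"]["responses"][0]["stream_token"], end="")
# -> Hello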
class Sambaverse(LLM):
"""
Sambaverse large language models.
To use, you should have the environment variable ``SAMBAVERSE_API_KEY``
set with your API key.
Get one at https://sambaverse.sambanova.ai
and read additional documentation at https://docs.sambanova.ai/sambaverse/latest/index.html
Example:
.. code-block:: python
from langchain_community.llms.sambanova import Sambaverse
Sambaverse(
sambaverse_url="https://sambaverse.sambanova.ai",
sambaverse_api_key="your-sambaverse-api-key",
sambaverse_model_name="Meta/llama-2-7b-chat-hf",
streaming=False,
model_kwargs={
"select_expert": "llama-2-7b-chat-hf",
"do_sample": False,
"max_tokens_to_generate": 100,
"temperature": 0.7,
"top_p": 1.0,
"repetition_penalty": 1.0,
"top_k": 50,
"process_prompt": False
},
)
"""
sambaverse_url: str = ""
"""Sambaverse url to use"""
sambaverse_api_key: str = ""
"""sambaverse api key"""
sambaverse_model_name: Optional[str] = None
"""sambaverse expert model to use"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
streaming: Optional[bool] = False
"""Streaming flag to get streamed response."""
model_config = ConfigDict(
extra="forbid",
)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["sambaverse_url"] = get_from_dict_or_env(
values,
"sambaverse_url",
"SAMBAVERSE_URL",
default="https://sambaverse.sambanova.ai",
)
values["sambaverse_api_key"] = get_from_dict_or_env(
values, "sambaverse_api_key", "SAMBAVERSE_API_KEY"
)
values["sambaverse_model_name"] = get_from_dict_or_env(
values, "sambaverse_model_name", "SAMBAVERSE_MODEL_NAME"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_kwargs": self.model_kwargs}}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "Sambaverse LLM"
def _get_tuning_params(self, stop: Optional[List[str]]) -> str:
"""
Get the tuning parameters to use when calling the LLM.
Args:
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
Returns:
The tuning parameters as a JSON string.
"""
_model_kwargs = self.model_kwargs or {}
_kwarg_stop_sequences = _model_kwargs.get("stop_sequences", [])
_stop_sequences = stop or _kwarg_stop_sequences
if not _kwarg_stop_sequences:
_model_kwargs["stop_sequences"] = ",".join(
f'"{x}"' for x in _stop_sequences
)
tuning_params_dict = {
k: {"type": type(v).__name__, "value": str(v)}
for k, v in (_model_kwargs.items())
}
_model_kwargs["stop_sequences"] = _kwarg_stop_sequences
tuning_params = json.dumps(tuning_params_dict)
return tuning_params
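# For illustration, an assumed input/output pair for _get_tuning_params:
# with model_kwargs={"max_tokens_to_generate": 100} and stop=["</s>"],
# the JSON string built above would be
#   {"max_tokens_to_generate": {"type": "int", "value": "100"},
#    "stop_sequences": {"type": "str", "value": "\"</s>\""}}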
def _handle_nlp_predict(
self,
sdk: SVEndpointHandler,
prompt: Union[List[str], str],
tuning_params: str,
) -> str:
"""
Perform an NLP prediction using the Sambaverse endpoint handler.
Args:
sdk: The SVEndpointHandler to use for the prediction.
prompt: The prompt to use for the prediction.
tuning_params: The tuning parameters to use for the prediction.
Returns:
The prediction result.
Raises:
RuntimeError: If the prediction fails.
"""
response = sdk.nlp_predict(
self.sambaverse_api_key, self.sambaverse_model_name, prompt, tuning_params
)
if response["status_code"] != 200:
error = response.get("error")
if error:
optional_code = error.get("code")
optional_details = error.get("details")
optional_message = error.get("message")
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response['status_code']}.\n"
f"Message: {optional_message}\n"
f"Details: {optional_details}\n"
f"Code: {optional_code}\n"
)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{response['status_code']}."
f"{response}."
)
return response["result"]["responses"][0]["completion"]
def _handle_completion_requests(
self, prompt: Union[List[str], str], stop: Optional[List[str]]
) -> str:
"""
Perform a prediction using the Sambaverse endpoint handler.
Args:
prompt: The prompt to use for the prediction.
stop: stop sequences.
Returns:
The prediction result.
Raises:
RuntimeError: If the prediction fails.
"""
ss_endpoint = SVEndpointHandler(self.sambaverse_url)
tuning_params = self._get_tuning_params(stop)
return self._handle_nlp_predict(ss_endpoint, prompt, tuning_params)
def _handle_nlp_predict_stream(
self, sdk: SVEndpointHandler, prompt: Union[List[str], str], tuning_params: str
) -> Iterator[GenerationChunk]:
"""
Perform a streaming request to the LLM.
Args:
sdk: The SVEndpointHandler to use for the prediction.
prompt: The prompt to use for the prediction.
tuning_params: The tuning parameters to use for the prediction.
Returns:
An iterator of GenerationChunks.
"""
for chunk in sdk.nlp_predict_stream(
self.sambaverse_api_key, self.sambaverse_model_name, prompt, tuning_params
):
if chunk["status_code"] != 200:
error = chunk.get("error")
if error:
optional_code = error.get("code")
optional_details = error.get("details")
optional_message = error.get("message")
raise ValueError(
f"Sambanova /complete call failed with status code "
f"{chunk['status_code']}.\n"
f"Message: {optional_message}\n"
f"Details: {optional_details}\n"
f"Code: {optional_code}\n"
)
else:
raise RuntimeError(
f"Sambanova /complete call failed with status code "
f"{chunk['status_code']}."
f"{chunk}."
)
text = chunk["result"]["responses"][0]["stream_token"]
generated_chunk = GenerationChunk(text=text)
yield generated_chunk
def _stream(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
"""Stream the Sambaverse's LLM on the given prompt.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
run_manager: Callback manager for the run.
kwargs: Additional keyword arguments, passed directly
to the sambaverse model in the API call.
Returns:
An iterator of GenerationChunks.
"""
ss_endpoint = SVEndpointHandler(self.sambaverse_url)
tuning_params = self._get_tuning_params(stop)
try:
if self.streaming:
for chunk in self._handle_nlp_predict_stream(
ss_endpoint, prompt, tuning_params
):
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
else:
return
except Exception as e:
# Handle any errors raised by the inference endpoint
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
def _handle_stream_request(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]],
run_manager: Optional[CallbackManagerForLLMRun],
kwargs: Dict[str, Any],
) -> str:
"""
Perform a streaming request to the LLM.
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
run_manager: Callback manager for the run.
kwargs: Additional keyword arguments, passed directly
to the sambaverse model in the API call.
Returns:
The model output as a string.
"""
completion = ""
for chunk in self._stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
completion += chunk.text
return completion
def _call(
self,
prompt: Union[List[str], str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Run the LLM on the given input.
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of the stop substrings.
run_manager: Callback manager for the run.
kwargs: Additional keyword arguments, passed directly
to the sambaverse model in the API call.
Returns:
The model output as a string.
"""
try:
if self.streaming:
return self._handle_stream_request(prompt, stop, run_manager, kwargs)
return self._handle_completion_requests(prompt, stop)
except Exception as e:
# Handle any errors raised by the inference endpoint
raise ValueError(f"Error raised by the inference endpoint: {e}") from e
class SSEndpointHandler:
"""
SambaNova Systems Interface for SambaStudio model endpoints.
@@ -517,7 +975,7 @@ class SambaStudio(LLM):
first occurrence of any of the stop substrings.
run_manager: Callback manager for the run.
kwargs: Additional keyword arguments. directly passed
to the sambastudio model in API call.
to the sambaverse model in API call.
Returns:
The model output as a string.

View File

@@ -10,6 +10,7 @@ from pydantic import BaseModel, Field, create_model
from typing_extensions import Self
if TYPE_CHECKING:
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.catalog import FunctionInfo
from pydantic import ConfigDict
@@ -120,7 +121,7 @@ def _get_tool_name(function: "FunctionInfo") -> str:
return tool_name
def _get_default_workspace_client() -> Any:
def _get_default_workspace_client() -> "WorkspaceClient":
try:
from databricks.sdk import WorkspaceClient
except ImportError as e:
@@ -136,7 +137,7 @@ class UCFunctionToolkit(BaseToolkit):
description="The ID of a Databricks SQL Warehouse to execute functions."
)
workspace_client: Any = Field(
workspace_client: "WorkspaceClient" = Field(
default_factory=_get_default_workspace_client,
description="Databricks workspace client.",
)

View File

@@ -69,19 +69,6 @@ class ZenGuardTool(BaseTool):
)
return v
@property
def _api_key(self) -> str:
if self.zenguard_api_key is None:
raise ValueError(
"API key is required for the ZenGuardTool. "
"Please provide the API key by either:\n"
"1. Manually specifying it when initializing the tool: "
"ZenGuardTool(zenguard_api_key='your_api_key')\n"
"2. Setting it as an environment variable:"
f" {self._ZENGUARD_API_KEY_ENV_NAME}"
)
return self.zenguard_api_key
def _run(
self,
prompts: List[str],
@@ -104,7 +91,7 @@ class ZenGuardTool(BaseTool):
response = requests.post(
self._ZENGUARD_API_URL_ROOT + postfix,
json=json,
headers={"x-api-key": self._api_key},
headers={"x-api-key": self.zenguard_api_key},
timeout=5,
)
response.raise_for_status()

View File

@@ -24,18 +24,6 @@ class FinancialDatasetsAPIWrapper(BaseModel):
data, "financial_datasets_api_key", "FINANCIAL_DATASETS_API_KEY"
)
@property
def _api_key(self) -> str:
if self.financial_datasets_api_key is None:
raise ValueError(
"API key is required for the FinancialDatasetsAPIWrapper. "
"Please provide the API key by either:\n"
"1. Manually specifying it when initializing the wrapper: "
"FinancialDatasetsAPIWrapper(financial_datasets_api_key='your_api_key')\n"
"2. Setting it as an environment variable: FINANCIAL_DATASETS_API_KEY"
)
return self.financial_datasets_api_key
def get_income_statements(
self,
ticker: str,
@@ -59,7 +47,7 @@ class FinancialDatasetsAPIWrapper(BaseModel):
)
# Add the api key to the headers
headers = {"X-API-KEY": self._api_key}
headers = {"X-API-KEY": self.financial_datasets_api_key}
# Execute the request
response = requests.get(url, headers=headers)
@@ -90,7 +78,7 @@ class FinancialDatasetsAPIWrapper(BaseModel):
)
# Add the api key to the headers
headers = {"X-API-KEY": self._api_key}
headers = {"X-API-KEY": self.financial_datasets_api_key}
# Execute the request
response = requests.get(url, headers=headers)
@@ -122,7 +110,7 @@ class FinancialDatasetsAPIWrapper(BaseModel):
)
# Add the api key to the headers
headers = {"X-API-KEY": self._api_key}
headers = {"X-API-KEY": self.financial_datasets_api_key}
# Execute the request
response = requests.get(url, headers=headers)

View File

@@ -443,12 +443,6 @@ class AzureSearch(VectorStore):
logger.debug("Nothing to insert, skipping.")
return []
# when `keys` are not passed in and there is `ids` in kwargs, use those instead
# base class expects `ids` passed in rather than `keys`
# https://github.com/langchain-ai/langchain/blob/4cdaca67dc51dba887289f56c6fead3c1a52f97d/libs/core/langchain_core/vectorstores/base.py#L65
if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == len(embeddings)):
keys = kwargs["ids"]
return self.add_embeddings(zip(texts, embeddings), metadatas, keys=keys)
async def aadd_texts(
@@ -473,12 +467,6 @@ class AzureSearch(VectorStore):
logger.debug("Nothing to insert, skipping.")
return []
# when `keys` are not passed in and there is `ids` in kwargs, use those instead
# base class expects `ids` passed in rather than `keys`
# https://github.com/langchain-ai/langchain/blob/4cdaca67dc51dba887289f56c6fead3c1a52f97d/libs/core/langchain_core/vectorstores/base.py#L65
if (not keys) and ("ids" in kwargs) and (len(kwargs["ids"]) == len(embeddings)):
keys = kwargs["ids"]
return await self.aadd_embeddings(zip(texts, embeddings), metadatas, keys=keys)
def add_embeddings(
@@ -495,13 +483,9 @@ class AzureSearch(VectorStore):
data = []
for i, (text, embedding) in enumerate(text_embeddings):
# Use provided key otherwise use default key
if keys:
key = keys[i]
else:
key = str(uuid.uuid4())
# Encoding key for Azure Search valid characters
key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
key = keys[i] if keys else str(uuid.uuid4())
# Encoding key for Azure Search valid characters
key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
metadata = metadatas[i] if metadatas else {}
# Add data to index
# Additional metadata to fields mapping

View File

@@ -65,12 +65,10 @@ class Epsilla(VectorStore):
"Please install pyepsilla package with `pip install pyepsilla`."
) from e
if not isinstance(
client, (pyepsilla.vectordb.Client, pyepsilla.cloud.client.Vectordb)
):
if not isinstance(client, pyepsilla.vectordb.Client):
raise TypeError(
"client should be an instance of pyepsilla.vectordb.Client or "
f"pyepsilla.cloud.client.Vectordb, got {type(client)}"
f"client should be an instance of pyepsilla.vectordb.Client, "
f"got {type(client)}"
)
self._client: vectordb.Client = client

View File

@@ -595,8 +595,11 @@ class Neo4jVector(VectorStore):
query: str,
*,
params: Optional[dict] = None,
retry_on_session_expired: bool = True,
) -> List[Dict[str, Any]]:
"""Query Neo4j database with retries and exponential backoff.
"""
This method sends a Cypher query to the connected Neo4j database
and returns the results as a list of dictionaries.
Args:
query (str): The Cypher query to execute.
@@ -605,38 +608,24 @@ class Neo4jVector(VectorStore):
Returns:
List[Dict[str, Any]]: List of dictionaries containing the query results.
"""
from neo4j import Query
from neo4j.exceptions import Neo4jError
from neo4j.exceptions import CypherSyntaxError, SessionExpired
params = params or {}
try:
data, _, _ = self._driver.execute_query(
query, database=self._database, parameters_=params
)
return [r.data() for r in data]
except Neo4jError as e:
if not (
(
( # isCallInTransactionError
e.code == "Neo.DatabaseError.Statement.ExecutionFailed"
or e.code
== "Neo.DatabaseError.Transaction.TransactionStartFailed"
with self._driver.session(database=self._database) as session:
try:
data = session.run(query, params)
return [r.data() for r in data]
except CypherSyntaxError as e:
raise ValueError(f"Cypher Statement is not valid\n{e}")
except (
SessionExpired
) as e: # Session expired is a transient error that can be retried
if retry_on_session_expired:
return self.query(
query, params=params, retry_on_session_expired=False
)
and "in an implicit transaction" in e.message
)
or ( # isPeriodicCommitError
e.code == "Neo.ClientError.Statement.SemanticError"
and (
"in an open transaction is not possible" in e.message
or "tried to execute in an explicit transaction" in e.message
)
)
):
raise
# Fallback to allow implicit transactions
with self._driver.session() as session:
data = session.run(Query(text=query), params)
return [r.data() for r in data]
else:
raise e
def verify_version(self) -> None:
"""

View File

@@ -144,7 +144,7 @@ class TencentVectorDB(VectorStore):
In order to use this you need to have a database instance.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/104489
https://cloud.tencent.com/document/product/1709/94951
"""
field_id: str = "id"

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -150,13 +150,13 @@ files = [
[[package]]
name = "anyio"
version = "4.5.0"
version = "4.4.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.5.0-py3-none-any.whl", hash = "sha256:fdeb095b7cc5a5563175eedd926ec4ae55413bb4be5770c424af0ba46ccb4a78"},
{file = "anyio-4.5.0.tar.gz", hash = "sha256:c5a275fe5ca0afd788001f58fca1e69e29ce706d746e317d660e21f70c530ef9"},
{file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
{file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
]
[package.dependencies]
@@ -166,9 +166,9 @@ sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]
[[package]]
name = "appnope"
@@ -1251,18 +1251,15 @@ zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
version = "3.10"
version = "3.8"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
{file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"},
{file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "importlib-metadata"
version = "8.5.0"
@@ -1538,13 +1535,13 @@ notebook = "*"
[[package]]
name = "jupyter-client"
version = "8.6.3"
version = "8.6.2"
description = "Jupyter protocol implementation and client libraries"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"},
{file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"},
{file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"},
{file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"},
]
[package.dependencies]
@@ -1783,7 +1780,7 @@ files = [
[[package]]
name = "langchain"
version = "0.3.0"
version = "0.3.0.dev2"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.9,<4.0"
@@ -1793,12 +1790,12 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
langchain-core = "^0.3.0"
langchain-text-splitters = "^0.3.0"
langchain-core = "^0.3.0.dev5"
langchain-text-splitters = "^0.3.0.dev1"
langsmith = "^0.1.17"
numpy = [
{version = "^1", markers = "python_version < \"3.12\""},
{version = "^1.26.0", markers = "python_version >= \"3.12\""},
{version = ">=1,<2", markers = "python_version < \"3.12\""},
{version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
]
pydantic = "^2.7.4"
PyYAML = ">=5.3"
@@ -1812,7 +1809,7 @@ url = "../langchain"
[[package]]
name = "langchain-core"
version = "0.3.2"
version = "0.3.0.dev5"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.9,<4.0"
@@ -1821,11 +1818,11 @@ develop = true
[package.dependencies]
jsonpatch = "^1.33"
langsmith = "^0.1.125"
langsmith = "^0.1.117"
packaging = ">=23.2,<25"
pydantic = [
{version = "^2.5.2", markers = "python_full_version < \"3.12.4\""},
{version = "^2.7.4", markers = "python_full_version >= \"3.12.4\""},
{version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""},
{version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""},
]
PyYAML = ">=5.3"
tenacity = "^8.1.0,!=8.4.0"
@@ -1846,7 +1843,7 @@ develop = true
[package.dependencies]
httpx = "^0.27.0"
langchain-core = "^0.3.0"
langchain-core = ">=0.3.0.dev1"
pytest = ">=7,<9"
syrupy = "^4"
@@ -1856,7 +1853,7 @@ url = "../standard-tests"
[[package]]
name = "langchain-text-splitters"
version = "0.3.0"
version = "0.3.0.dev1"
description = "LangChain text splitting utilities"
optional = false
python-versions = ">=3.9,<4.0"
@@ -1864,7 +1861,7 @@ files = []
develop = true
[package.dependencies]
langchain-core = "^0.3.0"
langchain-core = "^0.3.0.dev1"
[package.source]
type = "directory"
@@ -1872,13 +1869,13 @@ url = "../text-splitters"
[[package]]
name = "langsmith"
version = "0.1.125"
version = "0.1.120"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.125-py3-none-any.whl", hash = "sha256:74ce8eb2663e1ed20bfcfc88d41e0712879306956c9938d1cdbab7d60458bdca"},
{file = "langsmith-0.1.125.tar.gz", hash = "sha256:2c0eb0c3cbf22cff55bf519b8e889041f9a591bcf97af5152c8e130333c5940e"},
{file = "langsmith-0.1.120-py3-none-any.whl", hash = "sha256:54d2785e301646c0988e0a69ebe4d976488c87b41928b358cb153b6ddd8db62b"},
{file = "langsmith-0.1.120.tar.gz", hash = "sha256:25499ca187b41bd89d784b272b97a8d76f60e0e21bdf20336e8a2aa6a9b23ac9"},
]
[package.dependencies]
@@ -2577,13 +2574,13 @@ ptyprocess = ">=0.5"
[[package]]
name = "platformdirs"
version = "4.3.6"
version = "4.3.2"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
{file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
{file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
{file = "platformdirs-4.3.2-py3-none-any.whl", hash = "sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617"},
{file = "platformdirs-4.3.2.tar.gz", hash = "sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c"},
]
[package.extras]
@@ -2636,22 +2633,22 @@ wcwidth = "*"
[[package]]
name = "protobuf"
version = "5.28.2"
version = "5.28.1"
description = ""
optional = false
python-versions = ">=3.8"
files = [
{file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"},
{file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"},
{file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"},
{file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"},
{file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"},
{file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"},
{file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"},
{file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"},
{file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"},
{file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"},
{file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"},
{file = "protobuf-5.28.1-cp310-abi3-win32.whl", hash = "sha256:fc063acaf7a3d9ca13146fefb5b42ac94ab943ec6e978f543cd5637da2d57957"},
{file = "protobuf-5.28.1-cp310-abi3-win_amd64.whl", hash = "sha256:4c7f5cb38c640919791c9f74ea80c5b82314c69a8409ea36f2599617d03989af"},
{file = "protobuf-5.28.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4304e4fceb823d91699e924a1fdf95cde0e066f3b1c28edb665bda762ecde10f"},
{file = "protobuf-5.28.1-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:0dfd86d2b5edf03d91ec2a7c15b4e950258150f14f9af5f51c17fa224ee1931f"},
{file = "protobuf-5.28.1-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:51f09caab818707ab91cf09cc5c156026599cf05a4520779ccbf53c1b352fb25"},
{file = "protobuf-5.28.1-cp38-cp38-win32.whl", hash = "sha256:1b04bde117a10ff9d906841a89ec326686c48ececeb65690f15b8cabe7149495"},
{file = "protobuf-5.28.1-cp38-cp38-win_amd64.whl", hash = "sha256:cabfe43044ee319ad6832b2fda332646f9ef1636b0130186a3ae0a52fc264bb4"},
{file = "protobuf-5.28.1-cp39-cp39-win32.whl", hash = "sha256:4b4b9a0562a35773ff47a3df823177ab71a1f5eb1ff56d8f842b7432ecfd7fd2"},
{file = "protobuf-5.28.1-cp39-cp39-win_amd64.whl", hash = "sha256:f24e5d70e6af8ee9672ff605d5503491635f63d5db2fffb6472be78ba62efd8f"},
{file = "protobuf-5.28.1-py3-none-any.whl", hash = "sha256:c529535e5c0effcf417682563719e5d8ac8d2b93de07a56108b4c2d436d7a29a"},
{file = "protobuf-5.28.1.tar.gz", hash = "sha256:42597e938f83bb7f3e4b35f03aa45208d49ae8d5bcb4bc10b9fc825e0ab5e423"},
]
[[package]]
@@ -2721,18 +2718,18 @@ files = [
[[package]]
name = "pydantic"
version = "2.9.2"
version = "2.9.1"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
{file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
{file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
{file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
pydantic-core = "2.23.3"
typing-extensions = [
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
@@ -2744,100 +2741,100 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
version = "2.23.4"
version = "2.23.3"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
{file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
{file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
{file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
{file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
{file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
{file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
{file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
{file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
{file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
{file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
{file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
{file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
{file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
{file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
{file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
{file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
{file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
{file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
{file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
{file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
{file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
{file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
{file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
{file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
{file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
{file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
{file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
{file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
{file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
{file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
{file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
{file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
{file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
{file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
{file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
{file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
{file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
{file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
{file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
{file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
{file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
{file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
{file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
{file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
{file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
{file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
{file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
{file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
{file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
{file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
{file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
{file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
{file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
{file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
]
[package.dependencies]
@@ -3585,64 +3582,64 @@ files = [
[[package]]
name = "sqlalchemy"
version = "2.0.35"
version = "2.0.34"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
{file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"},
{file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"},
{file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"},
{file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"},
{file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"},
{file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = "sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"},
{file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"},
{file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"},
{file = "SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = "sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"},
{file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"},
{file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"},
]
[package.dependencies]
greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"}
greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"}
typing-extensions = ">=4.6.0"
[package.extras]
@@ -3841,13 +3838,13 @@ files = [
[[package]]
name = "types-protobuf"
version = "5.27.0.20240920"
version = "5.27.0.20240907"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-protobuf-5.27.0.20240920.tar.gz", hash = "sha256:992d695315d11eb2d25e806122c9e1fd9fec282e96104f0a0cb9226cd5d90293"},
{file = "types_protobuf-5.27.0.20240920-py3-none-any.whl", hash = "sha256:c04140bd3c761a55f4e661372b24a6f508169e0815f2b73da33f34b447ed7a8d"},
{file = "types-protobuf-5.27.0.20240907.tar.gz", hash = "sha256:bb6f90f66b18d4d1c75667b6586334b0573a6fcee5eb0142a7348a765a7cbadc"},
{file = "types_protobuf-5.27.0.20240907-py3-none-any.whl", hash = "sha256:5443270534cc8072909ef7ad9e1421ccff924ca658749a6396c0c43d64c32676"},
]
[[package]]
@@ -3889,13 +3886,13 @@ files = [
[[package]]
name = "types-pyyaml"
version = "6.0.12.20240917"
version = "6.0.12.20240808"
description = "Typing stubs for PyYAML"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"},
{file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"},
{file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"},
{file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"},
]
[[package]]
@@ -3927,15 +3924,29 @@ files = [
[package.dependencies]
types-urllib3 = "*"
[[package]]
name = "types-requests"
version = "2.32.0.20240907"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.32.0.20240907.tar.gz", hash = "sha256:ff33935f061b5e81ec87997e91050f7b4af4f82027a7a7a9d9aaea04a963fdf8"},
{file = "types_requests-2.32.0.20240907-py3-none-any.whl", hash = "sha256:1d1e79faeaf9d42def77f3c304893dea17a97cae98168ac69f3cb465516ee8da"},
]
[package.dependencies]
urllib3 = ">=2"
[[package]]
name = "types-setuptools"
version = "75.1.0.20240917"
version = "74.1.0.20240907"
description = "Typing stubs for setuptools"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-setuptools-75.1.0.20240917.tar.gz", hash = "sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"},
{file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"},
{file = "types-setuptools-74.1.0.20240907.tar.gz", hash = "sha256:0abdb082552ca966c1e5fc244e4853adc62971f6cd724fb1d8a3713b580e5a65"},
{file = "types_setuptools-74.1.0.20240907-py3-none-any.whl", hash = "sha256:15b38c8e63ca34f42f6063ff4b1dd662ea20086166d5ad6a102e670a52574120"},
]
[[package]]
@@ -4027,6 +4038,23 @@ brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotl
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "urllib3"
version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
{file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "vcrpy"
version = "6.0.1"
@@ -4358,4 +4386,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
content-hash = "d4ddaa606dc1af15b47b534482210ad687c8b96c816cb7ab13fa77d184514435"
content-hash = "1a81994350c65c891f5f592a522975bc6688cfad016f2af5fe8ad93a76209066"

View File

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "langchain-community"
version = "0.3.0"
version = "0.3.0.dev2"
description = "Community contributed LangChain integrations."
authors = []
license = "MIT"
@@ -33,8 +33,8 @@ ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogy
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
langchain-core = "^0.3.0"
langchain = "^0.3.0"
langchain-core = { version = "^0.3.0.dev5", allow-prereleases = true }
langchain = { version = "^0.3.0.dev2", allow-prereleases = true }
SQLAlchemy = ">=1.4,<3"
requests = "^2"
PyYAML = ">=5.3"
@@ -42,7 +42,7 @@ aiohttp = "^3.8.3"
tenacity = "^8.1.0,!=8.4.0"
dataclasses-json = ">= 0.5.7, < 0.7"
pydantic-settings = "^2.4.0"
langsmith = "^0.1.125"
langsmith = "^0.1.112"
[[tool.poetry.dependencies.numpy]]
version = "^1"

View File

@@ -20,7 +20,7 @@ count=$(git grep -E '(@root_validator)|(@validator)|(@field_validator)|(@pre_ini
# PRs that increase the current count will not be accepted.
# PRs that decrease the count by updating the code
# in the repository are welcome!
current_count=128
current_count=129
if [ "$count" -gt "$current_count" ]; then
echo "The PR seems to be introducing new usage of @root_validator and/or @field_validator."

View File

@@ -1,3 +1,4 @@
import re
from pathlib import Path
from typing import Sequence, Union
@@ -10,6 +11,7 @@ from langchain_community.document_loaders import (
PDFMinerPDFasHTMLLoader,
PyMuPDFLoader,
PyPDFium2Loader,
PyPDFLoader,
UnstructuredPDFLoader,
)
@@ -84,6 +86,37 @@ def test_pdfminer_pdf_as_html_loader() -> None:
assert len(docs) == 1
def test_pypdf_loader() -> None:
"""Test PyPDFLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PyPDFLoader(str(file_path))
docs = loader.load()
assert len(docs) == 16
def test_pypdf_loader_with_layout() -> None:
"""Test PyPDFLoader with layout mode."""
file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf"
loader = PyPDFLoader(str(file_path), extraction_mode="layout")
docs = loader.load()
first_page = docs[0].page_content
expected = (
Path(__file__).parent.parent / "examples/layout-parser-paper-page-1.txt"
).read_text(encoding="utf-8")
cleaned_first_page = re.sub(r"\x00", "", first_page)
cleaned_expected = re.sub(r"\x00", "", expected)
assert cleaned_first_page == cleaned_expected
def test_pypdfium2_loader() -> None:
"""Test PyPDFium2Loader."""
file_path = Path(__file__).parent.parent / "examples/hello.pdf"

View File

@@ -1,7 +1,7 @@
import pytest
from langchain_core.graph_vectorstores.links import Link
from langchain_community.graph_vectorstores.extractors import GLiNERLinkExtractor
from langchain_community.graph_vectorstores.links import Link
PAGE_1 = """
Cristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃ'tjɐnu

View File

@@ -1,7 +1,7 @@
import pytest
from langchain_core.graph_vectorstores.links import Link
from langchain_community.graph_vectorstores.extractors import KeybertLinkExtractor
from langchain_community.graph_vectorstores.links import Link
PAGE_1 = """
Supervised learning is the machine learning task of learning a function that

View File

@@ -4,9 +4,9 @@ from typing import Iterable, List, Optional, Type
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.graph_vectorstores.links import METADATA_LINKS_KEY, Link
from langchain_community.graph_vectorstores import CassandraGraphVectorStore
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link
CASSANDRA_DEFAULT_KEYSPACE = "graph_test_keyspace"

View File

@@ -1,17 +1,28 @@
"""Test sambanova API wrapper.
In order to run this test, you need to have a sambastudio base url,
project id, endpoint id, and api key.
You'll then need to set SAMBASTUDIO_BASE_URL, SAMBASTUDIO_BASE_URI
In order to run this test, you need to have a sambaverse api key,
and a sambaverse base url, project id, endpoint id, and api key.
You'll then need to set SAMBAVERSE_API_KEY, SAMBASTUDIO_BASE_URL,
SAMBASTUDIO_PROJECT_ID, SAMBASTUDIO_ENDPOINT_ID, and SAMBASTUDIO_API_KEY
environment variables.
"""
from langchain_community.llms.sambanova import SambaStudio
from langchain_community.llms.sambanova import SambaStudio, Sambaverse
def test_sambaverse_call() -> None:
"""Test simple non-streaming call to sambaverse."""
llm = Sambaverse(
sambaverse_model_name="Meta/llama-2-7b-chat-hf",
model_kwargs={"select_expert": "llama-2-7b-chat-hf"},
)
output = llm.invoke("What is LangChain")
assert output
assert isinstance(output, str)
def test_sambastudio_call() -> None:
"""Test simple non-streaming call to sambastudio."""
"""Test simple non-streaming call to sambaverse."""
llm = SambaStudio()
output = llm.invoke("What is LangChain")
assert output

View File

@@ -121,4 +121,4 @@ def test_callback_manager_configure_context_vars(
assert cb.completion_tokens == 1
assert cb.total_cost > 0
wait_for_all_tracers()
assert LangChainTracer._persist_run_single.call_count == 4 # type: ignore
assert LangChainTracer._persist_run_single.call_count == 1 # type: ignore

View File

@@ -55,7 +55,6 @@ EXPECTED_ALL = [
"DedocFileLoader",
"DedocPDFLoader",
"PebbloSafeLoader",
"PebbloTextLoader",
"DiffbotLoader",
"DirectoryLoader",
"DiscordChatLoader",

View File

@@ -12,7 +12,6 @@ def raw_docs() -> List[Dict]:
return [
{"_id": "1", "address": {"building": "1", "room": "1"}},
{"_id": "2", "address": {"building": "2", "room": "2"}},
{"_id": "3", "address": {"building": "3", "room": "2"}},
]
@@ -20,23 +19,18 @@ def raw_docs() -> List[Dict]:
def expected_documents() -> List[Document]:
return [
Document(
page_content="{'_id': '2', 'address': {'building': '2', 'room': '2'}}",
page_content="{'_id': '1', 'address': {'building': '1', 'room': '1'}}",
metadata={"database": "sample_restaurants", "collection": "restaurants"},
),
Document(
page_content="{'_id': '3', 'address': {'building': '3', 'room': '2'}}",
page_content="{'_id': '2', 'address': {'building': '2', 'room': '2'}}",
metadata={"database": "sample_restaurants", "collection": "restaurants"},
),
]
@pytest.mark.requires("motor")
async def test_load_mocked_with_filters(expected_documents: List[Document]) -> None:
filter_criteria = {"address.room": {"$eq": "2"}}
field_names = ["address.building", "address.room"]
metadata_names = ["_id"]
include_db_collection_in_metadata = True
async def test_load_mocked(expected_documents: List[Document]) -> None:
mock_async_load = AsyncMock()
mock_async_load.return_value = expected_documents
@@ -57,13 +51,7 @@ async def test_load_mocked_with_filters(expected_documents: List[Document]) -> None:
new=mock_async_load,
):
loader = MongodbLoader(
"mongodb://localhost:27017",
"test_db",
"test_collection",
filter_criteria=filter_criteria,
field_names=field_names,
metadata_names=metadata_names,
include_db_collection_in_metadata=include_db_collection_in_metadata,
"mongodb://localhost:27017", "test_db", "test_collection"
)
loader.collection = mock_collection
documents = await loader.aload()

View File

@@ -1,62 +0,0 @@
import re
from pathlib import Path
import pytest
from langchain_community.document_loaders import PyPDFLoader
path_to_simple_pdf = (
Path(__file__).parent.parent.parent / "integration_tests/examples/hello.pdf"
)
path_to_layout_pdf = (
Path(__file__).parent.parent
/ "document_loaders/sample_documents/layout-parser-paper.pdf"
)
path_to_layout_pdf_txt = (
Path(__file__).parent.parent.parent
/ "integration_tests/examples/layout-parser-paper-page-1.txt"
)
@pytest.mark.requires("pypdf")
def test_pypdf_loader() -> None:
"""Test PyPDFLoader."""
loader = PyPDFLoader(str(path_to_simple_pdf))
docs = loader.load()
assert len(docs) == 1
loader = PyPDFLoader(str(path_to_layout_pdf))
docs = loader.load()
assert len(docs) == 16
for page, doc in enumerate(docs):
assert doc.metadata["page"] == page
assert doc.metadata["source"].endswith("layout-parser-paper.pdf")
assert len(doc.page_content) > 10
first_page = docs[0].page_content
for expected in ["LayoutParser", "A Unified Toolkit"]:
assert expected in first_page
@pytest.mark.requires("pypdf")
def test_pypdf_loader_with_layout() -> None:
"""Test PyPDFLoader with layout mode."""
loader = PyPDFLoader(str(path_to_layout_pdf), extraction_mode="layout")
docs = loader.load()
assert len(docs) == 16
for page, doc in enumerate(docs):
assert doc.metadata["page"] == page
assert doc.metadata["source"].endswith("layout-parser-paper.pdf")
assert len(doc.page_content) > 10
first_page = docs[0].page_content
for expected in ["LayoutParser", "A Unified Toolkit"]:
assert expected in first_page
expected = path_to_layout_pdf_txt.read_text(encoding="utf-8")
cleaned_first_page = re.sub(r"\x00", "", first_page)
cleaned_expected = re.sub(r"\x00", "", expected)
assert cleaned_first_page == cleaned_expected

View File

@@ -25,11 +25,6 @@ def test_pebblo_import() -> None:
from langchain_community.document_loaders import PebbloSafeLoader # noqa: F401
def test_pebblo_text_loader_import() -> None:
"""Test that the Pebblo text loader can be imported."""
from langchain_community.document_loaders import PebbloTextLoader # noqa: F401
def test_empty_filebased_loader(mocker: MockerFixture) -> None:
"""Test basic file based csv loader."""
# Setup
@@ -151,42 +146,3 @@ def test_pebblo_safe_loader_api_key() -> None:
# Assert
assert loader.pb_client.api_key == api_key
assert loader.pb_client.classifier_location == "local"
def test_pebblo_text_loader(mocker: MockerFixture) -> None:
"""
Test loading in-memory text with PebbloTextLoader and PebbloSafeLoader.
"""
# Setup
from langchain_community.document_loaders import PebbloSafeLoader, PebbloTextLoader
mocker.patch.multiple(
"requests",
get=MockResponse(json_data={"data": ""}, status_code=200),
post=MockResponse(json_data={"data": ""}, status_code=200),
)
text = "This is a test text."
source = "fake_source"
expected_docs = [
Document(
metadata={
"full_path": source,
"pb_checksum": None,
},
page_content=text,
),
]
# Exercise
texts = [text]
loader = PebbloSafeLoader(
PebbloTextLoader(texts, source=source),
"dummy_app_name",
"dummy_owner",
"dummy_description",
)
result = loader.load()
# Assert
assert result == expected_docs

View File

@@ -1,7 +1,3 @@
from textwrap import dedent
from typing import Any
from unittest.mock import MagicMock, patch
import pytest as pytest
from langchain_community.document_loaders.web_base import WebBaseLoader
@@ -23,62 +19,3 @@ class TestWebBaseLoader:
assert web_base_loader.web_paths == ["https://www.example.com"]
web_base_loader = WebBaseLoader(web_path="https://www.example.com")
assert web_base_loader.web_paths == ["https://www.example.com"]
@pytest.mark.requires("bs4")
@patch("langchain_community.document_loaders.web_base.requests.Session.get")
def test_lazy_load(mock_get: Any) -> None:
import bs4
mock_response = MagicMock()
mock_response.text = "<html><body><p>Test content</p></body></html>"
mock_get.return_value = mock_response
loader = WebBaseLoader(web_paths=["https://www.example.com"])
results = list(loader.lazy_load())
mock_get.assert_called_with("https://www.example.com")
assert len(results) == 1
assert results[0].page_content == "Test content"
# Test bs4 kwargs
mock_html = dedent("""
<html>
<body>
<p>Test content</p>
<div class="special-class">This is a div with a special class</div>
</body>
</html>
""")
mock_response = MagicMock()
mock_response.text = mock_html
mock_get.return_value = mock_response
loader = WebBaseLoader(
web_paths=["https://www.example.com"],
bs_kwargs={"parse_only": bs4.SoupStrainer(class_="special-class")},
)
results = list(loader.lazy_load())
assert len(results) == 1
assert results[0].page_content == "This is a div with a special class"
@pytest.mark.requires("bs4")
@patch("aiohttp.ClientSession.get")
def test_aload(mock_get: Any) -> None:
async def mock_text() -> str:
return "<html><body><p>Test content</p></body></html>"
mock_response = MagicMock()
mock_response.text = mock_text
mock_get.return_value.__aenter__.return_value = mock_response
loader = WebBaseLoader(
web_paths=["https://www.example.com"],
header_template={"User-Agent": "test-user-agent"},
)
results = loader.aload()
assert len(results) == 1
assert results[0].page_content == "Test content"
mock_get.assert_called_with(
"https://www.example.com", headers={"User-Agent": "test-user-agent"}, cookies={}
)

View File

@@ -1,5 +1,6 @@
from langchain_core.graph_vectorstores.links import Link
from langchain_community.graph_vectorstores.extractors import HierarchyLinkExtractor
from langchain_community.graph_vectorstores.links import Link
PATH_1 = ["Root", "H1", "h2"]

View File

@@ -1,6 +1,6 @@
import pytest
from langchain_core.graph_vectorstores import Link
from langchain_community.graph_vectorstores import Link
from langchain_community.graph_vectorstores.extractors import (
HtmlInput,
HtmlLinkExtractor,

View File

@@ -1,12 +1,12 @@
from typing import Set
from langchain_core.documents import Document
from langchain_core.graph_vectorstores.links import Link, get_links
from langchain_community.graph_vectorstores.extractors import (
LinkExtractor,
LinkExtractorTransformer,
)
from langchain_community.graph_vectorstores.links import Link, get_links
TEXT1 = "Text1"
TEXT2 = "Text2"

View File

@@ -77,6 +77,7 @@ EXPECT_ALL = [
"RWKV",
"Replicate",
"SagemakerEndpoint",
"Sambaverse",
"SambaStudio",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",

View File

@@ -190,40 +190,3 @@ def test_additional_search_options() -> None:
)
assert vector_store.client is not None
assert vector_store.client._api_version == "test"
@pytest.mark.requires("azure.search.documents")
def test_ids_used_correctly() -> None:
"""Check whether vector store uses the document ids when provided with them."""
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from langchain_core.documents import Document
class Response:
def __init__(self) -> None:
self.succeeded: bool = True
def mock_upload_documents(self, documents: List[object]) -> List[Response]: # type: ignore[no-untyped-def]
# assume all documents uploaded successfully
response = [Response() for _ in documents]
return response
documents = [
Document(
page_content="page zero Lorem Ipsum",
metadata={"source": "document.pdf", "page": 0, "id": "ID-document-1"},
),
Document(
page_content="page one Lorem Ipsum",
metadata={"source": "document.pdf", "page": 1, "id": "ID-document-2"},
),
]
ids_provided = [i.metadata.get("id") for i in documents]
with patch.object(
SearchClient, "upload_documents", mock_upload_documents
), patch.object(SearchIndexClient, "get_index", mock_default_index):
vector_store = create_vector_store()
ids_used_at_upload = vector_store.add_documents(documents, ids=ids_provided)
assert len(ids_provided) == len(ids_used_at_upload)
assert ids_provided == ids_used_at_upload

View File

@@ -53,7 +53,7 @@ LangChain Core compiles LCEL sequences to an _optimized execution plan_, with au
For more check out the [LCEL docs](https://python.langchain.com/docs/expression_language/).
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](https://raw.githubusercontent.com/langchain-ai/langchain/e1d113ea84a2edcf4a7709fc5be0e972ea74a5d9/docs/static/svg/langchain_stack_062024.svg "LangChain Framework Overview")
![Diagram outlining the hierarchical organization of the LangChain framework, displaying the interconnected parts across multiple layers.](../../docs/static/svg/langchain_stack_062024.svg "LangChain Framework Overview")
For more advanced use cases, also check out [LangGraph](https://github.com/langchain-ai/langgraph), which is a graph-based runner for cyclic and recursive LLM workflows.
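(Editor's aside: for readers unfamiliar with LCEL, a minimal hedged sketch of the sequence composition the paragraph above describes. `RunnableLambda` and the `|` operator are langchain-core API; the lambdas themselves are illustrative.)

# Minimal LCEL sequence: runnables composed with the | operator.
from langchain_core.runnables import RunnableLambda

chain = RunnableLambda(lambda s: s.strip()) | RunnableLambda(len)
print(chain.invoke("  hello  "))  # -> 5
# The same compiled plan also supports batching:
print(chain.batch(["  a  ", "  bb  "]))  # -> [1, 2]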

View File

@@ -14,8 +14,7 @@ import contextlib
import functools
import inspect
import warnings
from collections.abc import Generator
from typing import Any, Callable, TypeVar, Union, cast
from typing import Any, Callable, Generator, Type, TypeVar, Union, cast
from langchain_core._api.internal import is_caller_internal
@@ -27,7 +26,7 @@ class LangChainBetaWarning(DeprecationWarning):
# PUBLIC API
T = TypeVar("T", bound=Union[Callable[..., Any], type])
T = TypeVar("T", bound=Union[Callable[..., Any], Type])
def beta(
@@ -155,7 +154,7 @@ def beta(
_name = _name or obj.fget.__qualname__
old_doc = obj.__doc__
class _BetaProperty(property):
class _beta_property(property):
"""A beta property."""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
@@ -186,7 +185,7 @@ def beta(
def finalize(wrapper: Callable[..., Any], new_doc: str) -> Any:
"""Finalize the property."""
return _BetaProperty(
return _beta_property(
fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc
)
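(Editor's aside: the hunk above renames a `property` subclass. A minimal hedged sketch of the underlying pattern, with hypothetical names rather than LangChain's: subclass `property` so attribute access can emit a warning before delegating to the real getter.)

import warnings

class _WarnOnAccess(property):  # hypothetical name, not LangChain's
    """Property subclass that warns each time the getter is invoked."""

    def __get__(self, obj, objtype=None):
        if obj is not None:  # class-level access just returns the descriptor
            warnings.warn("beta attribute accessed", UserWarning, stacklevel=2)
        return super().__get__(obj, objtype)

class Example:
    @_WarnOnAccess
    def value(self) -> int:
        return 42

Example().value  # emits UserWarning, then returns 42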

Some files were not shown because too many files have changed in this diff.