mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-16 18:24:31 +00:00

Compare commits: langchain-… ... sr/chroma-…
19 Commits
cfa6a71359
8046291a7a
c2b6b75daa
3fb0a55122
5fb8fd863a
79a537d308
ba2518995d
04a899ebe3
a82d987f09
a60fd06784
629b7a5a43
ab871a7b39
d30c56a8c1
09c1991e96
a7903280dd
d0f0d1f966
403fae8eec
d6b50ad3f6
10a9c24dae
.github/scripts/check_diff.py (vendored, 2 changed lines)

@@ -38,8 +38,8 @@ IGNORED_PARTNERS = [
 ]

 PY_312_MAX_PACKAGES = [
     "libs/partners/huggingface",  # https://github.com/pytorch/pytorch/issues/130249
     "libs/partners/voyageai",
     "libs/partners/chroma",  # https://github.com/chroma-core/chroma/issues/4382
 ]
.github/scripts/prep_api_docs_build.py (vendored, 4 changed lines)

@@ -69,7 +69,7 @@ def main():
     clean_target_directories([
         p
         for p in package_yaml["packages"]
-        if p["repo"].startswith("langchain-ai/")
+        if (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
         and p["repo"] != "langchain-ai/langchain"
     ])

@@ -78,7 +78,7 @@ def main():
         p
         for p in package_yaml["packages"]
         if not p.get("disabled", False)
-        and p["repo"].startswith("langchain-ai/")
+        and (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
         and p["repo"] != "langchain-ai/langchain"
     ])
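Both hunks widen the same predicate: a package is now kept if its repo is in the langchain-ai org *or* it opts in via `include_in_api_ref`. A minimal sketch of that selection logic — the field names `repo`, `disabled`, and `include_in_api_ref` come from the diff above, but the sample entries are invented:

```python
# Sketch of the selection predicate from prep_api_docs_build.py; sample data is invented.
packages = [
    {"repo": "langchain-ai/langchain-google"},                           # kept: in-org
    {"repo": "langchain-ai/langchain"},                                  # dropped: the monorepo itself
    {"repo": "some-partner/langchain-foo", "include_in_api_ref": True},  # kept: opted in
    {"repo": "some-partner/langchain-bar"},                              # dropped: out-of-org, no opt-in
    {"repo": "langchain-ai/old-pkg", "disabled": True},                  # dropped by the disabled guard
]

selected = [
    p
    for p in packages
    if not p.get("disabled", False)
    and (p["repo"].startswith("langchain-ai/") or p.get("include_in_api_ref"))
    and p["repo"] != "langchain-ai/langchain"
]

print([p["repo"] for p in selected])
# ['langchain-ai/langchain-google', 'some-partner/langchain-foo']
```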
.github/workflows/api_doc_build.yml (vendored, 23 changed lines)

@@ -26,7 +26,20 @@ jobs:
         id: get-unsorted-repos
         uses: mikefarah/yq@master
         with:
-          cmd: yq '.packages[].repo' langchain/libs/packages.yml
+          cmd: |
+            yq '
+              .packages[]
+              | select(
+                  (
+                    (.repo | test("^langchain-ai/"))
+                    and
+                    (.repo != "langchain-ai/langchain")
+                  )
+                  or
+                  (.include_in_api_ref // false)
+                )
+              | .repo
+            ' langchain/libs/packages.yml

       - name: Parse YAML and checkout repos
         env:
@@ -38,11 +51,9 @@ jobs:

           # Checkout each unique repository that is in langchain-ai org
           for repo in $REPOS; do
-            if [[ "$repo" != "langchain-ai/langchain" && "$repo" == langchain-ai/* ]]; then
-              REPO_NAME=$(echo $repo | cut -d'/' -f2)
-              echo "Checking out $repo to $REPO_NAME"
-              git clone --depth 1 https://github.com/$repo.git $REPO_NAME
-            fi
+            REPO_NAME=$(echo $repo | cut -d'/' -f2)
+            echo "Checking out $repo to $REPO_NAME"
+            git clone --depth 1 https://github.com/$repo.git $REPO_NAME
           done

       - name: Setup python ${{ env.PYTHON_VERSION }}
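With the org/opt-in filtering moved into the yq query, every repo that reaches the shell loop is already eligible, so the `if` guard could be dropped. A rough Python equivalent of the simplified loop body — the repo list here is a placeholder standing in for the yq output:

```python
# Rough Python equivalent of the simplified checkout loop; repo list is a placeholder.
import subprocess

repos = ["langchain-ai/langchain-google", "some-partner/langchain-foo"]  # pre-filtered by yq

for repo in repos:
    repo_name = repo.split("/", 1)[1]  # mirrors: cut -d'/' -f2
    print(f"Checking out {repo} to {repo_name}")
    subprocess.run(
        ["git", "clone", "--depth", "1", f"https://github.com/{repo}.git", repo_name],
        check=True,
    )
```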
@@ -107,7 +107,7 @@ outputs will appear as part of the [AIMessage](/docs/concepts/messages/#aimessag
 response object. See for example:

 - Generating [audio outputs](/docs/integrations/chat/openai/#audio-generation-preview) with OpenAI;
-- Generating [image outputs](/docs/integrations/chat/google_generative_ai/#image-generation) with Google Gemini.
+- Generating [image outputs](/docs/integrations/chat/google_generative_ai/#multimodal-usage) with Google Gemini.

 #### Tools
@@ -1,35 +1,26 @@
 {
  "cells": [
   {
-   "cell_type": "raw",
-   "id": "afaf8039",
+   "cell_type": "markdown",
+   "id": "d982c99f",
    "metadata": {},
    "source": [
     "---\n",
-    "sidebar_label: Google AI\n",
+    "sidebar_label: Google Gemini\n",
     "---"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "e49f1e0d",
+   "id": "56a6d990",
    "metadata": {},
    "source": [
     "# ChatGoogleGenerativeAI\n",
     "\n",
-    "This docs will help you get started with Google AI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html).\n",
+    "Access Google's Generative AI models, including the Gemini family, directly via the Gemini API or experiment rapidly using Google AI Studio. The `langchain-google-genai` package provides the LangChain integration for these models. This is often the best starting point for individual developers.\n",
     "\n",
-    "Google AI offers a number of different chat models. For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini).\n",
+    "For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All examples use the `gemini-2.0-flash` model. Gemini 2.5 Pro and 2.5 Flash can be used via `gemini-2.5-pro-preview-03-25` and `gemini-2.5-flash-preview-04-17`. All model ids can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
     "\n",
     ":::info Google AI vs Google Cloud Vertex AI\n",
     "\n",
     "Google's Gemini models are accessible through Google AI and through Google Cloud Vertex AI. Using Google AI just requires a Google account and an API key. Using Google Cloud Vertex AI requires a Google Cloud account (with term agreements and billing) but offers enterprise features like customer encryption key, virtual private cloud, and more.\n",
     "\n",
     "To learn more about the key features of the two APIs see the [Google docs](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai#google-ai).\n",
     "\n",
     ":::\n",
     "\n",
     "## Overview\n",
     "### Integration details\n",
     "\n",
     "| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/google_generativeai) | Package downloads | Package latest |\n",
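The 2.5 model ids named in the intro drop into the same constructor; a quick sketch (preview ids like these rotate, so verify against the Gemini API docs):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Preview ids taken from the text above; check https://ai.google.dev/gemini-api/docs/models
llm_pro = ChatGoogleGenerativeAI(model="gemini-2.5-pro-preview-03-25")
llm_flash = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-04-17")
```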
@@ -37,23 +28,46 @@
    "| [ChatGoogleGenerativeAI](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html) | [langchain-google-genai](https://python.langchain.com/api_reference/google_genai/index.html) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-google-genai?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-google-genai?style=flat-square&label=%20) |\n",
    "\n",
    "### Model features\n",
    "\n",
    "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
    "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
    "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |\n",
    "\n",
-   "## Setup\n",
+   "### Setup\n",
    "\n",
-   "To access Google AI models you'll need to create a Google Acount account, get a Google AI API key, and install the `langchain-google-genai` integration package.\n",
+   "To access Google AI models you'll need to create a Google Account, get a Google AI API key, and install the `langchain-google-genai` integration package.\n",
    "\n",
    "### Credentials\n",
    "\n",
-   "Head to https://ai.google.dev/gemini-api/docs/api-key to generate a Google AI API key. Once you've done this set the GOOGLE_API_KEY environment variable:"
+   "**1. Installation:**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "433e8d2b-9519-4b49-b2c4-7ab65b046c94",
+  "id": "8d12ce35",
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install -U langchain-google-genai"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "60be0b38",
   "metadata": {},
   "source": [
    "**2. Credentials:**\n",
    "\n",
    "Head to [https://ai.google.dev/gemini-api/docs/api-key](https://ai.google.dev/gemini-api/docs/api-key) (or via Google AI Studio) to generate a Google AI API key.\n",
    "\n",
    "### Chat Models\n",
    "\n",
    "Use the `ChatGoogleGenerativeAI` class to interact with Google's chat models. See the [API reference](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html) for full details.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fb18c875",
   "metadata": {},
   "outputs": [],
   "source": [
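The body of the credentials cell is not shown in this diff; a typical pattern for setting the key (a sketch, not necessarily the notebook's exact cell) is:

```python
import getpass
import os

# Prompt for the key only if it isn't already set in the environment.
if "GOOGLE_API_KEY" not in os.environ:
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter your Google AI API key: ")
```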
@@ -66,7 +80,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "72ee0c4b-9764-423a-9dbf-95129e185210",
+  "id": "f050e8db",
   "metadata": {},
   "source": [
    "To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
@@ -75,7 +89,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
+  "id": "82cb346f",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -85,27 +99,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "0730d6a1-c893-4840-9817-5e5251676d5d",
-  "metadata": {},
-  "source": [
-   "### Installation\n",
-   "\n",
-   "The LangChain Google AI integration lives in the `langchain-google-genai` package:"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "id": "652d6238-1f87-422a-b135-f5abbb8652fc",
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "%pip install -qU langchain-google-genai"
-  ]
- },
- {
-  "cell_type": "markdown",
-  "id": "a38cde65-254d-4219-a441-068766c0d4b5",
+  "id": "273cefa0",
   "metadata": {},
   "source": [
    "## Instantiation\n",
@@ -115,15 +109,15 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
-  "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
+  "execution_count": 4,
+  "id": "7d3dc0b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "llm = ChatGoogleGenerativeAI(\n",
-   "    model=\"gemini-2.0-flash-001\",\n",
+   "    model=\"gemini-2.0-flash\",\n",
    "    temperature=0,\n",
    "    max_tokens=None,\n",
    "    timeout=None,\n",
@@ -134,7 +128,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "2b4f3e15",
+  "id": "343a8c13",
   "metadata": {},
   "source": [
    "## Invocation"
@@ -142,19 +136,17 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 3,
-  "id": "62e0dbc3",
-  "metadata": {
-   "tags": []
-  },
+  "execution_count": 5,
+  "id": "82c5708c",
+  "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
-      "AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'model_name': 'gemini-2.0-flash-001', 'safety_ratings': []}, id='run-61cff164-40be-4f88-a2df-cca58297502f-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27, 'input_token_details': {'cache_read': 0}})"
+      "AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'model_name': 'gemini-2.0-flash', 'safety_ratings': []}, id='run-3b28d4b8-8a62-4e6c-ad4e-b53e6e825749-0', usage_metadata={'input_tokens': 20, 'output_tokens': 7, 'total_tokens': 27, 'input_token_details': {'cache_read': 0}})"
      ]
     },
-    "execution_count": 3,
+    "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -173,8 +165,8 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 4,
-  "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705",
+  "execution_count": 6,
+  "id": "49d2d0c2",
   "metadata": {},
   "outputs": [
    {
@@ -191,7 +183,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8",
+  "id": "ee3f6e1d",
   "metadata": {},
   "source": [
    "## Chaining\n",
@@ -201,17 +193,17 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 5,
-  "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b",
+  "execution_count": 7,
+  "id": "3c8407ee",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
-      "AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'model_name': 'gemini-2.0-flash-001', 'safety_ratings': []}, id='run-dd2f8fb9-62d9-4b84-9c97-ed9c34cda313-0', usage_metadata={'input_tokens': 15, 'output_tokens': 7, 'total_tokens': 22, 'input_token_details': {'cache_read': 0}})"
+      "AIMessage(content='Ich liebe Programmieren.', additional_kwargs={}, response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'model_name': 'gemini-2.0-flash', 'safety_ratings': []}, id='run-e5561c6b-2beb-4411-9210-4796b576a7cd-0', usage_metadata={'input_tokens': 15, 'output_tokens': 7, 'total_tokens': 22, 'input_token_details': {'cache_read': 0}})"
      ]
     },
-    "execution_count": 5,
+    "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -241,22 +233,164 @@
  },
  {
   "cell_type": "markdown",
-  "id": "41c2ff10-a3ba-4f40-b3aa-7a395854849e",
+  "id": "bdae9742",
   "metadata": {},
   "source": [
-   "## Image generation\n",
+   "## Multimodal Usage\n",
    "\n",
-   "Some Gemini models (specifically `gemini-2.0-flash-exp`) support image generation capabilities.\n",
+   "Gemini models can accept multimodal inputs (text, images, audio, video) and, for some models, generate multimodal outputs.\n",
    "\n",
-   "### Text to image\n",
+   "### Image Input\n",
    "\n",
-   "See a simple usage example below:"
+   "Provide image inputs along with text using a `HumanMessage` with a list content format. The `gemini-2.0-flash` model can handle images."
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
-  "id": "7589e14d-8d1b-4c82-965f-5558d80cb677",
+  "execution_count": null,
+  "id": "6833fe5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import base64\n",
    "\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "# Example using a public URL (remains the same)\n",
    "message_url = HumanMessage(\n",
    "    content=[\n",
    "        {\n",
    "            \"type\": \"text\",\n",
    "            \"text\": \"Describe the image at the URL.\",\n",
    "        },\n",
    "        {\"type\": \"image_url\", \"image_url\": \"https://picsum.photos/seed/picsum/200/300\"},\n",
    "    ]\n",
    ")\n",
    "result_url = llm.invoke([message_url])\n",
    "print(f\"Response for URL image: {result_url.content}\")\n",
    "\n",
    "# Example using a local image file encoded in base64\n",
    "image_file_path = \"/Users/philschmid/projects/google-gemini/langchain/docs/static/img/agents_vs_chains.png\"\n",
    "\n",
    "with open(image_file_path, \"rb\") as image_file:\n",
    "    encoded_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
    "\n",
    "message_local = HumanMessage(\n",
    "    content=[\n",
    "        {\"type\": \"text\", \"text\": \"Describe the local image.\"},\n",
    "        {\"type\": \"image_url\", \"image_url\": f\"data:image/png;base64,{encoded_image}\"},\n",
    "    ]\n",
    ")\n",
    "result_local = llm.invoke([message_local])\n",
    "print(f\"Response for local image: {result_local.content}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1b422382",
   "metadata": {},
   "source": [
    "Other supported `image_url` formats:\n",
    "- A Google Cloud Storage URI (`gs://...`). Ensure the service account has access.\n",
    "- A PIL Image object (the library handles encoding).\n",
    "\n",
    "### Audio Input\n",
    "\n",
    "Provide audio file inputs along with text. Use a model like `gemini-2.0-flash`."
   ]
  },
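The PIL option listed above has no example in the notebook; a minimal sketch, assuming Pillow is installed, `llm` is the `ChatGoogleGenerativeAI` instance from earlier, and the exact shape of passing a PIL object as `image_url` follows the notebook's claim that the library handles encoding:

```python
from langchain_core.messages import HumanMessage
from PIL import Image

pil_image = Image.open("example.png")  # hypothetical local file

# Per the notebook text, a PIL Image can stand in for the image_url value.
message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": pil_image},
    ]
)
response = llm.invoke([message])
print(response.content)
```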
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a3461836",
   "metadata": {},
   "outputs": [],
   "source": [
    "import base64\n",
    "\n",
    "from langchain_core.messages import HumanMessage\n",
    "\n",
    "# Ensure you have an audio file named 'example_audio.mp3' or provide the correct path.\n",
    "audio_file_path = \"example_audio.mp3\"\n",
    "audio_mime_type = \"audio/mpeg\"\n",
    "\n",
    "\n",
    "with open(audio_file_path, \"rb\") as audio_file:\n",
    "    encoded_audio = base64.b64encode(audio_file.read()).decode(\"utf-8\")\n",
    "\n",
    "message = HumanMessage(\n",
    "    content=[\n",
    "        {\"type\": \"text\", \"text\": \"Transcribe the audio.\"},\n",
    "        {\n",
    "            \"type\": \"media\",\n",
    "            \"data\": encoded_audio,  # Use base64 string directly\n",
    "            \"mime_type\": audio_mime_type,\n",
    "        },\n",
    "    ]\n",
    ")\n",
    "response = llm.invoke([message])  # Uncomment to run\n",
    "print(f\"Response for audio: {response.content}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0d898e27",
   "metadata": {},
   "source": [
    "### Video Input\n",
    "\n",
    "Provide video file inputs along with text. Use a model like `gemini-2.0-flash`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3046e74b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import base64\n",
    "\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "# Ensure you have a video file named 'example_video.mp4' or provide the correct path.\n",
    "video_file_path = \"example_video.mp4\"\n",
    "video_mime_type = \"video/mp4\"\n",
    "\n",
    "\n",
    "with open(video_file_path, \"rb\") as video_file:\n",
    "    encoded_video = base64.b64encode(video_file.read()).decode(\"utf-8\")\n",
    "\n",
    "message = HumanMessage(\n",
    "    content=[\n",
    "        {\"type\": \"text\", \"text\": \"Describe the first few frames of the video.\"},\n",
    "        {\n",
    "            \"type\": \"media\",\n",
    "            \"data\": encoded_video,  # Use base64 string directly\n",
    "            \"mime_type\": video_mime_type,\n",
    "        },\n",
    "    ]\n",
    ")\n",
    "response = llm.invoke([message])  # Uncomment to run\n",
    "print(f\"Response for video: {response.content}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2df11d89",
   "metadata": {},
   "source": [
    "### Image Generation (Multimodal Output)\n",
    "\n",
    "The `gemini-2.0-flash` model can generate text and images inline (image generation is experimental). You need to specify the desired `response_modalities`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0b7180f",
   "metadata": {},
   "outputs": [
    {
@@ -266,17 +400,12 @@
      "<IPython.core.display.Image object>"
     ]
    },
-   "metadata": {
-    "image/png": {
-     "width": 300
-    }
-   },
+   "metadata": {},
    "output_type": "display_data"
   }
  ],
  "source": [
   "import base64\n",
   "from io import BytesIO\n",
   "\n",
   "from IPython.display import Image, display\n",
   "from langchain_google_genai import ChatGoogleGenerativeAI\n",
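The generation call itself is truncated in this diff; a sketch of the pattern such a cell follows — the `generation_config=dict(response_modalities=["TEXT", "IMAGE"])` argument matches the section text above, while the model id and prompt here are illustrative and should be checked against the current docs:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp-image-generation")  # illustrative id

response = llm.invoke(
    "Generate a photorealistic image of a cuddly cat wearing a hat.",
    generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
)
# Image blocks come back base64-encoded inside response.content.
```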
@@ -301,7 +430,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "b14c0d87-cf7e-4d88-bda1-2ab40ec0350a",
+  "id": "14bf00f1",
   "metadata": {},
   "source": [
    "### Image and text to image\n",
@@ -311,8 +440,8 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 3,
-  "id": "0f4ed7a5-980c-4b54-b743-0b988909744c",
+  "execution_count": null,
+  "id": "d65e195c",
   "metadata": {},
   "outputs": [
    {
@@ -322,11 +451,7 @@
      "<IPython.core.display.Image object>"
     ]
    },
-   "metadata": {
-    "image/png": {
-     "width": 300
-    }
-   },
+   "metadata": {},
    "output_type": "display_data"
   }
  ],
@@ -349,7 +474,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "a62669d8-becd-495f-8f4a-82d7c5d87969",
+  "id": "43b54d3f",
   "metadata": {},
   "source": [
    "You can also represent an input image and query in a single message by encoding the base64 data in the [data URI scheme](https://en.wikipedia.org/wiki/Data_URI_scheme):"
@@ -357,8 +482,8 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 9,
-  "id": "6241da43-e210-43bc-89af-b3c480ea06e9",
+  "execution_count": null,
+  "id": "0dfc7e1e",
   "metadata": {},
   "outputs": [
    {
@@ -368,11 +493,7 @@
      "<IPython.core.display.Image object>"
     ]
    },
-   "metadata": {
-    "image/png": {
-     "width": 300
-    }
-   },
+   "metadata": {},
    "output_type": "display_data"
   }
  ],
@@ -403,7 +524,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "cfe228d3-6773-4283-9788-87bdf6912b1c",
+  "id": "789818d7",
   "metadata": {},
   "source": [
    "You can also use LangGraph to manage the conversation history for you as in [this tutorial](/docs/tutorials/chatbot/)."
@@ -411,7 +532,313 @@
  },
  {
   "cell_type": "markdown",
-  "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd",
+  "id": "b037e2dc",
   "metadata": {},
   "source": [
    "## Tool Calling\n",
    "\n",
    "You can equip the model with tools to call."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b0d759f9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{'name': 'get_weather', 'args': {'location': 'San Francisco'}, 'id': 'a6248087-74c5-4b7c-9250-f335e642927c', 'type': 'tool_call'}]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "AIMessage(content=\"OK. It's sunny in San Francisco.\", additional_kwargs={}, response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'model_name': 'gemini-2.0-flash', 'safety_ratings': []}, id='run-ac5bb52c-e244-4c72-9fbc-fb2a9cd7a72e-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40, 'input_token_details': {'cache_read': 0}})"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from langchain_core.tools import tool\n",
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "\n",
    "# Define the tool\n",
    "@tool(description=\"Get the current weather in a given location\")\n",
    "def get_weather(location: str) -> str:\n",
    "    return \"It's sunny.\"\n",
    "\n",
    "\n",
    "# Initialize the model and bind the tool\n",
    "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
    "llm_with_tools = llm.bind_tools([get_weather])\n",
    "\n",
    "# Invoke the model with a query that should trigger the tool\n",
    "query = \"What's the weather in San Francisco?\"\n",
    "ai_msg = llm_with_tools.invoke(query)\n",
    "\n",
    "# Check the tool calls in the response\n",
    "print(ai_msg.tool_calls)\n",
    "\n",
    "# Example tool call message would be needed here if you were actually running the tool\n",
    "from langchain_core.messages import ToolMessage\n",
    "\n",
    "tool_message = ToolMessage(\n",
    "    content=get_weather(*ai_msg.tool_calls[0][\"args\"]),\n",
    "    tool_call_id=ai_msg.tool_calls[0][\"id\"],\n",
    ")\n",
    "llm_with_tools.invoke([ai_msg, tool_message])  # Example of passing tool result back"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "91d42b86",
   "metadata": {},
   "source": [
    "## Structured Output\n",
    "\n",
    "Force the model to respond with a specific structure using Pydantic models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "7457dbe4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "name='Abraham Lincoln' height_m=1.93\n"
     ]
    }
   ],
   "source": [
    "from langchain_core.pydantic_v1 import BaseModel, Field\n",
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "\n",
    "# Define the desired structure\n",
    "class Person(BaseModel):\n",
    "    \"\"\"Information about a person.\"\"\"\n",
    "\n",
    "    name: str = Field(..., description=\"The person's name\")\n",
    "    height_m: float = Field(..., description=\"The person's height in meters\")\n",
    "\n",
    "\n",
    "# Initialize the model\n",
    "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", temperature=0)\n",
    "structured_llm = llm.with_structured_output(Person)\n",
    "\n",
    "# Invoke the model with a query asking for structured information\n",
    "result = structured_llm.invoke(\n",
    "    \"Who was the 16th president of the USA, and how tall was he in meters?\"\n",
    ")\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "90d4725e",
   "metadata": {},
   "source": [
    "\n",
    "\n",
    "## Token Usage Tracking\n",
    "\n",
    "Access token usage information from the response metadata."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "edcc003e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Prompt engineering is the art and science of crafting effective text prompts to elicit desired and accurate responses from large language models.\n",
      "\n",
      "Usage Metadata:\n",
      "{'input_tokens': 10, 'output_tokens': 24, 'total_tokens': 34, 'input_token_details': {'cache_read': 0}}\n"
     ]
    }
   ],
   "source": [
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
    "\n",
    "result = llm.invoke(\"Explain the concept of prompt engineering in one sentence.\")\n",
    "\n",
    "print(result.content)\n",
    "print(\"\\nUsage Metadata:\")\n",
    "print(result.usage_metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "28950dbc",
   "metadata": {},
   "source": [
    "## Built-in tools\n",
    "\n",
    "Google Gemini supports a variety of built-in tools ([google search](https://ai.google.dev/gemini-api/docs/grounding/search-suggestions), [code execution](https://ai.google.dev/gemini-api/docs/code-execution?lang=python)), which can be bound to the model in the usual way."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dd074816",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The next total solar eclipse visible in the United States will occur on August 23, 2044. However, the path of totality will only pass through Montana, North Dakota, and South Dakota.\n",
      "\n",
      "For a total solar eclipse that crosses a significant portion of the continental U.S., you'll have to wait until August 12, 2045. This eclipse will start in California and end in Florida.\n"
     ]
    }
   ],
   "source": [
    "from google.ai.generativelanguage_v1beta.types import Tool as GenAITool\n",
    "\n",
    "resp = llm.invoke(\n",
    "    \"When is the next total solar eclipse in US?\",\n",
    "    tools=[GenAITool(google_search={})],\n",
    ")\n",
    "\n",
    "print(resp.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "6964be2d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Executable code: print(2*2)\n",
      "\n",
      "Code execution result: 4\n",
      "\n",
      "2*2 is 4.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/philschmid/projects/google-gemini/langchain/.venv/lib/python3.9/site-packages/langchain_google_genai/chat_models.py:580: UserWarning: \n",
      "        ⚠️ Warning: Output may vary each run. \n",
      "        - 'executable_code': Always present. \n",
      "        - 'execution_result' & 'image_url': May be absent for some queries. \n",
      "\n",
      "        Validate before using in production.\n",
      "\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from google.ai.generativelanguage_v1beta.types import Tool as GenAITool\n",
    "\n",
    "resp = llm.invoke(\n",
    "    \"What is 2*2, use python\",\n",
    "    tools=[GenAITool(code_execution={})],\n",
    ")\n",
    "\n",
    "for c in resp.content:\n",
    "    if isinstance(c, dict):\n",
    "        if c[\"type\"] == \"code_execution_result\":\n",
    "            print(f\"Code execution result: {c['code_execution_result']}\")\n",
    "        elif c[\"type\"] == \"executable_code\":\n",
    "            print(f\"Executable code: {c['executable_code']}\")\n",
    "    else:\n",
    "        print(c)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a27e6ff4",
   "metadata": {},
   "source": [
    "## Native Async\n",
    "\n",
    "Use asynchronous methods for non-blocking calls."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "c6803e57",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Async Invoke Result: The sky is blue due to a phenomenon called **Rayle...\n",
      "\n",
      "Async Stream Result:\n",
      "The thread is free, it does not wait,\n",
      "For answers slow, or tasks of fate.\n",
      "A promise made, a future bright,\n",
      "It moves ahead, with all its might.\n",
      "\n",
      "A callback waits, a signal sent,\n",
      "When data's read, or job is spent.\n",
      "Non-blocking code, a graceful dance,\n",
      "Responsive apps, a fleeting glance.\n",
      "\n",
      "Async Batch Results: ['1 + 1 = 2', '2 + 2 = 4']\n"
     ]
    }
   ],
   "source": [
    "from langchain_google_genai import ChatGoogleGenerativeAI\n",
    "\n",
    "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
    "\n",
    "\n",
    "async def run_async_calls():\n",
    "    # Async invoke\n",
    "    result_ainvoke = await llm.ainvoke(\"Why is the sky blue?\")\n",
    "    print(\"Async Invoke Result:\", result_ainvoke.content[:50] + \"...\")\n",
    "\n",
    "    # Async stream\n",
    "    print(\"\\nAsync Stream Result:\")\n",
    "    async for chunk in llm.astream(\n",
    "        \"Write a short poem about asynchronous programming.\"\n",
    "    ):\n",
    "        print(chunk.content, end=\"\", flush=True)\n",
    "    print(\"\\n\")\n",
    "\n",
    "    # Async batch\n",
    "    results_abatch = await llm.abatch([\"What is 1+1?\", \"What is 2+2?\"])\n",
    "    print(\"Async Batch Results:\", [res.content for res in results_abatch])\n",
    "\n",
    "\n",
    "await run_async_calls()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "99204b32",
   "metadata": {},
   "source": [
    "## Safety Settings\n",
@@ -421,8 +848,8 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 14,
-  "id": "238b2f96-e573-4fac-bbf2-7e52ad926833",
+  "execution_count": null,
+  "id": "d4c14039",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -442,7 +869,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "5805d40c-deb8-4924-8e72-a294a0482fc9",
+  "id": "dea38fb1",
   "metadata": {},
   "source": [
    "For an enumeration of the categories and thresholds available, see Google's [safety setting types](https://ai.google.dev/api/python/google/generativeai/types/SafetySettingDict)."
@@ -450,7 +877,7 @@
  },
  {
   "cell_type": "markdown",
-  "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
+  "id": "d6d0e853",
   "metadata": {},
   "source": [
    "## API reference\n",
@@ -461,7 +888,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
+   "display_name": ".venv",
    "language": "python",
    "name": "python3"
   },
@@ -475,7 +902,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.10.4"
+   "version": "3.9.6"
  }
 },
 "nbformat": 4,
@@ -1413,6 +1413,23 @@
    "second_output_message = llm.invoke(history)"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "id": "90c18d18-b25c-4509-a639-bd652b92f518",
+  "metadata": {},
+  "source": [
+   "## Flex processing\n",
+   "\n",
+   "OpenAI offers a variety of [service tiers](https://platform.openai.com/docs/guides/flex-processing). The \"flex\" tier offers cheaper pricing for requests, with the trade-off that responses may take longer and resources might not always be available. This approach is best suited for non-critical tasks, including model testing, data enhancement, or jobs that can be run asynchronously.\n",
+   "\n",
+   "To use it, initialize the model with `service_tier=\"flex\"`:\n",
+   "```python\n",
+   "llm = ChatOpenAI(model=\"o4-mini\", service_tier=\"flex\")\n",
+   "```\n",
+   "\n",
+   "Note that this is a beta feature that is only available for a subset of models. See OpenAI [docs](https://platform.openai.com/docs/guides/flex-processing) for more detail."
+  ]
+ },
 {
  "cell_type": "markdown",
  "id": "a796d728-971b-408b-88d5-440015bbb941",
@@ -1420,7 +1437,7 @@
  "source": [
   "## API reference\n",
   "\n",
-  "For detailed documentation of all ChatOpenAI features and configurations head to the API reference: https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html"
+  "For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html)."
  ]
 }
],
@@ -34,33 +34,46 @@
   "id": "juAmbgoWD17u"
  },
  "source": [
-  "The AstraDB Document Loader returns a list of Langchain Documents from an AstraDB database.\n",
+  "The Astra DB Document Loader returns a list of Langchain `Document` objects read from an Astra DB collection.\n",
   "\n",
-  "The Loader takes the following parameters:\n",
+  "The loader takes the following parameters:\n",
   "\n",
-  "* `api_endpoint`: AstraDB API endpoint. Looks like `https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com`\n",
-  "* `token`: AstraDB token. Looks like `AstraCS:6gBhNmsk135....`\n",
+  "* `api_endpoint`: Astra DB API endpoint. Looks like `https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com`\n",
+  "* `token`: Astra DB token. Looks like `AstraCS:aBcD0123...`\n",
   "* `collection_name` : AstraDB collection name\n",
-  "* `namespace`: (Optional) AstraDB namespace\n",
+  "* `namespace`: (Optional) AstraDB namespace (called _keyspace_ in Astra DB)\n",
   "* `filter_criteria`: (Optional) Filter used in the find query\n",
   "* `projection`: (Optional) Projection used in the find query\n",
   "* `find_options`: (Optional) Options used in the find query\n",
   "* `nb_prefetched`: (Optional) Number of documents pre-fetched by the loader\n",
   "* `limit`: (Optional) Maximum number of documents to retrieve\n",
   "* `extraction_function`: (Optional) A function to convert the AstraDB document to the LangChain `page_content` string. Defaults to `json.dumps`\n",
   "\n",
-  "The following metadata is set to the LangChain Documents metadata output:\n",
+  "The loader sets the following metadata for the documents it reads:\n",
   "\n",
   "```python\n",
-  "{\n",
-  "    metadata : {\n",
-  "        \"namespace\": \"...\", \n",
-  "        \"api_endpoint\": \"...\", \n",
-  "        \"collection\": \"...\"\n",
-  "    }\n",
+  "metadata={\n",
+  "    \"namespace\": \"...\", \n",
+  "    \"api_endpoint\": \"...\", \n",
+  "    \"collection\": \"...\"\n",
+  "}\n",
   "```"
  ]
 },
 {
  "cell_type": "markdown",
  "metadata": {},
  "source": [
   "## Setup"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": 1,
  "metadata": {},
  "outputs": [],
  "source": [
   "!pip install \"langchain-astradb>=0.6,<0.7\""
  ]
 },
 {
  "attachments": {},
  "cell_type": "markdown",
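Several of the parameters listed above (`filter_criteria`, `extraction_function`) are not exercised later in the notebook; a hypothetical invocation combining them — the filter field and lambda here are made up for illustration:

```python
from langchain_astradb import AstraDBLoader

loader = AstraDBLoader(
    api_endpoint=ASTRA_DB_API_ENDPOINT,
    token=ASTRA_DB_APPLICATION_TOKEN,
    collection_name="movie_reviews",
    filter_criteria={"rating": {"$gt": 8}},  # hypothetical server-side filter
    projection={"title": 1, "reviewtext": 1},
    limit=5,
    extraction_function=lambda doc: doc.get("reviewtext", ""),  # page_content from one field
)
docs = loader.load()
```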
@@ -71,24 +84,43 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
-   "from langchain_community.document_loaders import AstraDBLoader"
+   "from langchain_astradb import AstraDBLoader"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "[**API Reference:** `AstraDBLoader`](https://python.langchain.com/api_reference/astradb/document_loaders/langchain_astradb.document_loaders.AstraDBLoader.html#langchain_astradb.document_loaders.AstraDBLoader)"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 4,
+  "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-01-08T12:41:22.643335Z",
     "start_time": "2024-01-08T12:40:57.759116Z"
    },
-   "collapsed": false
+   "collapsed": false,
+   "jupyter": {
+    "outputs_hidden": false
+   }
   },
-  "outputs": [],
+  "outputs": [
+   {
+    "name": "stdin",
+    "output_type": "stream",
+    "text": [
+     "ASTRA_DB_API_ENDPOINT = https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com\n",
+     "ASTRA_DB_APPLICATION_TOKEN = ········\n"
+    ]
+   }
+  ],
   "source": [
    "from getpass import getpass\n",
    "\n",
@@ -98,7 +130,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 6,
+  "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-01-08T12:42:25.395162Z",
@@ -112,19 +144,22 @@
    "    token=ASTRA_DB_APPLICATION_TOKEN,\n",
    "    collection_name=\"movie_reviews\",\n",
    "    projection={\"title\": 1, \"reviewtext\": 1},\n",
-   "    find_options={\"limit\": 10},\n",
+   "    limit=10,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 7,
+  "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-01-08T12:42:30.236489Z",
     "start_time": "2024-01-08T12:42:29.612133Z"
    },
-   "collapsed": false
+   "collapsed": false,
+   "jupyter": {
+    "outputs_hidden": false
+   }
   },
   "outputs": [],
   "source": [
@@ -133,7 +168,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 8,
+  "execution_count": 6,
  "metadata": {
    "ExecuteTime": {
     "end_time": "2024-01-08T12:42:31.369394Z",
@@ -144,10 +179,10 @@
    {
     "data": {
      "text/plain": [
-      "Document(page_content='{\"_id\": \"659bdffa16cbc4586b11a423\", \"title\": \"Dangerous Men\", \"reviewtext\": \"\\\\\"Dangerous Men,\\\\\" the picture\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\"}', metadata={'namespace': 'default_keyspace', 'api_endpoint': 'https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com', 'collection': 'movie_reviews'})"
+      "Document(metadata={'namespace': 'default_keyspace', 'api_endpoint': 'https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com', 'collection': 'movie_reviews'}, page_content='{\"_id\": \"659bdffa16cbc4586b11a423\", \"title\": \"Dangerous Men\", \"reviewtext\": \"\\\\\"Dangerous Men,\\\\\" the picture\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\"}')"
      ]
     },
-    "execution_count": 8,
+    "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -179,7 +214,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.9.18"
+  "version": "3.12.8"
  }
 },
 "nbformat": 4,
@@ -49,7 +49,14 @@
   "metadata": {},
   "outputs": [],
   "source": [
-   "from langchain_community.document_loaders import BrowserbaseLoader"
+   "import os\n",
+   "\n",
+   "from langchain_community.document_loaders import BrowserbaseLoader\n",
+   "\n",
+   "load_dotenv()\n",
+   "\n",
+   "BROWSERBASE_API_KEY = os.getenv(\"BROWSERBASE_API_KEY\")\n",
+   "BROWSERBASE_PROJECT_ID = os.getenv(\"BROWSERBASE_PROJECT_ID\")"
   ]
  },
  {
@@ -59,6 +66,8 @@
   "outputs": [],
   "source": [
    "loader = BrowserbaseLoader(\n",
+   "    api_key=BROWSERBASE_API_KEY,\n",
+   "    project_id=BROWSERBASE_PROJECT_ID,\n",
    "    urls=[\n",
    "        \"https://example.com\",\n",
    "    ],\n",
@@ -78,52 +87,11 @@
    "\n",
    "- `urls` Required. A list of URLs to fetch.\n",
    "- `text_content` Retrieve only text content. Default is `False`.\n",
-   "- `api_key` Optional. Browserbase API key. Default is `BROWSERBASE_API_KEY` env variable.\n",
-   "- `project_id` Optional. Browserbase Project ID. Default is `BROWSERBASE_PROJECT_ID` env variable.\n",
+   "- `api_key` Browserbase API key. Default is `BROWSERBASE_API_KEY` env variable.\n",
+   "- `project_id` Browserbase Project ID. Default is `BROWSERBASE_PROJECT_ID` env variable.\n",
    "- `session_id` Optional. Provide an existing Session ID.\n",
    "- `proxy` Optional. Enable/Disable Proxies."
   ]
- },
- {
-  "cell_type": "markdown",
-  "metadata": {},
-  "source": [
-   "## Loading images\n",
-   "\n",
-   "You can also load screenshots of webpages (as bytes) for multi-modal models.\n",
-   "\n",
-   "Full example using GPT-4V:"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "from browserbase import Browserbase\n",
-   "from browserbase.helpers.gpt4 import GPT4VImage, GPT4VImageDetail\n",
-   "from langchain_core.messages import HumanMessage\n",
-   "from langchain_openai import ChatOpenAI\n",
-   "\n",
-   "chat = ChatOpenAI(model=\"gpt-4-vision-preview\", max_tokens=256)\n",
-   "browser = Browserbase()\n",
-   "\n",
-   "screenshot = browser.screenshot(\"https://browserbase.com\")\n",
-   "\n",
-   "result = chat.invoke(\n",
-   "    [\n",
-   "        HumanMessage(\n",
-   "            content=[\n",
-   "                {\"type\": \"text\", \"text\": \"What color is the logo?\"},\n",
-   "                GPT4VImage(screenshot, GPT4VImageDetail.auto),\n",
-   "            ]\n",
-   "        )\n",
-   "    ]\n",
-   ")\n",
-   "\n",
-   "print(result.content)"
-  ]
 }
 ],
 "metadata": {
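The `text_content` and `session_id` options from the parameter list above aren't demonstrated in the notebook; a hypothetical configuration using them (the session id is a placeholder):

```python
from langchain_community.document_loaders import BrowserbaseLoader

loader = BrowserbaseLoader(
    api_key=BROWSERBASE_API_KEY,
    project_id=BROWSERBASE_PROJECT_ID,
    urls=["https://example.com"],
    text_content=True,                     # strip markup, keep only text
    session_id="an-existing-session-id",   # placeholder: reuse a Browserbase session
)
docs = loader.load()
```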
@@ -3112,8 +3112,8 @@
    "|------------|---------|\n",
    "| langchain_astradb.cache | [AstraDBCache](https://python.langchain.com/api_reference/astradb/cache/langchain_astradb.cache.AstraDBCache.html) |\n",
    "| langchain_astradb.cache | [AstraDBSemanticCache](https://python.langchain.com/api_reference/astradb/cache/langchain_astradb.cache.AstraDBSemanticCache.html) |\n",
-   "| langchain_community.cache | [AstraDBCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.AstraDBCache.html) |\n",
-   "| langchain_community.cache | [AstraDBSemanticCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.AstraDBSemanticCache.html) |\n",
+   "| langchain_community.cache | [AstraDBCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.AstraDBCache.html) (deprecated since `langchain-community==0.0.28`) |\n",
+   "| langchain_community.cache | [AstraDBSemanticCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.AstraDBSemanticCache.html) (deprecated since `langchain-community==0.0.28`) |\n",
    "| langchain_community.cache | [AzureCosmosDBSemanticCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.AzureCosmosDBSemanticCache.html) |\n",
    "| langchain_community.cache | [CassandraCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.CassandraCache.html) |\n",
    "| langchain_community.cache | [CassandraSemanticCache](https://python.langchain.com/api_reference/community/cache/langchain_community.cache.CassandraSemanticCache.html) |\n",
@@ -17,22 +17,22 @@
   "id": "f507f58b-bf22-4a48-8daf-68d869bcd1ba",
   "metadata": {},
   "source": [
-   "## Setting up\n",
+   "## Setup\n",
    "\n",
    "To run this notebook you need a running Astra DB. Get the connection secrets on your Astra dashboard:\n",
    "\n",
    "- the API Endpoint looks like `https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com`;\n",
-   "- the Token looks like `AstraCS:6gBhNmsk135...`."
+   "- the Database Token looks like `AstraCS:aBcD0123...`."
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 1,
   "id": "d7092199",
   "metadata": {},
   "outputs": [],
   "source": [
-   "%pip install --upgrade --quiet \"astrapy>=0.7.1 langchain-community\" "
+   "!pip install \"langchain-astradb>=0.6,<0.7\""
   ]
  },
  {
@@ -45,12 +45,12 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 1,
+  "execution_count": 2,
   "id": "163d97f0",
   "metadata": {},
   "outputs": [
    {
-    "name": "stdout",
+    "name": "stdin",
     "output_type": "stream",
     "text": [
     "ASTRA_DB_API_ENDPOINT = https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com\n",
@@ -65,14 +65,6 @@
    "ASTRA_DB_APPLICATION_TOKEN = getpass.getpass(\"ASTRA_DB_APPLICATION_TOKEN = \")"
   ]
  },
- {
-  "cell_type": "markdown",
-  "id": "55860b2d",
-  "metadata": {},
-  "source": [
-   "Depending on whether local or cloud-based Astra DB, create the corresponding database connection \"Session\" object."
-  ]
- },
 {
  "cell_type": "markdown",
  "id": "36c163e8",
@@ -83,12 +75,12 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
+  "execution_count": 3,
   "id": "d15e3302",
   "metadata": {},
   "outputs": [],
   "source": [
-   "from langchain_community.chat_message_histories import AstraDBChatMessageHistory\n",
+   "from langchain_astradb import AstraDBChatMessageHistory\n",
    "\n",
    "message_history = AstraDBChatMessageHistory(\n",
    "    session_id=\"test-session\",\n",
@@ -98,22 +90,31 @@
    "\n",
    "message_history.add_user_message(\"hi!\")\n",
    "\n",
-   "message_history.add_ai_message(\"whats up?\")"
+   "message_history.add_ai_message(\"hello, how are you?\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "53acb4a8-d536-4a58-9fee-7d70033d9c81",
   "metadata": {},
   "source": [
    "[**API Reference:** `AstraDBChatMessageHistory`](https://python.langchain.com/api_reference/astradb/chat_message_histories/langchain_astradb.chat_message_histories.AstraDBChatMessageHistory.html#langchain_astradb.chat_message_histories.AstraDBChatMessageHistory)"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 3,
+  "execution_count": 4,
   "id": "64fc465e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
-      "[HumanMessage(content='hi!'), AIMessage(content='whats up?')]"
+      "[HumanMessage(content='hi!', additional_kwargs={}, response_metadata={}),\n",
+      " AIMessage(content='hello, how are you?', additional_kwargs={}, response_metadata={})]"
      ]
     },
-    "execution_count": 3,
+    "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -139,7 +140,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.12"
+  "version": "3.12.8"
  }
 },
 "nbformat": 4,
@@ -7,10 +7,10 @@

 ## Installation and Setup

-We need to install the `hdbcli` python package.
+We need to install the `langchain-hana` python package.

 ```bash
-pip install hdbcli
+pip install langchain-hana
 ```

 ## Vectorstore

@@ -21,5 +21,5 @@ pip install hdbcli
 See a [usage example](/docs/integrations/vectorstores/sap_hanavector).

 ```python
-from langchain_community.vectorstores.hanavector import HanaDB
+from langchain_hana import HanaDB
 ```
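A minimal connection-and-instantiation sketch for the new package — the connection details are placeholders, and the `HanaDB` keyword arguments shown follow the pattern of the predecessor class in `langchain_community`, so check them against the `langchain-hana` API reference:

```python
from hdbcli import dbapi  # the SAP HANA database driver is still needed at runtime
from langchain_hana import HanaDB
from langchain_openai import OpenAIEmbeddings

# Placeholder connection details for a SAP HANA Cloud instance.
connection = dbapi.connect(
    address="<hostname>",
    port=443,
    user="<username>",
    password="<password>",
)

db = HanaDB(
    connection=connection,
    embedding=OpenAIEmbeddings(),
    table_name="LANGCHAIN_DEMO",  # hypothetical table name
)
```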
@@ -4,7 +4,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-  "# Astra DB (Cassandra)\n",
+  "# Astra DB\n",
   "\n",
   ">[DataStax Astra DB](https://docs.datastax.com/en/astra/home/astra.html) is a serverless vector-capable database built on `Cassandra` and made conveniently available through an easy-to-use JSON API.\n",
   "\n",
@@ -16,32 +16,46 @@
  "metadata": {},
  "source": [
   "## Creating an Astra DB vector store\n",
-  "First we'll want to create an Astra DB VectorStore and seed it with some data. We've created a small demo set of documents that contain summaries of movies.\n",
+  "First, create an Astra DB vector store and seed it with some data.\n",
   "\n",
-  "NOTE: The self-query retriever requires you to have `lark` installed (`pip install lark`). We also need the `astrapy` package."
+  "We've created a small demo set of documents containing movie summaries.\n",
+  "\n",
+  "NOTE: The self-query retriever requires the `lark` package installed (`pip install lark`)."
  ]
 },
 {
  "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
+ "execution_count": null,
+ "metadata": {
+  "scrolled": true
+ },
  "outputs": [],
  "source": [
-  "%pip install --upgrade --quiet lark astrapy langchain-openai"
+  "!pip install \"langchain-astradb>=0.6,<0.7\" \\\n",
+  "    \"langchain_openai>=0.3,<0.4\" \\\n",
+  "    \"lark>=1.2,<2.0\""
  ]
 },
 {
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-  "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key."
+  "In this example, you'll use the `OpenAIEmbeddings`. Please enter an OpenAI API Key."
  ]
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
  "metadata": {},
- "outputs": [],
+ "outputs": [
+  {
+   "name": "stdin",
+   "output_type": "stream",
+   "text": [
+    "OpenAI API Key: ········\n"
+   ]
+  }
+ ],
  "source": [
   "import os\n",
   "from getpass import getpass\n",
@@ -69,14 +83,23 @@
    "Create the Astra DB VectorStore:\n",
    "\n",
    "- the API Endpoint looks like `https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com`\n",
-   "- the Token looks like `AstraCS:6gBhNmsk135....`"
+   "- the Token looks like `AstraCS:aBcD0123...`"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 2,
   "metadata": {},
-  "outputs": [],
+  "outputs": [
+   {
+    "name": "stdin",
+    "output_type": "stream",
+    "text": [
+     "ASTRA_DB_API_ENDPOINT = https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com\n",
+     "ASTRA_DB_APPLICATION_TOKEN = ········\n"
+    ]
+   }
+  ],
   "source": [
    "ASTRA_DB_API_ENDPOINT = input(\"ASTRA_DB_API_ENDPOINT = \")\n",
    "ASTRA_DB_APPLICATION_TOKEN = getpass(\"ASTRA_DB_APPLICATION_TOKEN = \")"
@@ -84,11 +107,11 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
-   "from langchain_community.vectorstores import AstraDB\n",
+   "from langchain_astradb import AstraDBVectorStore\n",
    "from langchain_core.documents import Document\n",
    "\n",
    "docs = [\n",
@@ -101,11 +124,13 @@
    "        metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n",
    "    ),\n",
    "    Document(\n",
-   "        page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
+   "        page_content=\"A psychologist / detective gets lost in a series of dreams within dreams \"\n",
+   "        \"within dreams and Inception reused the idea\",\n",
    "        metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n",
    "    ),\n",
    "    Document(\n",
-   "        page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
+   "        page_content=\"A bunch of normal-sized women are supremely wholesome and some men \"\n",
+   "        \"pine after them\",\n",
    "        metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n",
    "    ),\n",
    "    Document(\n",
@@ -123,7 +148,7 @@
    "    ),\n",
    "]\n",
    "\n",
-   "vectorstore = AstraDB.from_documents(\n",
+   "vectorstore = AstraDBVectorStore.from_documents(\n",
    "    docs,\n",
    "    embeddings,\n",
    "    collection_name=\"astra_self_query_demo\",\n",
@@ -136,13 +161,16 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-   "## Creating our self-querying retriever\n",
-   "Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
+   "## Creating a self-querying retriever\n",
+   "\n",
+   "Now you can instantiate the retriever.\n",
+   "\n",
+   "To do this, you need to provide some information upfront about the metadata fields that the documents support, along with a short description of the documents' contents."
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -174,7 +202,11 @@
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||
" llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n",
|
||||
" llm,\n",
|
||||
" vectorstore,\n",
|
||||
" document_content_description,\n",
|
||||
" metadata_field_info,\n",
|
||||
" verbose=True,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
@@ -183,14 +215,29 @@
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Testing it out\n",
|
||||
"And now we can try actually using our retriever!"
|
||||
"\n",
|
||||
"Now you can try actually using our retriever:"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[Document(id='d7b9ec1edafa467caab524455e8c1f5d', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}, page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose'),\n",
" Document(id='8ad04ef2a73d4f74897a51e49be1a8d2', metadata={'year': 1995, 'genre': 'animated'}, page_content='Toys come alive and have a blast doing so'),\n",
" Document(id='5b07e600d3494506952b60e0a45a0546', metadata={'year': 1979, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'rating': 9.9}, page_content='Three men walk into the Zone, three men walk out of the Zone'),\n",
" Document(id='a0cef19e27c341929098ac4793602829', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}, page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea')]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a relevant query\n",
"retriever.invoke(\"What are some movies about dinosaurs?\")"
@@ -198,9 +245,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[Document(id='5b07e600d3494506952b60e0a45a0546', metadata={'year': 1979, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'rating': 9.9}, page_content='Three men walk into the Zone, three men walk out of the Zone'),\n",
" Document(id='a0cef19e27c341929098ac4793602829', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}, page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea')]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a filter\n",
"retriever.invoke(\"I want to watch a movie rated higher than 8.5\")"
@@ -208,9 +267,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[Document(id='0539843fd203484c9be486c2a0e2454c', metadata={'year': 2019, 'director': 'Greta Gerwig', 'rating': 8.3}, page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them')]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a query and a filter\n",
"retriever.invoke(\"Has Greta Gerwig directed any movies about women\")"
@@ -218,9 +288,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[Document(id='a0cef19e27c341929098ac4793602829', metadata={'year': 2006, 'director': 'Satoshi Kon', 'rating': 8.6}, page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea'),\n",
" Document(id='5b07e600d3494506952b60e0a45a0546', metadata={'year': 1979, 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'rating': 9.9}, page_content='Three men walk into the Zone, three men walk out of the Zone')]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a composite filter\n",
"retriever.invoke(\"What's a highly rated (above 8.5), science fiction movie ?\")"
@@ -228,9 +310,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[Document(id='8ad04ef2a73d4f74897a51e49be1a8d2', metadata={'year': 1995, 'genre': 'animated'}, page_content='Toys come alive and have a blast doing so')]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a query and composite filter\n",
"retriever.invoke(\n",
@@ -242,20 +335,20 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filter k\n",
"## Set a limit ('k')\n",
"\n",
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
"you can also use the self-query retriever to specify `k`, the number of documents to fetch.\n",
"\n",
"We can do this by passing `enable_limit=True` to the constructor."
"You achieve this by passing `enable_limit=True` to the constructor."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"retriever = SelfQueryRetriever.from_llm(\n",
"retriever_k = SelfQueryRetriever.from_llm(\n",
" llm,\n",
" vectorstore,\n",
" document_content_description,\n",
@@ -267,12 +360,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[Document(id='d7b9ec1edafa467caab524455e8c1f5d', metadata={'year': 1993, 'rating': 7.7, 'genre': 'science fiction'}, page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose'),\n",
" Document(id='8ad04ef2a73d4f74897a51e49be1a8d2', metadata={'year': 1995, 'genre': 'animated'}, page_content='Toys come alive and have a blast doing so')]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a relevant query\n",
"retriever.invoke(\"What are two movies about dinosaurs?\")"
"retriever_k.invoke(\"What are two movies about dinosaurs?\")"
]
},
{
@@ -293,7 +398,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"metadata": {
"collapsed": false,
"jupyter": {
@@ -322,7 +427,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.12.8"
}
},
"nbformat": 4,

@@ -1,13 +1,76 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "8543d632",
"metadata": {},
"source": [
"---\n",
"sidebar_label: Google Gemini\n",
"keywords: [google gemini embeddings]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "afab8b36-10bb-4795-bc98-75ab2d2081bb",
"metadata": {},
"source": [
"# Google Generative AI Embeddings\n",
"# Google Generative AI Embeddings (AI Studio & Gemini API)\n",
"\n",
"Connect to Google's generative AI embeddings service using the `GoogleGenerativeAIEmbeddings` class, found in the [langchain-google-genai](https://pypi.org/project/langchain-google-genai/) package."
"Connect to Google's generative AI embeddings service using the `GoogleGenerativeAIEmbeddings` class, found in the [langchain-google-genai](https://pypi.org/project/langchain-google-genai/) package.\n",
"\n",
"This will help you get started with Google's Generative AI embedding models (like Gemini) using LangChain. For detailed documentation on `GoogleGenerativeAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/v0.2/api_reference/google_genai/embeddings/langchain_google_genai.embeddings.GoogleGenerativeAIEmbeddings.html).\n",
"\n",
"## Overview\n",
"### Integration details\n",
"\n",
"import { ItemTable } from \"@theme/FeatureTables\";\n",
"\n",
"<ItemTable category=\"text_embedding\" item=\"Google Gemini\" />\n",
"\n",
"## Setup\n",
"\n",
"To access Google Generative AI embedding models you'll need to create a Google Cloud project, enable the Generative Language API, get an API key, and install the `langchain-google-genai` integration package.\n",
"\n",
"### Credentials\n",
"\n",
"To use Google Generative AI models, you must have an API key. You can create one in Google AI Studio. See the [Google documentation](https://ai.google.dev/gemini-api/docs/api-key) for instructions.\n",
"\n",
"Once you have a key, set it as an environment variable `GOOGLE_API_KEY`:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "47652620",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if not os.getenv(\"GOOGLE_API_KEY\"):\n",
" os.environ[\"GOOGLE_API_KEY\"] = getpass.getpass(\"Enter your Google API key: \")"
]
},
{
"cell_type": "markdown",
"id": "67283790",
"metadata": {},
"source": [
"To enable automated tracing of your model calls, set your [LangSmith](https://docs.smith.langchain.com/) API key:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eccf1968",
"metadata": {},
"outputs": [],
"source": [
"# os.environ[\"LANGSMITH_TRACING\"] = \"true\"\n",
"# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")"
]
},
{
@@ -28,28 +91,6 @@
"%pip install --upgrade --quiet langchain-google-genai"
]
},
{
"cell_type": "markdown",
"id": "25f3f88e-164e-400d-b371-9fa488baba19",
"metadata": {},
"source": [
"## Credentials"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ec89153f-8999-4aab-a21b-0bfba1cc3893",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"if \"GOOGLE_API_KEY\" not in os.environ:\n",
" os.environ[\"GOOGLE_API_KEY\"] = getpass.getpass(\"Provide your Google API key here\")"
]
},
{
"cell_type": "markdown",
"id": "f2437b22-e364-418a-8c13-490a026cb7b5",
@@ -60,17 +101,21 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 20,
"id": "eedc551e-a1f3-4fd8-8d65-4e0784c4441b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[0.05636945, 0.0048285457, -0.0762591, -0.023642512, 0.05329321]"
"[-0.024917153641581535,\n",
" 0.012005362659692764,\n",
" -0.003886754624545574,\n",
" -0.05774897709488869,\n",
" 0.0020742062479257584]"
]
},
"execution_count": 6,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
@@ -78,7 +123,7 @@
"source": [
"from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
"\n",
"embeddings = GoogleGenerativeAIEmbeddings(model=\"models/text-embedding-004\")\n",
"embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-exp-03-07\")\n",
"vector = embeddings.embed_query(\"hello, world!\")\n",
"vector[:5]"
]
@@ -95,17 +140,17 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 5,
"id": "6ec53aba-404f-4778-acd9-5d6664e79ed2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(3, 768)"
"(3, 3072)"
]
},
"execution_count": 7,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -121,6 +166,56 @@
"len(vectors), len(vectors[0])"
]
},
{
"cell_type": "markdown",
"id": "c362bfbf",
"metadata": {},
"source": [
"## Indexing and Retrieval\n",
"\n",
"Embedding models are often used in retrieval-augmented generation (RAG) flows, both as part of indexing data as well as later retrieving it. For more detailed instructions, please see our [RAG tutorials](/docs/tutorials/).\n",
"\n",
"Below, see how to index and retrieve data using the `embeddings` object we initialized above. In this example, we will index and retrieve a sample document in the `InMemoryVectorStore`."
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "606a7f65",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'LangChain is the framework for building context-aware reasoning applications'"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Create a vector store with a sample text\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"\n",
"text = \"LangChain is the framework for building context-aware reasoning applications\"\n",
"\n",
"vectorstore = InMemoryVectorStore.from_texts(\n",
" [text],\n",
" embedding=embeddings,\n",
")\n",
"\n",
"# Use the vectorstore as a retriever\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"# Retrieve the most similar text\n",
"retrieved_documents = retriever.invoke(\"What is LangChain?\")\n",
"\n",
"# show the retrieved document's content\n",
"retrieved_documents[0].page_content"
]
},
{
"cell_type": "markdown",
"id": "1482486f-5617-498a-8a44-1974d3212dda",
@@ -129,70 +224,74 @@
"## Task type\n",
"`GoogleGenerativeAIEmbeddings` optionally support a `task_type`, which currently must be one of:\n",
"\n",
"- task_type_unspecified\n",
"- retrieval_query\n",
"- retrieval_document\n",
"- semantic_similarity\n",
"- classification\n",
"- clustering\n",
"- `SEMANTIC_SIMILARITY`: Used to generate embeddings that are optimized to assess text similarity.\n",
"- `CLASSIFICATION`: Used to generate embeddings that are optimized to classify texts according to preset labels.\n",
"- `CLUSTERING`: Used to generate embeddings that are optimized to cluster texts based on their similarities.\n",
"- `RETRIEVAL_DOCUMENT`, `RETRIEVAL_QUERY`, `QUESTION_ANSWERING`, and `FACT_VERIFICATION`: Used to generate embeddings that are optimized for document search or information retrieval.\n",
"- `CODE_RETRIEVAL_QUERY`: Used to retrieve a code block based on a natural language query, such as sort an array or reverse a linked list. Embeddings of the code blocks are computed using `RETRIEVAL_DOCUMENT`.\n",
"\n",
"By default, we use `retrieval_document` in the `embed_documents` method and `retrieval_query` in the `embed_query` method. If you provide a task type, we will use that for all methods."
"By default, we use `RETRIEVAL_DOCUMENT` in the `embed_documents` method and `RETRIEVAL_QUERY` in the `embed_query` method. If you provide a task type, we will use that for all methods."
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "a223bb25-2b1b-418e-a570-2f543083132e",
"execution_count": null,
"id": "b7acc5c2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"%pip install --upgrade --quiet matplotlib scikit-learn"
]
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 19,
"id": "f1f077db-8eb4-49f7-8866-471a8528dcdb",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Document 1\n",
"Cosine similarity with query: 0.7892893360164779\n",
"---\n",
"Document 2\n",
"Cosine similarity with query: 0.5438283285204146\n",
"---\n"
]
}
],
"source": [
"from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
"from sklearn.metrics.pairwise import cosine_similarity\n",
"\n",
"query_embeddings = GoogleGenerativeAIEmbeddings(\n",
" model=\"models/embedding-001\", task_type=\"retrieval_query\"\n",
" model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_QUERY\"\n",
")\n",
"doc_embeddings = GoogleGenerativeAIEmbeddings(\n",
" model=\"models/embedding-001\", task_type=\"retrieval_document\"\n",
")"
" model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
")\n",
"\n",
"q_embed = query_embeddings.embed_query(\"What is the capital of France?\")\n",
"d_embed = doc_embeddings.embed_documents(\n",
|
")\n",
"\n",
"for i, d in enumerate(d_embed):\n",
" print(f\"Document {i+1}:\")\n",
" print(f\"Cosine similarity with query: {cosine_similarity([q_embed], [d])[0][0]}\")\n",
" print(\"---\")"
]
},
{
"cell_type": "markdown",
"id": "79bd4a5e-75ba-413c-befa-86167c938caf",
"id": "f45ea7b1",
"metadata": {},
"source": [
"All of these will be embedded with the 'retrieval_query' task set\n",
"```python\n",
"query_vecs = [query_embeddings.embed_query(q) for q in [query, query_2, answer_1]]\n",
"```\n",
"All of these will be embedded with the 'retrieval_document' task set\n",
"```python\n",
"doc_vecs = [doc_embeddings.embed_query(q) for q in [query, query_2, answer_1]]\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "9e1fae5e-0f84-4812-89f5-7d4d71affbc1",
"metadata": {},
"source": [
"In retrieval, relative distance matters. In the image above, you can see the difference in similarity scores between the \"relevant doc\" and \"simil stronger delta between the similar query and relevant doc on the latter case."
"## API Reference\n",
"\n",
"For detailed documentation on `GoogleGenerativeAIEmbeddings` features and configuration options, please refer to the [API reference](https://python.langchain.com/api_reference/google_genai/embeddings/langchain_google_genai.embeddings.GoogleGenerativeAIEmbeddings.html).\n"
]
},
{
@@ -211,7 +310,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -225,7 +324,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.9.6"
}
},
"nbformat": 4,

@@ -6,18 +6,16 @@
"source": [
"# SAP HANA Cloud Vector Engine\n",
"\n",
">[SAP HANA Cloud Vector Engine](https://www.sap.com/events/teched/news-guide/ai.html#article8) is a vector store fully integrated into the `SAP HANA Cloud` database.\n",
"\n",
"You'll need to install `langchain-community` with `pip install -qU langchain-community` to use this integration"
">[SAP HANA Cloud Vector Engine](https://help.sap.com/docs/hana-cloud-database/sap-hana-cloud-sap-hana-database-vector-engine-guide/sap-hana-cloud-sap-hana-database-vector-engine-guide) is a vector store fully integrated into the `SAP HANA Cloud` database."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting up\n",
"## Setup\n",
"\n",
"Installation of the HANA database driver."
"Install the `langchain-hana` external integration package, as well as the other packages used throughout this notebook."
]
},
{
@@ -26,53 +24,36 @@
"metadata": {
"tags": []
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# Pip install necessary package\n",
"%pip install --upgrade --quiet hdbcli"
"%pip install -qU langchain-hana"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For `OpenAIEmbeddings` we use the OpenAI API key from the environment."
"### Credentials\n",
"\n",
"Ensure your SAP HANA instance is running. Load your credentials from environment variables and create a connection:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2023-09-09T08:02:16.802456Z",
"start_time": "2023-09-09T08:02:07.065604Z"
}
},
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"# Use OPENAI_API_KEY env variable\n",
"# os.environ[\"OPENAI_API_KEY\"] = \"Your OpenAI API key\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Create a database connection to a HANA Cloud instance."
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"ExecuteTime": {
"end_time": "2023-09-09T08:02:28.174088Z",
"start_time": "2023-09-09T08:02:28.162698Z"
}
},
"outputs": [],
"source": [
"\n",
"from dotenv import load_dotenv\n",
"from hdbcli import dbapi\n",
"\n",
@@ -88,6 +69,92 @@
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Learn more about SAP HANA in [What is SAP HANA?](https://www.sap.com/products/data-cloud/hana/what-is-sap-hana.html)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Initialization\n",
"To initialize a `HanaDB` vector store, you need a database connection and an embedding instance. SAP HANA Cloud Vector Engine supports both external and internal embeddings."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- #### Using External Embeddings\n",
"\n",
"import EmbeddingTabs from \"@theme/EmbeddingTabs\";\n",
"\n",
"<EmbeddingTabs/>"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings(model=\"text-embedding-3-large\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- #### Using Internal Embeddings"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Alternatively, you can compute embeddings directly in SAP HANA using its native `VECTOR_EMBEDDING()` function. To enable this, create an instance of `HanaInternalEmbeddings` with your internal model ID and pass it to `HanaDB`. Note that the `HanaInternalEmbeddings` instance is specifically designed for use with `HanaDB` and is not intended for use with other vector store implementations. For more information about internal embedding, see the [SAP HANA VECTOR_EMBEDDING Function](https://help.sap.com/docs/hana-cloud-database/sap-hana-cloud-sap-hana-database-vector-engine-guide/vector-embedding-function-vector).\n",
"\n",
"> **Caution:** Ensure NLP is enabled in your SAP HANA Cloud instance."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from langchain_hana import HanaInternalEmbeddings\n",
"\n",
"embeddings = HanaInternalEmbeddings(internal_embedding_model_id=\"SAP_NEB.20240715\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once you have your connection and embedding instance, create the vector store by passing them to `HanaDB` along with a table name for storing vectors:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from langchain_hana import HanaDB\n",
"\n",
"db = HanaDB(\n",
" embedding=embeddings, connection=connection, table_name=\"STATE_OF_THE_UNION\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -104,7 +171,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 6,
"metadata": {
"ExecuteTime": {
"end_time": "2023-09-09T08:02:25.452472Z",
@@ -122,40 +189,16 @@
],
"source": [
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores.hanavector import HanaDB\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",
"text_documents = TextLoader(\"../../how_to/state_of_the_union.txt\").load()\n",
"text_documents = TextLoader(\n",
" \"../../how_to/state_of_the_union.txt\", encoding=\"UTF-8\"\n",
").load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
"text_chunks = text_splitter.split_documents(text_documents)\n",
"print(f\"Number of document chunks: {len(text_chunks)}\")\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Create a LangChain VectorStore interface for the HANA database and specify the table (collection) to use for accessing the vector embeddings"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"ExecuteTime": {
"end_time": "2023-09-09T08:04:16.696625Z",
"start_time": "2023-09-09T08:02:31.817790Z"
}
},
"outputs": [],
"source": [
"db = HanaDB(\n",
" embedding=embeddings, connection=connection, table_name=\"STATE_OF_THE_UNION\"\n",
")"
"print(f\"Number of document chunks: {len(text_chunks)}\")"
]
},
{
@@ -167,7 +210,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 7,
"metadata": {},
"outputs": [
{
@@ -176,7 +219,7 @@
"[]"
]
},
"execution_count": 12,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -199,7 +242,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -235,7 +278,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 9,
"metadata": {},
"outputs": [
{
@@ -254,7 +297,7 @@
}
],
"source": [
"from langchain_community.vectorstores.utils import DistanceStrategy\n",
"from langchain_hana.utils import DistanceStrategy\n",
"\n",
"db = HanaDB(\n",
" embedding=embeddings,\n",
@@ -286,7 +329,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 10,
"metadata": {
"ExecuteTime": {
"end_time": "2023-09-09T08:05:23.276819Z",
@@ -336,7 +379,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 11,
"metadata": {},
"outputs": [
{
@@ -411,7 +454,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -420,7 +463,7 @@
"True"
]
},
"execution_count": 19,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -443,7 +486,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 13,
"metadata": {},
"outputs": [
{
@@ -452,7 +495,7 @@
"[]"
]
},
"execution_count": 20,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -471,7 +514,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 14,
"metadata": {},
"outputs": [
{
@@ -480,7 +523,7 @@
"[]"
]
},
"execution_count": 21,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -508,7 +551,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 15,
"metadata": {},
"outputs": [
{
@@ -539,7 +582,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 16,
"metadata": {},
"outputs": [
{
@@ -578,13 +621,14 @@
"| `$nin` | Not contained in a set of given values (not in) |\n",
"| `$between` | Between the range of two boundary values |\n",
"| `$like` | Text equality based on the \"LIKE\" semantics in SQL (using \"%\" as wildcard) |\n",
"| `$contains` | Filters documents containing a specific keyword |\n",
"| `$and` | Logical \"and\", supporting 2 or more operands |\n",
"| `$or` | Logical \"or\", supporting 2 or more operands |"
]
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
@@ -592,15 +636,15 @@
"docs = [\n",
" Document(\n",
" page_content=\"First\",\n",
" metadata={\"name\": \"adam\", \"is_active\": True, \"id\": 1, \"height\": 10.0},\n",
" metadata={\"name\": \"Adam Smith\", \"is_active\": True, \"id\": 1, \"height\": 10.0},\n",
" ),\n",
" Document(\n",
" page_content=\"Second\",\n",
" metadata={\"name\": \"bob\", \"is_active\": False, \"id\": 2, \"height\": 5.7},\n",
" metadata={\"name\": \"Bob Johnson\", \"is_active\": False, \"id\": 2, \"height\": 5.7},\n",
" ),\n",
" Document(\n",
" page_content=\"Third\",\n",
" metadata={\"name\": \"jane\", \"is_active\": True, \"id\": 3, \"height\": 2.4},\n",
" metadata={\"name\": \"Jane Doe\", \"is_active\": True, \"id\": 3, \"height\": 2.4},\n",
" ),\n",
"]\n",
"\n",
@@ -632,7 +676,7 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 18,
"metadata": {},
"outputs": [
{
@@ -640,19 +684,19 @@
"output_type": "stream",
"text": [
"Filter: {'id': {'$ne': 1}}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"{'name': 'jane', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Jane Doe', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'id': {'$gt': 1}}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"{'name': 'jane', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Jane Doe', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'id': {'$gte': 1}}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"{'name': 'jane', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'Jane Doe', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'id': {'$lt': 1}}\n",
"<empty result>\n",
"Filter: {'id': {'$lte': 1}}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n"
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n"
]
}
],
@@ -687,7 +731,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 19,
"metadata": {},
"outputs": [
{
@@ -695,13 +739,13 @@
"output_type": "stream",
"text": [
"Filter: {'id': {'$between': (1, 2)}}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'name': {'$in': ['adam', 'bob']}}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'name': {'$nin': ['adam', 'bob']}}\n",
"{'name': 'jane', 'is_active': True, 'id': 3, 'height': 2.4}\n"
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'name': {'$in': ['Adam Smith', 'Bob Johnson']}}\n",
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'name': {'$nin': ['Adam Smith', 'Bob Johnson']}}\n",
"{'name': 'Jane Doe', 'is_active': True, 'id': 3, 'height': 2.4}\n"
]
}
],
@@ -710,11 +754,11 @@
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))\n",
"\n",
"advanced_filter = {\"name\": {\"$in\": [\"adam\", \"bob\"]}}\n",
"advanced_filter = {\"name\": {\"$in\": [\"Adam Smith\", \"Bob Johnson\"]}}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))\n",
"\n",
"advanced_filter = {\"name\": {\"$nin\": [\"adam\", \"bob\"]}}\n",
"advanced_filter = {\"name\": {\"$nin\": [\"Adam Smith\", \"Bob Johnson\"]}}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))"
]
@@ -728,7 +772,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 20,
"metadata": {},
"outputs": [
{
@@ -736,10 +780,10 @@
"output_type": "stream",
"text": [
"Filter: {'name': {'$like': 'a%'}}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"<empty result>\n",
"Filter: {'name': {'$like': '%a%'}}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'jane', 'is_active': True, 'id': 3, 'height': 2.4}\n"
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'Jane Doe', 'is_active': True, 'id': 3, 'height': 2.4}\n"
]
}
],
@@ -753,6 +797,51 @@
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Text filtering with `$contains`"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Filter: {'name': {'$contains': 'bob'}}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'name': {'$contains': 'bo'}}\n",
"<empty result>\n",
"Filter: {'name': {'$contains': 'Adam Johnson'}}\n",
"<empty result>\n",
"Filter: {'name': {'$contains': 'Adam Smith'}}\n",
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n"
]
}
],
"source": [
"advanced_filter = {\"name\": {\"$contains\": \"bob\"}}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))\n",
"\n",
"advanced_filter = {\"name\": {\"$contains\": \"bo\"}}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))\n",
"\n",
"advanced_filter = {\"name\": {\"$contains\": \"Adam Johnson\"}}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))\n",
"\n",
"advanced_filter = {\"name\": {\"$contains\": \"Adam Smith\"}}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -762,7 +851,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -770,14 +859,15 @@
"output_type": "stream",
"text": [
"Filter: {'$or': [{'id': 1}, {'name': 'bob'}]}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"Filter: {'$and': [{'id': 1}, {'id': 2}]}\n",
"<empty result>\n",
"Filter: {'$or': [{'id': 1}, {'id': 2}, {'id': 3}]}\n",
"{'name': 'adam', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'bob', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"{'name': 'jane', 'is_active': True, 'id': 3, 'height': 2.4}\n"
"{'name': 'Adam Smith', 'is_active': True, 'id': 1, 'height': 10.0}\n",
"{'name': 'Jane Doe', 'is_active': True, 'id': 3, 'height': 2.4}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n",
"Filter: {'$and': [{'name': {'$contains': 'bob'}}, {'name': {'$contains': 'johnson'}}]}\n",
"{'name': 'Bob Johnson', 'is_active': False, 'id': 2, 'height': 5.7}\n"
]
}
],
@@ -792,6 +882,12 @@
"\n",
"advanced_filter = {\"$or\": [{\"id\": 1}, {\"id\": 2}, {\"id\": 3}]}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))\n",
"\n",
"advanced_filter = {\n",
" \"$and\": [{\"name\": {\"$contains\": \"bob\"}}, {\"name\": {\"$contains\": \"johnson\"}}]\n",
"}\n",
"print(f\"Filter: {advanced_filter}\")\n",
"print_filter_result(db.similarity_search(\"just testing\", k=5, filter=advanced_filter))"
]
},
@@ -804,13 +900,10 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Access the vector DB with a new table\n",
"db = HanaDB(\n",
" connection=connection,\n",
@@ -837,7 +930,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
@@ -874,6 +967,8 @@
"outputs": [],
"source": [
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"memory = ConversationBufferMemory(\n",
@@ -898,7 +993,7 @@
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": 26,
"metadata": {},
"outputs": [
{
@@ -907,7 +1002,7 @@
"text": [
"Answer from LLM:\n",
"================\n",
"The United States has set up joint patrols with Mexico and Guatemala to catch more human traffickers. This collaboration is part of the efforts to address immigration issues and secure the borders in the region.\n",
"The United States has set up joint patrols with Mexico and Guatemala to catch more human traffickers at the border. This collaborative effort aims to improve border security and combat illegal activities such as human trafficking.\n",
"================\n",
"Number of used source document chunks: 5\n"
]
@@ -954,7 +1049,7 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 28,
"metadata": {},
"outputs": [
{
@@ -963,12 +1058,12 @@
"text": [
"Answer from LLM:\n",
"================\n",
"Mexico and Guatemala are involved in joint patrols to catch human traffickers.\n"
"Countries like Mexico and Guatemala are participating in joint patrols to catch human traffickers. The United States is also working with partners in South and Central America to host more refugees and secure their borders. Additionally, the U.S. is working with twenty-seven members of the European Union, as well as countries like France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and Switzerland.\n"
]
}
],
"source": [
"question = \"What about other countries?\"\n",
"question = \"How many casualties were reported after that?\"\n",
"\n",
"result = qa_chain.invoke({\"question\": question})\n",
"print(\"Answer from LLM:\")\n",
@@ -996,7 +1091,7 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 29,
"metadata": {},
"outputs": [
{
@@ -1005,7 +1100,7 @@
"[]"
]
},
"execution_count": 35,
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
@@ -1038,7 +1133,7 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 30,
"metadata": {},
"outputs": [
{
@@ -1101,7 +1196,7 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 32,
"metadata": {},
"outputs": [
{
@@ -1111,7 +1206,7 @@
"None\n",
"Some other text\n",
"{\"start\": 400, \"end\": 450, \"doc_name\": \"other.txt\"}\n",
"<memory at 0x7f5edcb18d00>\n"
"<memory at 0x110f856c0>\n"
]
}
],
@@ -1168,7 +1263,7 @@
},
{
"cell_type": "code",
"execution_count": 40,
"execution_count": 33,
"metadata": {},
"outputs": [
{
@@ -1176,9 +1271,9 @@
"output_type": "stream",
"text": [
"--------------------------------------------------------------------------------\n",
"Some other text\n",
"Some more text\n",
"--------------------------------------------------------------------------------\n",
"Some more text\n"
"Some other text\n"
]
}
],
@@ -1214,7 +1309,7 @@
},
{
"cell_type": "code",
"execution_count": 41,
"execution_count": 34,
"metadata": {},
"outputs": [
{
@@ -1224,7 +1319,7 @@
"Filters on this value are very performant\n",
"Some other text\n",
"{\"start\": 400, \"end\": 450, \"doc_name\": \"other.txt\", \"CUSTOMTEXT\": \"Filters on this value are very performant\"}\n",
"<memory at 0x110f859c0>\n"
"<memory at 0x7f5edcb193c0>\n"
]
}
],
@@ -1291,7 +1386,7 @@
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 35,
"metadata": {},
"outputs": [
{
@@ -1299,9 +1394,9 @@
"output_type": "stream",
"text": [
"--------------------------------------------------------------------------------\n",
"Some other text\n",
"Some more text\n",
"--------------------------------------------------------------------------------\n",
"Some more text\n"
"Some other text\n"
]
}
],
@@ -1330,9 +1425,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "lc3",
"language": "python",
"name": "python3"
"name": "your_env_name"
},
"language_info": {
"codemirror_mode": {
@@ -1344,7 +1439,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.10.16"
}
},
"nbformat": 4,

@@ -89,7 +89,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "39f3ce3e",
"metadata": {},
"outputs": [],
@@ -118,15 +118,13 @@
" language: str = Field(description=\"The language the text is written in\")\n",
"\n",
"\n",
"# LLM\n",
"llm = ChatOpenAI(temperature=0, model=\"gpt-4o-mini\").with_structured_output(\n",
" Classification\n",
")"
"# Structured LLM\n",
"structured_llm = llm.with_structured_output(Classification)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"id": "5509b6a6",
"metadata": {},
"outputs": [
@@ -144,7 +142,7 @@
"source": [
"inp = \"Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!\"\n",
"prompt = tagging_prompt.invoke({\"input\": inp})\n",
"response = llm.invoke(prompt)\n",
"response = structured_llm.invoke(prompt)\n",
"\n",
"response"
]
@@ -159,7 +157,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"id": "9154474c",
"metadata": {},
"outputs": [
@@ -177,7 +175,7 @@
"source": [
"inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
"prompt = tagging_prompt.invoke({\"input\": inp})\n",
"response = llm.invoke(prompt)\n",
"response = structured_llm.invoke(prompt)\n",
"\n",
"response.model_dump()"
]

@@ -145,15 +145,12 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "a5e490f6-35ad-455e-8ae4-2bae021583ff",
"metadata": {},
"outputs": [],
"source": [
"from typing import Optional\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from pydantic import BaseModel, Field\n",
"\n",
"# Define a custom prompt to provide instructions and any additional context.\n",
"# 1) You can add examples into the prompt template to improve extraction quality\n",

@@ -366,6 +366,12 @@ const FEATURE_TABLES = {
        package: "langchain-openai",
        apiLink: "https://python.langchain.com/api_reference/openai/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html"
    },
    {
        name: "Google Gemini",
        link: "google-generative-ai",
        package: "langchain-google-genai",
        apiLink: "https://python.langchain.com/api_reference/google_genai/embeddings/langchain_google_genai.embeddings.GoogleGenerativeAIEmbeddings.html"
    },
    {
        name: "Together",
        link: "together",

1
docs/static/js/google_analytics.js
vendored
@@ -3,3 +3,4 @@ function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());

gtag('config', 'G-9B66JQQH2F');
gtag('config', 'G-47WX3HKKY2');

@@ -1,2 +1,4 @@
httpx
grpcio
aiohttp<3.11
protobuf<3.21

@@ -1,4 +1,4 @@
from typing import Iterator, Optional, Sequence
from typing import Any, Dict, Iterator, Optional, Sequence

from langchain_core.documents import Document

@@ -8,7 +8,7 @@ from langchain_community.document_loaders.base import BaseLoader
class BrowserbaseLoader(BaseLoader):
    """Load pre-rendered web pages using a headless browser hosted on Browserbase.

    Depends on `browserbase` package.
    Depends on `browserbase` and `playwright` packages.
    Get your API key from https://browserbase.com
    """

@@ -24,6 +24,7 @@ class BrowserbaseLoader(BaseLoader):
        self.urls = urls
        self.text_content = text_content
        self.session_id = session_id
        self.project_id = project_id
        self.proxy = proxy

        try:
@@ -32,22 +33,57 @@ class BrowserbaseLoader(BaseLoader):
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "browserbase` "
                "browserbase playwright` "
                "to use the Browserbase loader."
            )

        self.browserbase = Browserbase(api_key, project_id)
        self.browserbase = Browserbase(api_key=api_key)

    def lazy_load(self) -> Iterator[Document]:
        """Load pages from URLs"""
        pages = self.browserbase.load_urls(
            self.urls, self.text_content, self.session_id, self.proxy
        )

        for i, page in enumerate(pages):
            yield Document(
                page_content=page,
                metadata={
                    "url": self.urls[i],
                },
        try:
            from playwright.sync_api import sync_playwright
        except ImportError:
            raise ImportError(
                "playwright is required for BrowserbaseLoader. "
                "Please run `pip install --upgrade playwright`."
            )

        for url in self.urls:
            with sync_playwright() as playwright:
                # Create or use existing session
                if self.session_id:
                    session = self.browserbase.sessions.retrieve(id=self.session_id)
                else:
                    if not self.project_id:
                        raise ValueError("project_id is required to create a session")
                    session_params: Dict[str, Any] = {"project_id": self.project_id}
                    if self.proxy is not None:
                        session_params["proxy"] = bool(self.proxy)
                    session = self.browserbase.sessions.create(**session_params)

                # Connect to the remote session
                browser = playwright.chromium.connect_over_cdp(session.connect_url)
                context = browser.contexts[0]
                page = context.pages[0]

                # Navigate to URL and get content
                page.goto(url)
                # Get content based on the text_content flag
                if self.text_content:
                    page_text = page.inner_text("body")
                    content = str(page_text)
                else:
                    page_html = page.content()
                    content = str(page_html)

                # Close browser
                page.close()
                browser.close()

                yield Document(
                    page_content=content,
                    metadata={
                        "url": url,
                    },
                )
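
For orientation, a minimal usage sketch of the rewritten loader. It assumes the constructor keeps its `api_key` and `project_id` keyword parameters (the `__init__` signature is not shown in this hunk); the key, project ID, and URL below are placeholders, not values from this change.

```python
# Sketch only: api_key, project_id, and the URL are placeholders, and the
# constructor signature is assumed from the attributes set in __init__ above.
from langchain_community.document_loaders import BrowserbaseLoader

loader = BrowserbaseLoader(
    urls=["https://example.com"],
    api_key="bb_...",              # Browserbase API key
    project_id="your-project-id",  # required when no session_id is given
    text_content=True,             # True -> inner text of <body>, False -> full HTML
)

# lazy_load now drives a remote Chromium session per URL via Playwright's CDP bridge
for doc in loader.lazy_load():
    print(doc.metadata["url"], len(doc.page_content))
```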

@@ -1,6 +1,7 @@
# HANA Translator/query constructor
from typing import Dict, Tuple, Union

from langchain_core._api import deprecated
from langchain_core.structured_query import (
    Comparator,
    Comparison,
@@ -11,8 +12,25 @@ from langchain_core.structured_query import (
)


@deprecated(
    since="0.3.23",
    removal="1.0",
    message=(
        "This class is deprecated and will be removed in a future version. "
        "Please use query_constructors.HanaTranslator from the "
        "langchain_hana package instead. "
        "See https://github.com/SAP/langchain-integration-for-sap-hana-cloud "
        "for details."
    ),
    alternative="from langchain_hana.query_constructors import HanaTranslator;",
    pending=False,
)
class HanaTranslator(Visitor):
    """
    **DEPRECATED**: This class is deprecated and will no longer be maintained.
    Please use query_constructors.HanaTranslator from the langchain_hana
    package instead. It offers an improved implementation and full support.

    Translate internal query language elements to valid filters params for
    HANA vectorstore.
    """
|
||||
@@ -19,6 +19,7 @@ from typing import (
)

import numpy as np
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.runnables.config import run_in_executor
@@ -66,9 +67,25 @@ default_vector_column: str = "VEC_VECTOR"
default_vector_column_length: int = -1  # -1 means dynamic length


@deprecated(
    since="0.3.23",
    removal="1.0",
    message=(
        "This class is deprecated and will be removed in a future version. "
        "Please use HanaDB from the langchain_hana package instead. "
        "See https://github.com/SAP/langchain-integration-for-sap-hana-cloud "
        "for details."
    ),
    alternative="from langchain_hana import HanaDB;",
    pending=False,
)
class HanaDB(VectorStore):
    """SAP HANA Cloud Vector Engine

    **DEPRECATED**: This class is deprecated and will no longer be maintained.
    Please use HanaDB from the langchain_hana package instead. It offers an
    improved implementation and full support.

    The prerequisite for using this class is the installation of the ``hdbcli``
    Python package.
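A sketch of the suggested migration. The ``hdbcli`` connection is the prerequisite named in the docstring; the constructor arguments mirror the deprecated class here and are assumptions for the replacement package (hostname and credentials are placeholders):

from hdbcli import dbapi
from langchain_hana import HanaDB  # per the deprecation message
from langchain_openai import OpenAIEmbeddings  # any Embeddings implementation

# HANA Cloud connections typically use port 443 with TLS.
connection = dbapi.connect(
    address="<hostname>", port=443, user="<user>", password="<password>"
)
vectorstore = HanaDB(connection=connection, embedding=OpenAIEmbeddings())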
@@ -1,3 +1,3 @@
"""langchain-core version information and utilities."""

VERSION = "0.3.56rc1"
VERSION = "0.3.56"

@@ -17,7 +17,7 @@ dependencies = [
    "pydantic<3.0.0,>=2.7.4; python_full_version >= \"3.12.4\"",
]
name = "langchain-core"
version = "0.3.56rc1"
version = "0.3.56"
description = "Building applications with LLMs through composability"
readme = "README.md"
2
libs/core/uv.lock
generated
@@ -937,7 +937,7 @@ wheels = [

[[package]]
name = "langchain-core"
version = "0.3.56rc1"
version = "0.3.56"
source = { editable = "." }
dependencies = [
    { name = "jsonpatch" },
@@ -584,6 +584,9 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
        answer: Any = [
            msg_content for msg in new_messages for msg_content in msg.content
        ]
        attachments = [
            attachment for msg in new_messages for attachment in msg.attachments
        ]
        if all(
            (
                isinstance(content, openai.types.beta.threads.TextContentBlock)
@@ -601,6 +604,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
                "output": answer,
                "thread_id": run.thread_id,
                "run_id": run.id,
                "attachments": attachments,
            },
            log="",
            run_id=run.id,
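An illustrative sketch of what this change exposes when the runnable is used as an agent; `as_agent` and `AgentFinish.return_values` are existing APIs, the assistant id is a placeholder:

from langchain.agents.openai_assistant import OpenAIAssistantRunnable

assistant = OpenAIAssistantRunnable(assistant_id="asst_...", as_agent=True)
finish = assistant.invoke({"content": "Summarize the uploaded file"})
attachments = finish.return_values["attachments"]  # key added by this change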
@@ -119,6 +119,7 @@ def test_configurable() -> None:
        "reasoning_effort": None,
        "frequency_penalty": None,
        "seed": None,
        "service_tier": None,
        "logprobs": None,
        "top_logprobs": None,
        "logit_bias": None,
@@ -85,7 +85,6 @@ packages:
  - name: langchain-milvus
    path: libs/milvus
    repo: langchain-ai/langchain-milvus
    disabled: true
    downloads: 207750
    downloads_updated_at: '2025-04-22T15:24:39.289813+00:00'
  - name: langchain-mistralai
@@ -550,6 +549,7 @@ packages:
  - name: langchain-tavily
    path: .
    repo: tavily-ai/langchain-tavily
    include_in_api_ref: true
    downloads: 13796
    downloads_updated_at: '2025-04-22T15:25:24.644345+00:00'
  - name: langchain-zotero-retriever
@@ -643,3 +643,10 @@ packages:
    repo: valyu-network/langchain-valyu
    downloads: 120
    downloads_updated_at: '2025-04-22T15:25:24.644345+00:00'
  - name: langchain-hana
    path: .
    repo: SAP/langchain-integration-for-sap-hana-cloud
    name_title: SAP HANA
    provider_page: sap
    downloads: 315
    downloads_updated_at: '2025-04-27T19:45:43.938924+00:00'
@@ -10,7 +10,7 @@ dependencies = [
    "langchain-core>=0.3.52",
    "numpy>=1.26.0; python_version < '3.13'",
    "numpy>=2.1.0; python_version >= '3.13'",
    "chromadb!=0.5.10,!=0.5.11,!=0.5.12,!=0.5.4,!=0.5.5,!=0.5.7,!=0.5.9,<0.7.0,>=0.4.0",
    "chromadb>=1.0,<=1.0.6",
]
name = "langchain-chroma"
version = "0.2.3"
1673
libs/partners/chroma/uv.lock
generated
File diff suppressed because it is too large
@@ -538,6 +538,10 @@ class BaseChatOpenAI(BaseChatModel):
    However this does not prevent a user from directly passing in the parameter during
    invocation.
    """
    service_tier: Optional[str] = None
    """Latency tier for request. Options are 'auto', 'default', or 'flex'. Relevant
    for users of OpenAI's scale tier service.
    """

    use_responses_api: Optional[bool] = None
    """Whether to use the Responses API instead of the Chat API.
@@ -655,6 +659,7 @@ class BaseChatOpenAI(BaseChatModel):
            "n": self.n,
            "temperature": self.temperature,
            "reasoning_effort": self.reasoning_effort,
            "service_tier": self.service_tier,
        }

        params = {
@@ -2326,6 +2331,27 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
                "logprobs": None,
            }

    .. dropdown:: Flex processing

        OpenAI offers a variety of
        `service tiers <https://platform.openai.com/docs/guides/flex-processing>`_.
        The "flex" tier offers cheaper pricing for requests, with the trade-off that
        responses may take longer and resources might not always be available.
        This approach is best suited for non-critical tasks, including model testing,
        data enhancement, or jobs that can be run asynchronously.

        To use it, initialize the model with ``service_tier="flex"``:

        .. code-block:: python

            from langchain_openai import ChatOpenAI

            llm = ChatOpenAI(model="o4-mini", service_tier="flex")

        Note that this is a beta feature that is only available for a subset of models.
        See OpenAI `docs <https://platform.openai.com/docs/guides/flex-processing>`_
        for more detail.

    """  # noqa: E501

    max_tokens: Optional[int] = Field(default=None, alias="max_completion_tokens")
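Because flex responses "may take longer", pairing the tier with a longer client timeout is a natural companion setting; a sketch assuming ChatOpenAI's existing `timeout` parameter (the value is illustrative):

from langchain_openai import ChatOpenAI

# Flex trades latency for cost, so allow more time per request.
llm = ChatOpenAI(model="o4-mini", service_tier="flex", timeout=900.0)
result = llm.invoke("Classify this batch of records")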
@@ -3411,14 +3437,16 @@ def _convert_responses_chunk_to_generation_chunk(
        )
    elif chunk.type == "response.refusal.done":
        additional_kwargs["refusal"] = chunk.refusal
    elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
        additional_kwargs["reasoning"] = chunk.item.model_dump(
            exclude_none=True, mode="json"
        )
    elif chunk.type == "response.reasoning_summary_part.added":
        additional_kwargs["reasoning"] = {
            "type": "reasoning",
            "id": chunk.item_id,
            # langchain-core uses the `index` key to aggregate text blocks.
            "summary": [
                {"index": chunk.summary_index, "type": "summary_text", "text": ""}
            ],
            ]
        }
    elif chunk.type == "response.reasoning_summary_text.delta":
        additional_kwargs["reasoning"] = {
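The comment about the `index` key refers to how langchain-core merges streamed chunks: list blocks that share an `index` are merged when chunks are added together. A rough illustration with simplified block shapes (the exact merge semantics live in langchain-core's internals):

from langchain_core.messages import AIMessageChunk

left = AIMessageChunk(content=[{"type": "text", "text": "Hel", "index": 0}])
right = AIMessageChunk(content=[{"type": "text", "text": "lo", "index": 0}])
merged = left + right  # blocks with the same index merge; text concatenates
assert merged.content == [{"type": "text", "text": "Hello", "index": 0}]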
@@ -517,7 +517,6 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
        _chunk_size = chunk_size or self.chunk_size
        _iter, tokens, indices = self._tokenize(texts, _chunk_size)
        batched_embeddings: list[list[float]] = []
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = await self.async_client.create(
                input=tokens[i : i + _chunk_size], **self._invocation_params
@@ -170,48 +170,6 @@ def test_chat_openai_invalid_streaming_params() -> None:
        ChatOpenAI(max_tokens=MAX_TOKEN_COUNT, streaming=True, temperature=0, n=5)  # type: ignore[call-arg]


@pytest.mark.scheduled
async def test_async_chat_openai() -> None:
    """Test async generation."""
    chat = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT, n=2)  # type: ignore[call-arg]
    message = HumanMessage(content="Hello")
    response = await chat.agenerate([[message], [message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    assert response.llm_output
    for generations in response.generations:
        assert len(generations) == 2
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content


@pytest.mark.scheduled
async def test_async_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatOpenAI(
        max_tokens=MAX_TOKEN_COUNT,  # type: ignore[call-arg]
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = await chat.agenerate([[message], [message]])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content


@pytest.mark.scheduled
async def test_async_chat_openai_bind_functions() -> None:
    """Test ChatOpenAI wrapper with multiple completions."""
@@ -244,34 +202,6 @@ async def test_async_chat_openai_bind_functions() -> None:
        assert isinstance(generation, AIMessage)


@pytest.mark.scheduled
def test_openai_streaming() -> None:
    """Test streaming tokens from OpenAI."""
    llm = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]

    for token in llm.stream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


@pytest.mark.scheduled
async def test_openai_astream() -> None:
    """Test streaming tokens from OpenAI."""
    llm = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]

    async for token in llm.astream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


@pytest.mark.scheduled
async def test_openai_abatch() -> None:
    """Test streaming tokens from ChatOpenAI."""
    llm = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]

    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


@pytest.mark.scheduled
@pytest.mark.parametrize("use_responses_api", [False, True])
async def test_openai_abatch_tags(use_responses_api: bool) -> None:
@@ -285,31 +215,15 @@ async def test_openai_abatch_tags(use_responses_api: bool) -> None:
        assert isinstance(token.text(), str)


@pytest.mark.scheduled
def test_openai_batch() -> None:
    """Test batch tokens from ChatOpenAI."""
    llm = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]

    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


@pytest.mark.scheduled
async def test_openai_ainvoke() -> None:
    """Test invoke tokens from ChatOpenAI."""
    llm = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]

    result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)


@pytest.mark.scheduled
@pytest.mark.flaky(retries=3, delay=1)
def test_openai_invoke() -> None:
    """Test invoke tokens from ChatOpenAI."""
    llm = ChatOpenAI(max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]
    llm = ChatOpenAI(
        model="o4-mini",
        service_tier="flex",  # Also test service_tier
    )

    result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
    result = llm.invoke("Hello", config=dict(tags=["foo"]))
    assert isinstance(result.content, str)

    # assert no response headers if include_response_headers is not set
@@ -413,15 +327,6 @@ async def test_astream() -> None:
    await _test_stream(llm.astream("Hello", stream_usage=False), expect_usage=False)


async def test_abatch() -> None:
    """Test streaming tokens from ChatOpenAI."""
    llm = ChatOpenAI()

    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


async def test_abatch_tags() -> None:
    """Test batch tokens from ChatOpenAI."""
    llm = ChatOpenAI()
@@ -433,33 +338,6 @@ async def test_abatch_tags() -> None:
        assert isinstance(token.content, str)


def test_batch() -> None:
    """Test batch tokens from ChatOpenAI."""
    llm = ChatOpenAI()

    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


async def test_ainvoke() -> None:
    """Test invoke tokens from ChatOpenAI."""
    llm = ChatOpenAI()

    result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)
    assert result.response_metadata.get("model_name") is not None


def test_invoke() -> None:
    """Test invoke tokens from ChatOpenAI."""
    llm = ChatOpenAI()

    result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
    assert isinstance(result.content, str)
    assert result.response_metadata.get("model_name") is not None


def test_response_metadata() -> None:
    llm = ChatOpenAI()
    result = llm.invoke([HumanMessage(content="I'm PickleRick")], logprobs=True)
@@ -985,45 +863,6 @@ def test_json_schema_openai_format(
    assert isinstance(result, dict)


def test_json_mode() -> None:
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    response = llm.invoke(
        "Return this as json: {'a': 1}. Do not return anything other than json. Do not include markdown codeblocks.",  # noqa: E501
        response_format={"type": "json_object"},
    )
    assert isinstance(response.content, str)
    assert json.loads(response.content) == {"a": 1}

    # Test streaming
    full: Optional[BaseMessageChunk] = None
    for chunk in llm.stream(
        "Return this as json: {'a': 1}", response_format={"type": "json_object"}
    ):
        full = chunk if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, str)
    assert json.loads(full.content) == {"a": 1}


async def test_json_mode_async() -> None:
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    response = await llm.ainvoke(
        "Return this as json: {'a': 1}. Do not return anything other than json. Do not include markdown codeblocks."  # noqa: E501
    )
    assert isinstance(response.content, str)
    assert json.loads(response.content) == {"a": 1}

    # Test streaming
    full: Optional[BaseMessageChunk] = None
    async for chunk in llm.astream(
        "Return this as json: {'a': 1}", response_format={"type": "json_object"}
    ):
        full = chunk if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, str)
    assert json.loads(full.content) == {"a": 1}


def test_audio_output_modality() -> None:
    llm = ChatOpenAI(
        model="gpt-4o-audio-preview",
@@ -1732,3 +1732,9 @@ def test__construct_responses_api_input_multiple_message_types() -> None:

    # assert no mutation has occurred
    assert messages_copy == messages


def test_service_tier() -> None:
    llm = ChatOpenAI(model="o4-mini", service_tier="flex")
    payload = llm._get_request_payload([HumanMessage("Hello")])
    assert payload["service_tier"] == "flex"
@@ -476,6 +476,25 @@ class ChatModelIntegrationTests(ChatModelTests):
                    name="random_image",
                )

        (OpenAI Chat Completions format), as well as

        .. code-block:: python

            ToolMessage(
                content=[
                    {
                        "type": "image",
                        "source_type": "base64",
                        "data": image_data,
                        "mime_type": "image/jpeg",
                    },
                ],
                tool_call_id="1",
                name="random_image",
            )

        (standard format).

        If set to ``True``, the chat model will be tested with message sequences that
        include ToolMessages of this form.
@@ -2254,6 +2273,26 @@ class ChatModelIntegrationTests(ChatModelTests):
                    name="random_image",
                )

        containing image content blocks in OpenAI Chat Completions format, in addition
        to messages of the form:

        .. code-block:: python

            ToolMessage(
                content=[
                    {
                        "type": "image",
                        "source_type": "base64",
                        "data": image_data,
                        "mime_type": "image/jpeg",
                    },
                ],
                tool_call_id="1",
                name="random_image",
            )

        containing image content blocks in standard format.

        This test can be skipped by setting the ``supports_image_tool_message`` property
        to False (see Configuration below).
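As the docstring notes, a test class built on this suite can opt out by overriding the property; a minimal sketch (class name is a placeholder, and the other required properties of the suite are omitted):

from langchain_tests.integration_tests import ChatModelIntegrationTests

class TestMyChatModelIntegration(ChatModelIntegrationTests):
    @property
    def supports_image_tool_message(self) -> bool:
        # Skip the image ToolMessage tests described above.
        return False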
@@ -2280,31 +2319,56 @@ class ChatModelIntegrationTests(ChatModelTests):
            pytest.skip("Model does not support image tool message.")
        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
        messages = [
            HumanMessage("get a random image using the tool and describe the weather"),
            AIMessage(
                [],
                tool_calls=[
                    {"type": "tool_call", "id": "1", "name": "random_image", "args": {}}
                ],
            ),
            ToolMessage(
                content=[
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                    },
                ],
                tool_call_id="1",
                name="random_image",
            ),
        ]

        def random_image() -> str:
            """Return a random image."""
            return ""
        # Support both OpenAI and standard formats
        oai_format_message = ToolMessage(
            content=[
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                },
            ],
            tool_call_id="1",
            name="random_image",
        )

        model.bind_tools([random_image]).invoke(messages)
        standard_format_message = ToolMessage(
            content=[
                {
                    "type": "image",
                    "source_type": "base64",
                    "data": image_data,
                    "mime_type": "image/jpeg",
                },
            ],
            tool_call_id="1",
            name="random_image",
        )

        for tool_message in [oai_format_message, standard_format_message]:
            messages = [
                HumanMessage(
                    "get a random image using the tool and describe the weather"
                ),
                AIMessage(
                    [],
                    tool_calls=[
                        {
                            "type": "tool_call",
                            "id": "1",
                            "name": "random_image",
                            "args": {},
                        }
                    ],
                ),
                tool_message,
            ]

            def random_image() -> str:
                """Return a random image."""
                return ""

            _ = model.bind_tools([random_image]).invoke(messages)

    def test_anthropic_inputs(self, model: BaseChatModel) -> None:
        """Test that model can process Anthropic-style message histories.
@@ -568,6 +568,25 @@ class ChatModelUnitTests(ChatModelTests):
                    name="random_image",
                )

        (OpenAI Chat Completions format), as well as

        .. code-block:: python

            ToolMessage(
                content=[
                    {
                        "type": "image",
                        "source_type": "base64",
                        "data": image_data,
                        "mime_type": "image/jpeg",
                    },
                ],
                tool_call_id="1",
                name="random_image",
            )

        (standard format).

        If set to ``True``, the chat model will be tested with message sequences that
        include ToolMessages of this form.
8
uv.lock
generated
@@ -1,4 +1,5 @@
version = 1
revision = 1
requires-python = ">=3.9, <4.0"
resolution-markers = [
    "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@@ -2178,7 +2179,7 @@ wheels = [

[[package]]
name = "langchain"
version = "0.3.23"
version = "0.3.24"
source = { editable = "libs/langchain" }
dependencies = [
    { name = "async-timeout", marker = "python_full_version < '3.11'" },
@@ -2219,6 +2220,7 @@ requires-dist = [
    { name = "requests", specifier = ">=2,<3" },
    { name = "sqlalchemy", specifier = ">=1.4,<3" },
]
provides-extras = ["community", "anthropic", "openai", "azure-ai", "cohere", "google-vertexai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "aws", "deepseek", "xai", "perplexity"]

[package.metadata.requires-dev]
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
@@ -2393,7 +2395,7 @@ typing = [

[[package]]
name = "langchain-community"
version = "0.3.21"
version = "0.3.22"
source = { editable = "libs/community" }
dependencies = [
    { name = "aiohttp" },
@@ -2484,7 +2486,7 @@ typing = [

[[package]]
name = "langchain-core"
version = "0.3.54"
version = "0.3.55"
source = { editable = "libs/core" }
dependencies = [
    { name = "jsonpatch" },