diff --git a/.github/workflows/.codespell-exclude b/.github/workflows/.codespell-exclude
deleted file mode 100644
index 902cb55cb04..00000000000
--- a/.github/workflows/.codespell-exclude
+++ /dev/null
@@ -1,6 +0,0 @@
-"NotIn": "not in",
-- `/checkin`: Check-in
-docs/docs/integrations/providers/trulens.mdx
-self.assertIn(
-from trulens_eval import Tru
-tru = Tru()
diff --git a/.github/workflows/_integration_test.yml b/.github/workflows/_integration_test.yml
index 48e277e936a..a1f7f988fa0 100644
--- a/.github/workflows/_integration_test.yml
+++ b/.github/workflows/_integration_test.yml
@@ -1,4 +1,5 @@
name: '๐ Integration Tests'
+run-name: 'Test ${{ inputs.working-directory }} on Python ${{ inputs.python-version }}'
on:
workflow_dispatch:
@@ -11,6 +12,7 @@ on:
required: true
type: string
description: "Python version to use"
+ default: "3.11"
permissions:
contents: read
@@ -24,7 +26,7 @@ jobs:
run:
working-directory: ${{ inputs.working-directory }}
runs-on: ubuntu-latest
- name: '๐ Integration Tests (Python ${{ inputs.python-version }})'
+ name: 'Python ${{ inputs.python-version }}'
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml
index 283ba41bbae..93d1aea24f2 100644
--- a/.github/workflows/_release.yml
+++ b/.github/workflows/_release.yml
@@ -1,5 +1,5 @@
name: '๐ Package Release'
-run-name: '๐ Release ${{ inputs.working-directory }} by @${{ github.actor }}'
+run-name: 'Release ${{ inputs.working-directory }} ${{ inputs.release-version }}'
on:
workflow_call:
inputs:
@@ -14,6 +14,11 @@ on:
type: string
description: "From which folder this pipeline executes"
default: 'libs/langchain'
+ release-version:
+ required: true
+ type: string
+ default: '0.1.0'
+ description: "New version of package being released"
dangerous-nonmaster-release:
required: false
type: boolean
@@ -111,7 +116,7 @@ jobs:
# Look for the latest release of the same base version
REGEX="^$PKG_NAME==$BASE_VERSION\$"
PREV_TAG=$(git tag --sort=-creatordate | (grep -P "$REGEX" || true) | head -1)
-
+
# If no exact base version match, look for the latest release of any kind
if [ -z "$PREV_TAG" ]; then
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
@@ -122,7 +127,7 @@ jobs:
PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"; [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG=""
# backup case if releasing e.g. 0.3.0, looks up last release
- # note if last release (chronologically) was e.g. 0.1.47 it will get
+ # note if last release (chronologically) was e.g. 0.1.47 it will get
# that instead of the last 0.2 release
if [ -z "$PREV_TAG" ]; then
REGEX="^$PKG_NAME==\\d+\\.\\d+\\.\\d+\$"
@@ -484,7 +489,7 @@ jobs:
with:
name: dist
path: ${{ inputs.working-directory }}/dist/
-
+
- name: Create Tag
uses: ncipollo/release-action@v1
with:
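
The `PREV_TAG` fallback above decrements the patch component with shell parameter expansion (`${VERSION%.*}` and `${VERSION##*.}`). A minimal Python sketch of that arithmetic, purely illustrative; the package name and version below are placeholders, not values taken from this workflow:

```python
def previous_patch_tag(pkg_name: str, version: str) -> str | None:
    """Mirror of PREV_TAG="$PKG_NAME==${VERSION%.*}.$(( ${VERSION##*.} - 1 ))"."""
    base, _, patch = version.rpartition(".")  # "0.3.2" -> ("0.3", ".", "2")
    if int(patch) == 0:
        # [[ "${VERSION##*.}" -eq 0 ]] && PREV_TAG="" -- fall through to the backup lookup
        return None
    return f"{pkg_name}=={base}.{int(patch) - 1}"


# previous_patch_tag("langchain-core", "0.3.2") -> "langchain-core==0.3.1"
# previous_patch_tag("langchain-core", "0.3.0") -> None (backup tag lookup runs instead)
```
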
diff --git a/.github/workflows/api_doc_build.yml b/.github/workflows/api_doc_build.yml
index c97b4e5e19e..c6c4c90edda 100644
--- a/.github/workflows/api_doc_build.yml
+++ b/.github/workflows/api_doc_build.yml
@@ -1,4 +1,5 @@
-name: '๐ API Documentation Build'
+name: '๐ API Docs'
+run-name: 'Build & Deploy API Reference'
# Runs daily or can be triggered manually for immediate updates
on:
@@ -51,7 +52,7 @@ jobs:
run: |
# Get unique repositories
REPOS=$(echo "$REPOS_UNSORTED" | sort -u)
-
+
# Checkout each unique repository
for repo in $REPOS; do
# Validate repository format (allow any org with proper format)
@@ -59,15 +60,15 @@ jobs:
echo "Error: Invalid repository format: $repo"
exit 1
fi
-
+
REPO_NAME=$(echo $repo | cut -d'/' -f2)
-
+
# Additional validation for repo name
if [[ ! "$REPO_NAME" =~ ^[a-zA-Z0-9_.-]+$ ]]; then
echo "Error: Invalid repository name: $REPO_NAME"
exit 1
fi
-
+
echo "Checking out $repo to $REPO_NAME"
git clone --depth 1 https://github.com/$repo.git $REPO_NAME
done
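
For reference, a small Python sketch of the repository-name validation performed in the bash loop above; it mirrors the visible `^[a-zA-Z0-9_.-]+$` check and the `cut -d'/' -f2` step and is illustrative only (the workflow itself stays in bash):

```python
import re


def checkout_name(repo: str) -> str:
    """Derive and validate the clone directory name, as the workflow's bash loop does."""
    repo_name = repo.split("/")[1]  # REPO_NAME=$(echo $repo | cut -d'/' -f2)
    # Same pattern as the workflow's check: ^[a-zA-Z0-9_.-]+$
    if not re.fullmatch(r"[a-zA-Z0-9_.-]+", repo_name):
        raise ValueError(f"Invalid repository name: {repo_name}")
    return repo_name


# checkout_name("langchain-ai/langchain") -> "langchain"
```
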
diff --git a/.github/workflows/people.yml b/.github/workflows/people.yml
index fe6a56fc8f4..d47b4e73529 100644
--- a/.github/workflows/people.yml
+++ b/.github/workflows/people.yml
@@ -1,5 +1,6 @@
name: '👥 LangChain People'
-
+run-name: 'Update People Data'
+# This workflow updates the LangChain People data by fetching the latest information from the LangChain GitHub repository
on:
schedule:
- cron: "0 14 1 * *"
diff --git a/.github/workflows/run_notebooks.yml b/.github/workflows/run_notebooks.yml
index fc5b4fab91a..8f5c194762a 100644
--- a/.github/workflows/run_notebooks.yml
+++ b/.github/workflows/run_notebooks.yml
@@ -1,5 +1,5 @@
-name: '๐ Run Documentation Notebooks'
-
+name: '๐ Validate Documentation Notebooks'
+run-name: 'Test notebooks in ${{ inputs.working-directory }}'
on:
workflow_dispatch:
inputs:
diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml
index 99662af8f0f..62ed8699ddd 100644
--- a/.github/workflows/scheduled_test.yml
+++ b/.github/workflows/scheduled_test.yml
@@ -1,4 +1,5 @@
name: '⏰ Scheduled Integration Tests'
+run-name: "Run Integration Tests - ${{ inputs.working-directory-force || 'all libs' }} (Python ${{ inputs.python-version-force || '3.9, 3.11' }})"
on:
workflow_dispatch: # Allows maintainers to trigger the workflow manually in GitHub UI
@@ -161,7 +162,7 @@ jobs:
- name: '🧹 Clean up External Libraries'
# Clean up external libraries to avoid affecting git status check
- run: |
+ run: |
rm -rf \
langchain/libs/partners/google-genai \
langchain/libs/partners/google-vertexai \
diff --git a/README.md b/README.md
index 02a42335e83..fff4ced6472 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@
[](https://star-history.com/#langchain-ai/langchain)
[](https://github.com/langchain-ai/langchain/issues)
[](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchain)
-[
](https://codespaces.new/langchain-ai/langchain)
+[
](https://codespaces.new/langchain-ai/langchain)
[](https://twitter.com/langchainai)
[](https://codspeed.io/langchain-ai/langchain)
diff --git a/SECURITY.md b/SECURITY.md
index 215e71e838a..1a3ee073a06 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -32,7 +32,7 @@ LangChain is partnered with [huntr by Protect AI](https://huntr.com/) to provide
a bounty program for our open source projects.
Please report security vulnerabilities associated with the LangChain
-open source projects [here](https://huntr.com/bounties/disclose/?target=https%3A%2F%2Fgithub.com%2Flangchain-ai%2Flangchain&validSearch=true).
+open source projects at [huntr](https://huntr.com/bounties/disclose/?target=https%3A%2F%2Fgithub.com%2Flangchain-ai%2Flangchain&validSearch=true).
Before reporting a vulnerability, please review:
diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb
index 579f8c7dfe1..c085080a6e9 100644
--- a/cookbook/Multi_modal_RAG_google.ipynb
+++ b/cookbook/Multi_modal_RAG_google.ipynb
@@ -144,7 +144,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"id": "kWDWfSDBMPl8",
"metadata": {},
"outputs": [
@@ -185,7 +185,7 @@
" )\n",
" # Text summary chain\n",
" model = VertexAI(\n",
- " temperature=0, model_name=\"gemini-2.0-flash-lite-001\", max_tokens=1024\n",
+ " temperature=0, model_name=\"gemini-2.5-flash\", max_tokens=1024\n",
" ).with_fallbacks([empty_response])\n",
" summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
"\n",
@@ -235,7 +235,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"id": "PeK9bzXv3olF",
"metadata": {},
"outputs": [],
@@ -254,7 +254,7 @@
"\n",
"def image_summarize(img_base64, prompt):\n",
" \"\"\"Make image summary\"\"\"\n",
- " model = ChatVertexAI(model=\"gemini-2.0-flash\", max_tokens=1024)\n",
+ " model = ChatVertexAI(model=\"gemini-2.5-flash\", max_tokens=1024)\n",
"\n",
" msg = model.invoke(\n",
" [\n",
@@ -431,7 +431,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
"id": "GlwCErBaCKQW",
"metadata": {},
"outputs": [],
@@ -553,7 +553,7 @@
" \"\"\"\n",
"\n",
" # Multi-modal LLM\n",
- " model = ChatVertexAI(temperature=0, model_name=\"gemini-2.0-flash\", max_tokens=1024)\n",
+ " model = ChatVertexAI(temperature=0, model_name=\"gemini-2.5-flash\", max_tokens=1024)\n",
"\n",
" # RAG pipeline\n",
" chain = (\n",
diff --git a/docs/docs/how_to/multimodal_inputs.ipynb b/docs/docs/how_to/multimodal_inputs.ipynb
index 96da487ed35..9c3b98d6d52 100644
--- a/docs/docs/how_to/multimodal_inputs.ipynb
+++ b/docs/docs/how_to/multimodal_inputs.ipynb
@@ -373,7 +373,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"id": "a0b91b29-dbd6-4c94-8f24-05471adc7598",
"metadata": {},
"outputs": [
@@ -397,7 +397,7 @@
"\n",
"\n",
"# Pass to LLM\n",
- "llm = init_chat_model(\"google_genai:gemini-2.0-flash-001\")\n",
+ "llm = init_chat_model(\"google_genai:gemini-2.5-flash\")\n",
"\n",
"message = {\n",
" \"role\": \"user\",\n",
diff --git a/docs/docs/how_to/response_metadata.ipynb b/docs/docs/how_to/response_metadata.ipynb
index a88f6f0535a..89bcbfb1e78 100644
--- a/docs/docs/how_to/response_metadata.ipynb
+++ b/docs/docs/how_to/response_metadata.ipynb
@@ -23,9 +23,9 @@
{
"data": {
"text/plain": [
- "{'token_usage': {'completion_tokens': 93,\n",
+ "{'token_usage': {'completion_tokens': 88,\n",
" 'prompt_tokens': 16,\n",
- " 'total_tokens': 109,\n",
+ " 'total_tokens': 104,\n",
" 'completion_tokens_details': {'accepted_prediction_tokens': 0,\n",
" 'audio_tokens': 0,\n",
" 'reasoning_tokens': 0,\n",
@@ -33,7 +33,7 @@
" 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}},\n",
" 'model_name': 'gpt-4o-mini-2024-07-18',\n",
" 'system_fingerprint': 'fp_34a54ae93c',\n",
- " 'id': 'chatcmpl-ByJtse6I3U1lmVyPscLCjzydCvfDO',\n",
+ " 'id': 'chatcmpl-ByN1Qkvqb5fAGKKzXXxZ3rBlnqkWs',\n",
" 'service_tier': 'default',\n",
" 'finish_reason': 'stop',\n",
" 'logprobs': None}"
@@ -69,14 +69,14 @@
{
"data": {
"text/plain": [
- "{'id': 'msg_017S9H7GMwA5RdZ1wHxzXoeX',\n",
+ "{'id': 'msg_01NTWnqvbNKSjGfqQL7xikau',\n",
" 'model': 'claude-3-7-sonnet-20250219',\n",
" 'stop_reason': 'end_turn',\n",
" 'stop_sequence': None,\n",
" 'usage': {'cache_creation_input_tokens': 0,\n",
" 'cache_read_input_tokens': 0,\n",
" 'input_tokens': 17,\n",
- " 'output_tokens': 180,\n",
+ " 'output_tokens': 197,\n",
" 'server_tool_use': None,\n",
" 'service_tier': 'standard'},\n",
" 'model_name': 'claude-3-7-sonnet-20250219'}"
@@ -100,30 +100,22 @@
"id": "c1f24f69-18f6-43c1-8b26-3f88ec515259",
"metadata": {},
"source": [
- "## Google VertexAI"
+ "## Google Generative AI"
]
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"id": "39549336-25f5-4839-9846-f687cd77e59b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "{'is_blocked': False,\n",
- " 'safety_ratings': [],\n",
- " 'usage_metadata': {'prompt_token_count': 10,\n",
- " 'candidates_token_count': 55,\n",
- " 'total_token_count': 65,\n",
- " 'prompt_tokens_details': [{'modality': 1, 'token_count': 10}],\n",
- " 'candidates_tokens_details': [{'modality': 1, 'token_count': 55}],\n",
- " 'cached_content_token_count': 0,\n",
- " 'cache_tokens_details': []},\n",
+ "{'prompt_feedback': {'block_reason': 0, 'safety_ratings': []},\n",
" 'finish_reason': 'STOP',\n",
- " 'avg_logprobs': -0.251378042047674,\n",
- " 'model_name': 'gemini-2.0-flash-001'}"
+ " 'model_name': 'gemini-2.5-flash',\n",
+ " 'safety_ratings': []}"
]
},
"execution_count": 1,
@@ -132,9 +124,9 @@
}
],
"source": [
- "from langchain_google_vertexai import ChatVertexAI\n",
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
- "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\")\n",
"msg = llm.invoke(\"What's the oldest known example of cuneiform\")\n",
"msg.response_metadata"
]
@@ -199,14 +191,14 @@
"data": {
"text/plain": [
"{'token_usage': {'prompt_tokens': 13,\n",
- " 'total_tokens': 219,\n",
- " 'completion_tokens': 206},\n",
+ " 'total_tokens': 306,\n",
+ " 'completion_tokens': 293},\n",
" 'model_name': 'mistral-small-latest',\n",
" 'model': 'mistral-small-latest',\n",
" 'finish_reason': 'stop'}"
]
},
- "execution_count": 5,
+ "execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
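
To illustrate how the metadata shown above is consumed, a small sketch assuming `langchain-google-genai` is installed and `GOOGLE_API_KEY` is set; note that the keys inside `response_metadata` differ by provider, while `usage_metadata` is the standardized accessor:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
msg = llm.invoke("What's the oldest known example of cuneiform?")

# Provider-specific payload (keys vary between OpenAI, Anthropic, Google, Mistral, ...)
print(msg.response_metadata)

# Standardized token accounting on AIMessage, portable across providers
print(msg.usage_metadata)  # {'input_tokens': ..., 'output_tokens': ..., 'total_tokens': ...}
```
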
diff --git a/docs/docs/integrations/chat/google_generative_ai.ipynb b/docs/docs/integrations/chat/google_generative_ai.ipynb
index ab9b0e2c2eb..8620197c9d8 100644
--- a/docs/docs/integrations/chat/google_generative_ai.ipynb
+++ b/docs/docs/integrations/chat/google_generative_ai.ipynb
@@ -19,7 +19,7 @@
"\n",
"Access Google's Generative AI models, including the Gemini family, directly via the Gemini API or experiment rapidly using Google AI Studio. The `langchain-google-genai` package provides the LangChain integration for these models. This is often the best starting point for individual developers.\n",
"\n",
- "For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All examples use the `gemini-2.0-flash` model. Gemini 2.5 Pro and 2.5 Flash can be used via `gemini-2.5-pro-preview-03-25` and `gemini-2.5-flash-preview-04-17`. All model ids can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
+    "For information on the latest models, their features, context windows, etc., head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All model IDs can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
"\n",
"### Integration details\n",
"\n",
@@ -117,7 +117,7 @@
"from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
"llm = ChatGoogleGenerativeAI(\n",
- " model=\"gemini-2.0-flash\",\n",
+ " model=\"gemini-2.5-flash\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
@@ -242,7 +242,7 @@
"\n",
"### Image Input\n",
"\n",
- "Provide image inputs along with text using a `HumanMessage` with a list content format. The `gemini-2.0-flash` model can handle images."
+ "Provide image inputs along with text using a `HumanMessage` with a list content format. Make sure to use a model that supports image input, such as `gemini-2.5-flash`."
]
},
{
@@ -297,7 +297,7 @@
"\n",
"### Audio Input\n",
"\n",
- "Provide audio file inputs along with text. Use a model like `gemini-2.0-flash`."
+ "Provide audio file inputs along with text."
]
},
{
@@ -340,7 +340,7 @@
"source": [
"### Video Input\n",
"\n",
- "Provide video file inputs along with text. Use a model like `gemini-2.0-flash`."
+ "Provide video file inputs along with text."
]
},
{
@@ -384,7 +384,7 @@
"source": [
"### Image Generation (Multimodal Output)\n",
"\n",
- "The `gemini-2.0-flash` model can generate text and images inline (image generation is experimental). You need to specify the desired `response_modalities`."
+    "Certain models (such as `gemini-2.0-flash-preview-image-generation`) can generate text and images inline. You need to specify the desired `response_modalities`. See the [Gemini API docs](https://ai.google.dev/gemini-api/docs/image-generation) for details."
]
},
{
@@ -830,7 +830,7 @@
"source": [
"from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\")\n",
"\n",
"\n",
"async def run_async_calls():\n",
@@ -900,7 +900,7 @@
"source": [
"## API reference\n",
"\n",
- "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html"
+ "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html)."
]
}
],
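
As a quick reference, a minimal sketch of invoking the chat model configured in this notebook, assuming `langchain-google-genai` is installed and `GOOGLE_API_KEY` is exported; the system and human messages are illustrative:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    temperature=0,
    max_tokens=None,
    timeout=None,
)

messages = [
    ("system", "You are a helpful assistant that translates English to French."),
    ("human", "I love programming."),
]
ai_msg = llm.invoke(messages)
print(ai_msg.content)
```
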
diff --git a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb
index 11f0fcac65e..b86f949aa40 100644
--- a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb
+++ b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb
@@ -19,7 +19,7 @@
"\n",
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
"\n",
- "ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
+    "ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-2.5-pro`, `gemini-2.5-flash`, etc. For a full and updated list of available models, visit the [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models).\n",
"\n",
":::info Google Cloud VertexAI vs Google PaLM\n",
"\n",
@@ -60,7 +60,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
@@ -109,7 +109,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
@@ -117,7 +117,7 @@
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
"llm = ChatVertexAI(\n",
- " model=\"gemini-1.5-flash-001\",\n",
+ " model=\"gemini-2.5-flash\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" max_retries=6,\n",
@@ -210,7 +210,7 @@
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"google_search\": {}}])\n",
+ "llm = ChatVertexAI(model=\"gemini-2.5-flash\").bind_tools([{\"google_search\": {}}])\n",
"\n",
"response = llm.invoke(\"What is today's news?\")"
]
@@ -237,7 +237,7 @@
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"code_execution\": {}}])\n",
+ "llm = ChatVertexAI(model=\"gemini-2.5-flash\").bind_tools([{\"code_execution\": {}}])\n",
"\n",
"response = llm.invoke(\"What is 3^3?\")"
]
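
A compact sketch combining the two built-in tools shown in this notebook, assuming `langchain-google-vertexai` is installed and Application Default Credentials are configured; printing `.content` is just one way to inspect the grounded answers:

```python
from langchain_google_vertexai import ChatVertexAI

# Built-in Google Search grounding
search_llm = ChatVertexAI(model="gemini-2.5-flash").bind_tools([{"google_search": {}}])
search_response = search_llm.invoke("What is today's news?")
print(search_response.content)

# Built-in code execution
code_llm = ChatVertexAI(model="gemini-2.5-flash").bind_tools([{"code_execution": {}}])
code_response = code_llm.invoke("What is 3^3?")
print(code_response.content)
```
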
diff --git a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
index 624189296e8..0820687b64b 100644
--- a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
+++ b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
@@ -23,13 +23,9 @@
"\n",
"**Note:** This is separate from the `Google Generative AI` integration, it exposes [Vertex AI Generative API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) on `Google Cloud`.\n",
"\n",
- "VertexAI exposes all foundational models available in google cloud:\n",
- "- Gemini for Text ( `gemini-1.0-pro` )\n",
- "- Gemini with Multimodality ( `gemini-1.5-pro-001` and `gemini-pro-vision`)\n",
- "- Palm 2 for Text (`text-bison`)\n",
- "- Codey for Code Generation (`code-bison`)\n",
+    "VertexAI exposes all foundational models available in Google Cloud.\n",
"\n",
- "For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview)"
+    "For a full and updated list of available models, visit the [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models)"
]
},
{
@@ -47,7 +43,7 @@
"\n",
"To use `Vertex AI Generative AI` you must have the `langchain-google-vertexai` Python package installed and either:\n",
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
- "- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
+ "- Store the path to a service account JSON file as the `GOOGLE_APPLICATION_CREDENTIALS` environment variable\n",
"\n",
"This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
"\n",
@@ -84,31 +80,14 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_vertexai import VertexAI\n",
"\n",
"# To use model\n",
- "model = VertexAI(model_name=\"gemini-pro\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "NOTE : You can also specify a [Gemini Version](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#gemini-model-versions)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "# To specify a particular model version\n",
- "model = VertexAI(model_name=\"gemini-1.0-pro-002\")"
+ "model = VertexAI(model_name=\"gemini-2.5-pro\")"
]
},
{
@@ -285,7 +264,7 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": null,
"metadata": {},
"outputs": [
{
@@ -301,7 +280,7 @@
],
"source": [
"# You may also pass safety_settings to generate method\n",
- "llm = VertexAI(model_name=\"gemini-1.0-pro-001\")\n",
+ "llm = VertexAI(model_name=\"gemini-2.5-pro\")\n",
"\n",
"# invoke a model response\n",
"output = llm.invoke(\n",
@@ -622,15 +601,14 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "# Use Gemini 1.5 Pro\n",
- "llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+ "llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
]
},
{
@@ -683,15 +661,14 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "# Use Gemini 1.5 Pro\n",
- "llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+ "llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
]
},
{
@@ -741,20 +718,19 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Using Audio with Gemini 1.5 Pro"
+ "### Using Audio with Gemini Models"
]
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "# Use Gemini 1.5 Pro\n",
- "llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+ "llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
]
},
{
@@ -1226,9 +1202,6 @@
"metadata": {},
"source": [
"NOTE : Specify the correct [Claude 3 Model Versions](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#claude-opus)\n",
- "- For Claude 3 Opus (Preview), use `claude-3-opus@20240229`.\n",
- "- For Claude 3 Sonnet, use `claude-3-sonnet@20240229`.\n",
- "- For Claude 3 Haiku, use `claude-3-haiku@20240307`.\n",
"\n",
"We don't recommend using the Anthropic Claude 3 model versions that don't include a suffix that starts with an @ symbol (claude-3-opus, claude-3-sonnet, or claude-3-haiku)."
]
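
For reference, a minimal sketch of the LLM-style usage this notebook covers after the model updates, assuming `langchain-google-vertexai` is installed and Application Default Credentials are available; the prompt is illustrative:

```python
from langchain_google_vertexai import VertexAI

model = VertexAI(model_name="gemini-2.5-pro")

# Plain string in, plain string out (LLM interface rather than chat messages)
result = model.invoke("Give a one-sentence summary of what Vertex AI is.")
print(result)
```
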
diff --git a/docs/docs/integrations/providers/google.mdx b/docs/docs/integrations/providers/google.mdx
index 67bb994c7c3..33df8a6fea7 100644
--- a/docs/docs/integrations/providers/google.mdx
+++ b/docs/docs/integrations/providers/google.mdx
@@ -29,14 +29,14 @@ export GOOGLE_API_KEY="YOUR_API_KEY"
### Chat Models
-Use the `ChatGoogleGenerativeAI` class to interact with Gemini 2.0 and 2.5 models. See
+Use the `ChatGoogleGenerativeAI` class to interact with Gemini models. See
details in [this guide](/docs/integrations/chat/google_generative_ai).
```python
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage
-llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
# Simple text invocation
result = llm.invoke("Sing a ballad of LangChain.")
@@ -61,14 +61,14 @@ The `image_url` can be a public URL, a GCS URI (`gs://...`), a local file path,
### Embedding Models
-Generate text embeddings using models like `gemini-embedding-exp-03-07` with the `GoogleGenerativeAIEmbeddings` class.
+Generate text embeddings using models like `gemini-embedding-001` with the `GoogleGenerativeAIEmbeddings` class.
See a [usage example](/docs/integrations/text_embedding/google_generative_ai).
```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings
-embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-exp-03-07")
+embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
vector = embeddings.embed_query("What are embeddings?")
print(vector[:5])
```
@@ -83,7 +83,7 @@ See a [usage example](/docs/integrations/llms/google_ai).
```python
from langchain_google_genai import GoogleGenerativeAI
-llm = GoogleGenerativeAI(model="gemini-2.0-flash")
+llm = GoogleGenerativeAI(model="gemini-2.5-flash")
result = llm.invoke("Sing a ballad of LangChain.")
print(result)
```
@@ -105,7 +105,7 @@ Google Cloud integrations typically use Application Default Credentials (ADC). R
#### Vertex AI
-Access chat models like `Gemini` via the Vertex AI platform.
+Access chat models like Gemini via the Vertex AI platform.
See a [usage example](/docs/integrations/chat/google_vertex_ai_palm).
@@ -135,7 +135,7 @@ from langchain_google_vertexai.model_garden_maas.mistral import VertexModelGarde
#### Gemma local from Hugging Face
->Local `Gemma` model loaded from `HuggingFace`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from HuggingFace. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaChatLocalHF
@@ -143,7 +143,7 @@ from langchain_google_vertexai.gemma import GemmaChatLocalHF
#### Gemma local from Kaggle
->Local `Gemma` model loaded from `Kaggle`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from Kaggle. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaChatLocalKaggle
@@ -159,7 +159,7 @@ from langchain_google_vertexai.gemma import GemmaChatVertexAIModelGarden
#### Vertex AI image captioning
->Implementation of the `Image Captioning model` as a chat. Requires `langchain-google-vertexai`.
+>Implementation of the Image Captioning model as a chat. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.vision_models import VertexAIImageCaptioningChat
@@ -196,7 +196,7 @@ interface.
#### Vertex AI Model Garden
-Access `Gemini`, and hundreds of OSS models via `Vertex AI Model Garden` service. Requires `langchain-google-vertexai`.
+Access Gemini and hundreds of OSS models via the Vertex AI Model Garden service. Requires `langchain-google-vertexai`.
See a [usage example](/docs/integrations/llms/google_vertex_ai_palm#vertex-model-garden).
@@ -206,7 +206,7 @@ from langchain_google_vertexai import VertexAIModelGarden
#### Gemma local from Hugging Face
->Local `Gemma` model loaded from `HuggingFace`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from HuggingFace. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaLocalHF
@@ -214,7 +214,7 @@ from langchain_google_vertexai.gemma import GemmaLocalHF
#### Gemma local from Kaggle
->Local `Gemma` model loaded from `Kaggle`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from Kaggle. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaLocalKaggle
@@ -230,7 +230,7 @@ from langchain_google_vertexai.gemma import GemmaVertexAIModelGarden
#### Vertex AI image captioning
->Implementation of the `Image Captioning model` as an LLM. Requires `langchain-google-vertexai`.
+>Implementation of the Image Captioning model as an LLM. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.vision_models import VertexAIImageCaptioning
@@ -1138,7 +1138,7 @@ Integrations with various Google services beyond the core Cloud Platform.
#### Google Drive
->[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) file storage. Currently supports `Google Docs`.
+>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) file storage. Currently supports Google Docs.
Install with Drive dependencies:
@@ -1416,7 +1416,7 @@ from langchain_community.utilities import GoogleSerperAPIWrapper
#### YouTube Search Tool
->Search `YouTube` videos without the official API. Requires `youtube_search` package.
+>Search YouTube videos without the official API. Requires `youtube_search` package.
```bash
pip install youtube_search langchain # Requires base langchain
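
A short sketch of the multimodal invocation pattern referenced earlier in this file (where `image_url` may be a public URL, a GCS URI, a local file path, or base64 data), assuming `langchain-google-genai` is installed and `GOOGLE_API_KEY` is set; the image URL below is a placeholder:

```python
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe the image."},
        # Placeholder URL; a GCS URI (gs://...), local path, or base64 string also works
        {"type": "image_url", "image_url": "https://example.com/image.png"},
    ]
)
result = llm.invoke([message])
print(result.content)
```
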
diff --git a/docs/docs/integrations/text_embedding/google_generative_ai.ipynb b/docs/docs/integrations/text_embedding/google_generative_ai.ipynb
index 26a022c8f77..7afe3c0578c 100644
--- a/docs/docs/integrations/text_embedding/google_generative_ai.ipynb
+++ b/docs/docs/integrations/text_embedding/google_generative_ai.ipynb
@@ -101,7 +101,7 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": null,
"id": "eedc551e-a1f3-4fd8-8d65-4e0784c4441b",
"metadata": {},
"outputs": [
@@ -123,7 +123,7 @@
"source": [
"from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
"\n",
- "embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-exp-03-07\")\n",
+ "embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-001\")\n",
"vector = embeddings.embed_query(\"hello, world!\")\n",
"vector[:5]"
]
@@ -245,7 +245,7 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": null,
"id": "f1f077db-8eb4-49f7-8866-471a8528dcdb",
"metadata": {},
"outputs": [
@@ -267,10 +267,10 @@
"from sklearn.metrics.pairwise import cosine_similarity\n",
"\n",
"query_embeddings = GoogleGenerativeAIEmbeddings(\n",
- " model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_QUERY\"\n",
+ " model=\"models/gemini-embedding-001\", task_type=\"RETRIEVAL_QUERY\"\n",
")\n",
"doc_embeddings = GoogleGenerativeAIEmbeddings(\n",
- " model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
+ " model=\"models/gemini-embedding-001\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
")\n",
"\n",
"q_embed = query_embeddings.embed_query(\"What is the capital of France?\")\n",
diff --git a/docs/docs/integrations/tools/brightdata-webscraperapi.ipynb b/docs/docs/integrations/tools/brightdata-webscraperapi.ipynb
index d0ece37ac7e..b3cf9830621 100644
--- a/docs/docs/integrations/tools/brightdata-webscraperapi.ipynb
+++ b/docs/docs/integrations/tools/brightdata-webscraperapi.ipynb
@@ -253,7 +253,7 @@
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Initialize the LLM\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
"\n",
"# Initialize the Bright Data Web Scraper API tool\n",
"scraper_tool = BrightDataWebScraperAPI(bright_data_api_key=\"your-api-key\")\n",
diff --git a/docs/docs/integrations/tools/brightdata_serp.ipynb b/docs/docs/integrations/tools/brightdata_serp.ipynb
index 6b2d3ea8f89..0cbab0f5f21 100644
--- a/docs/docs/integrations/tools/brightdata_serp.ipynb
+++ b/docs/docs/integrations/tools/brightdata_serp.ipynb
@@ -233,7 +233,7 @@
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Initialize the LLM\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
"\n",
"# Initialize the Bright Data SERP tool\n",
"serp_tool = BrightDataSERP(\n",
diff --git a/docs/docs/integrations/tools/brightdata_unlocker.ipynb b/docs/docs/integrations/tools/brightdata_unlocker.ipynb
index 90dd00abf03..019e28a1174 100644
--- a/docs/docs/integrations/tools/brightdata_unlocker.ipynb
+++ b/docs/docs/integrations/tools/brightdata_unlocker.ipynb
@@ -275,7 +275,7 @@
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Initialize the LLM\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
"\n",
"# Initialize the tool\n",
"bright_data_tool = BrightDataUnlocker(bright_data_api_key=\"your-api-key\")\n",
diff --git a/docs/src/theme/ChatModelTabs.js b/docs/src/theme/ChatModelTabs.js
index bbac33385d1..c7c3cff3cf3 100644
--- a/docs/src/theme/ChatModelTabs.js
+++ b/docs/src/theme/ChatModelTabs.js
@@ -36,17 +36,17 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
return (
-
Select {text}:
-