fix: use new Google model names in examples (#32288)

Mason Daugherty 2025-07-28 15:03:42 -04:00 committed by GitHub
parent 6f10160a45
commit 8db16b5633
15 changed files with 308 additions and 354 deletions

View File

@@ -144,7 +144,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"id": "kWDWfSDBMPl8",
"metadata": {},
"outputs": [
@@ -185,7 +185,7 @@
" )\n",
" # Text summary chain\n",
" model = VertexAI(\n",
- " temperature=0, model_name=\"gemini-2.0-flash-lite-001\", max_tokens=1024\n",
+ " temperature=0, model_name=\"gemini-2.5-flash\", max_tokens=1024\n",
" ).with_fallbacks([empty_response])\n",
" summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
"\n",
@@ -235,7 +235,7 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": null,
"id": "PeK9bzXv3olF",
"metadata": {},
"outputs": [],
@@ -254,7 +254,7 @@
"\n",
"def image_summarize(img_base64, prompt):\n",
" \"\"\"Make image summary\"\"\"\n",
- " model = ChatVertexAI(model=\"gemini-2.0-flash\", max_tokens=1024)\n",
+ " model = ChatVertexAI(model=\"gemini-2.5-flash\", max_tokens=1024)\n",
"\n",
" msg = model.invoke(\n",
" [\n",
@@ -431,7 +431,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": null,
"id": "GlwCErBaCKQW",
"metadata": {},
"outputs": [],
@@ -553,7 +553,7 @@
" \"\"\"\n",
"\n",
" # Multi-modal LLM\n",
- " model = ChatVertexAI(temperature=0, model_name=\"gemini-2.0-flash\", max_tokens=1024)\n",
+ " model = ChatVertexAI(temperature=0, model_name=\"gemini-2.5-flash\", max_tokens=1024)\n",
"\n",
" # RAG pipeline\n",
" chain = (\n",

View File

@@ -373,7 +373,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"id": "a0b91b29-dbd6-4c94-8f24-05471adc7598",
"metadata": {},
"outputs": [
@@ -397,7 +397,7 @@
"\n",
"\n",
"# Pass to LLM\n",
- "llm = init_chat_model(\"google_genai:gemini-2.0-flash-001\")\n",
+ "llm = init_chat_model(\"google_genai:gemini-2.5-flash\")\n",
"\n",
"message = {\n",
" \"role\": \"user\",\n",

View File

@@ -23,9 +23,9 @@
{
"data": {
"text/plain": [
- "{'token_usage': {'completion_tokens': 93,\n",
+ "{'token_usage': {'completion_tokens': 88,\n",
" 'prompt_tokens': 16,\n",
- " 'total_tokens': 109,\n",
+ " 'total_tokens': 104,\n",
" 'completion_tokens_details': {'accepted_prediction_tokens': 0,\n",
" 'audio_tokens': 0,\n",
" 'reasoning_tokens': 0,\n",
@@ -33,7 +33,7 @@
" 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}},\n",
" 'model_name': 'gpt-4o-mini-2024-07-18',\n",
" 'system_fingerprint': 'fp_34a54ae93c',\n",
- " 'id': 'chatcmpl-ByJtse6I3U1lmVyPscLCjzydCvfDO',\n",
+ " 'id': 'chatcmpl-ByN1Qkvqb5fAGKKzXXxZ3rBlnqkWs',\n",
" 'service_tier': 'default',\n",
" 'finish_reason': 'stop',\n",
" 'logprobs': None}"
@@ -69,14 +69,14 @@
{
"data": {
"text/plain": [
- "{'id': 'msg_017S9H7GMwA5RdZ1wHxzXoeX',\n",
+ "{'id': 'msg_01NTWnqvbNKSjGfqQL7xikau',\n",
" 'model': 'claude-3-7-sonnet-20250219',\n",
" 'stop_reason': 'end_turn',\n",
" 'stop_sequence': None,\n",
" 'usage': {'cache_creation_input_tokens': 0,\n",
" 'cache_read_input_tokens': 0,\n",
" 'input_tokens': 17,\n",
- " 'output_tokens': 180,\n",
+ " 'output_tokens': 197,\n",
" 'server_tool_use': None,\n",
" 'service_tier': 'standard'},\n",
" 'model_name': 'claude-3-7-sonnet-20250219'}"
@@ -100,30 +100,22 @@
"id": "c1f24f69-18f6-43c1-8b26-3f88ec515259",
"metadata": {},
"source": [
- "## Google VertexAI"
+ "## Google Generative AI"
]
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"id": "39549336-25f5-4839-9846-f687cd77e59b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "{'is_blocked': False,\n",
- " 'safety_ratings': [],\n",
- " 'usage_metadata': {'prompt_token_count': 10,\n",
- " 'candidates_token_count': 55,\n",
- " 'total_token_count': 65,\n",
- " 'prompt_tokens_details': [{'modality': 1, 'token_count': 10}],\n",
- " 'candidates_tokens_details': [{'modality': 1, 'token_count': 55}],\n",
- " 'cached_content_token_count': 0,\n",
- " 'cache_tokens_details': []},\n",
+ "{'prompt_feedback': {'block_reason': 0, 'safety_ratings': []},\n",
" 'finish_reason': 'STOP',\n",
- " 'avg_logprobs': -0.251378042047674,\n",
- " 'model_name': 'gemini-2.0-flash-001'}"
+ " 'model_name': 'gemini-2.5-flash',\n",
+ " 'safety_ratings': []}"
]
},
"execution_count": 1,
@@ -132,9 +124,9 @@
}
],
"source": [
- "from langchain_google_vertexai import ChatVertexAI\n",
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
- "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\")\n",
"msg = llm.invoke(\"What's the oldest known example of cuneiform\")\n",
"msg.response_metadata"
]
@@ -199,14 +191,14 @@
"data": {
"text/plain": [
"{'token_usage': {'prompt_tokens': 13,\n",
- " 'total_tokens': 219,\n",
- " 'completion_tokens': 206},\n",
+ " 'total_tokens': 306,\n",
+ " 'completion_tokens': 293},\n",
" 'model_name': 'mistral-small-latest',\n",
" 'model': 'mistral-small-latest',\n",
" 'finish_reason': 'stop'}"
]
},
- "execution_count": 5,
+ "execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
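For orientation, a small sketch of how one of these `response_metadata` dicts is produced (assumes `langchain-google-genai` is installed and `GOOGLE_API_KEY` is set; exact keys vary by provider, as the outputs above show):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
msg = llm.invoke("What's the oldest known example of cuneiform")

# Provider-specific metadata lives on the message...
print(msg.response_metadata.get("finish_reason"))
# ...while normalized token counts live on usage_metadata
print(msg.usage_metadata)
```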

View File

@@ -19,7 +19,7 @@
"\n",
"Access Google's Generative AI models, including the Gemini family, directly via the Gemini API or experiment rapidly using Google AI Studio. The `langchain-google-genai` package provides the LangChain integration for these models. This is often the best starting point for individual developers.\n",
"\n",
- "For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All examples use the `gemini-2.0-flash` model. Gemini 2.5 Pro and 2.5 Flash can be used via `gemini-2.5-pro-preview-03-25` and `gemini-2.5-flash-preview-04-17`. All model ids can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
+ "For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All model ids can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
"\n",
"### Integration details\n",
"\n",
@@ -117,7 +117,7 @@
"from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
"llm = ChatGoogleGenerativeAI(\n",
- " model=\"gemini-2.0-flash\",\n",
+ " model=\"gemini-2.5-flash\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" timeout=None,\n",
@@ -242,7 +242,7 @@
"\n",
"### Image Input\n",
"\n",
- "Provide image inputs along with text using a `HumanMessage` with a list content format. The `gemini-2.0-flash` model can handle images."
+ "Provide image inputs along with text using a `HumanMessage` with a list content format. Make sure to use a model that supports image input, such as `gemini-2.5-flash`."
]
},
{
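As a companion to the prose above, a minimal sketch of the list-content format (the image URL is a placeholder; assumes `GOOGLE_API_KEY` is set):

```python
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

# One HumanMessage mixing a text block and an image block
message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image in one sentence."},
        {"type": "image_url", "image_url": "https://example.com/photo.png"},
    ]
)
print(llm.invoke([message]).content)
```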
@@ -297,7 +297,7 @@
"\n",
"### Audio Input\n",
"\n",
- "Provide audio file inputs along with text. Use a model like `gemini-2.0-flash`."
+ "Provide audio file inputs along with text."
]
},
{
@@ -340,7 +340,7 @@
"source": [
"### Video Input\n",
"\n",
- "Provide video file inputs along with text. Use a model like `gemini-2.0-flash`."
+ "Provide video file inputs along with text."
]
},
{
@@ -384,7 +384,7 @@
"source": [
"### Image Generation (Multimodal Output)\n",
"\n",
- "The `gemini-2.0-flash` model can generate text and images inline (image generation is experimental). You need to specify the desired `response_modalities`."
+ "Certain models (such as `gemini-2.0-flash-preview-image-generation`) can generate text and images inline. You need to specify the desired `response_modalities`. See more information on the [Gemini API docs](https://ai.google.dev/gemini-api/docs/image-generation) for details."
]
},
{
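A rough sketch of what specifying `response_modalities` looks like with the preview model named above (the `generation_config` kwarg mirrors the Gemini API; treat the exact shape as an assumption and check the linked docs):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash-preview-image-generation")

# Ask for both text and image parts in the reply
response = llm.invoke(
    "Generate an image of a cat wearing a hat.",
    generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
)

# Image parts come back as dict content blocks; text parts as plain strings
for block in response.content:
    if isinstance(block, dict) and block.get("image_url"):
        print("inline image block (base64 data URL)")
    else:
        print("text part:", block)
```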
@@ -830,7 +830,7 @@
"source": [
"from langchain_google_genai import ChatGoogleGenerativeAI\n",
"\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\")\n",
"\n",
"\n",
"async def run_async_calls():\n",
@@ -900,7 +900,7 @@
"source": [
"## API reference\n",
"\n",
- "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html"
+ "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html)."
]
}
],

View File

@@ -19,7 +19,7 @@
"\n",
"This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
"\n",
- "ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
+ "ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-2.5-pro`, `gemini-2.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models).\n",
"\n",
":::info Google Cloud VertexAI vs Google PaLM\n",
"\n",
@@ -60,7 +60,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
"metadata": {},
"outputs": [],
@@ -109,7 +109,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
"metadata": {},
"outputs": [],
@@ -117,7 +117,7 @@
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
"llm = ChatVertexAI(\n",
- " model=\"gemini-1.5-flash-001\",\n",
+ " model=\"gemini-2.5-flash\",\n",
" temperature=0,\n",
" max_tokens=None,\n",
" max_retries=6,\n",
@@ -210,7 +210,7 @@
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"google_search\": {}}])\n",
+ "llm = ChatVertexAI(model=\"gemini-2.5-flash\").bind_tools([{\"google_search\": {}}])\n",
"\n",
"response = llm.invoke(\"What is today's news?\")"
]
@@ -237,7 +237,7 @@
"source": [
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"code_execution\": {}}])\n",
+ "llm = ChatVertexAI(model=\"gemini-2.5-flash\").bind_tools([{\"code_execution\": {}}])\n",
"\n",
"response = llm.invoke(\"What is 3^3?\")"
]

View File

@@ -23,13 +23,9 @@
"\n",
"**Note:** This is separate from the `Google Generative AI` integration, it exposes [Vertex AI Generative API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) on `Google Cloud`.\n",
"\n",
- "VertexAI exposes all foundational models available in google cloud:\n",
- "- Gemini for Text ( `gemini-1.0-pro` )\n",
- "- Gemini with Multimodality ( `gemini-1.5-pro-001` and `gemini-pro-vision`)\n",
- "- Palm 2 for Text (`text-bison`)\n",
- "- Codey for Code Generation (`code-bison`)\n",
+ "VertexAI exposes all foundational models available in google cloud.\n",
"\n",
- "For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview)"
+ "For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models)"
]
},
{
@@ -47,7 +43,7 @@
"\n",
"To use `Vertex AI Generative AI` you must have the `langchain-google-vertexai` Python package installed and either:\n",
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
- "- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
+ "- Store the path to a service account JSON file as the `GOOGLE_APPLICATION_CREDENTIALS` environment variable\n",
"\n",
"This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
"\n",
@@ -84,31 +80,14 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_vertexai import VertexAI\n",
"\n",
"# To use model\n",
- "model = VertexAI(model_name=\"gemini-pro\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "NOTE : You can also specify a [Gemini Version](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#gemini-model-versions)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "# To specify a particular model version\n",
- "model = VertexAI(model_name=\"gemini-1.0-pro-002\")"
+ "model = VertexAI(model_name=\"gemini-2.5-pro\")"
]
},
{
@@ -285,7 +264,7 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": null,
"metadata": {},
"outputs": [
{
@@ -301,7 +280,7 @@
],
"source": [
"# You may also pass safety_settings to generate method\n",
- "llm = VertexAI(model_name=\"gemini-1.0-pro-001\")\n",
+ "llm = VertexAI(model_name=\"gemini-2.5-pro\")\n",
"\n",
"# invoke a model response\n",
"output = llm.invoke(\n",
@@ -622,15 +601,14 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "# Use Gemini 1.5 Pro\n",
- "llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+ "llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
]
},
{
@@ -683,15 +661,14 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "# Use Gemini 1.5 Pro\n",
- "llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+ "llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
]
},
{
@@ -741,20 +718,19 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### Using Audio with Gemini 1.5 Pro"
+ "### Using Audio with Gemini Models"
]
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
- "# Use Gemini 1.5 Pro\n",
- "llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+ "llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
]
},
{
@@ -1226,9 +1202,6 @@
"metadata": {},
"source": [
"NOTE : Specify the correct [Claude 3 Model Versions](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#claude-opus)\n",
- "- For Claude 3 Opus (Preview), use `claude-3-opus@20240229`.\n",
- "- For Claude 3 Sonnet, use `claude-3-sonnet@20240229`.\n",
- "- For Claude 3 Haiku, use `claude-3-haiku@20240307`.\n",
"\n",
"We don't recommend using the Anthropic Claude 3 model versions that don't include a suffix that starts with an @ symbol (claude-3-opus, claude-3-sonnet, or claude-3-haiku)."
]

View File

@@ -29,14 +29,14 @@ export GOOGLE_API_KEY="YOUR_API_KEY"
### Chat Models
-Use the `ChatGoogleGenerativeAI` class to interact with Gemini 2.0 and 2.5 models. See
+Use the `ChatGoogleGenerativeAI` class to interact with Gemini models. See
details in [this guide](/docs/integrations/chat/google_generative_ai).
```python
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage
-llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
# Simple text invocation
result = llm.invoke("Sing a ballad of LangChain.")
@@ -61,14 +61,14 @@ The `image_url` can be a public URL, a GCS URI (`gs://...`), a local file path,
### Embedding Models
-Generate text embeddings using models like `gemini-embedding-exp-03-07` with the `GoogleGenerativeAIEmbeddings` class.
+Generate text embeddings using models like `gemini-embedding-001` with the `GoogleGenerativeAIEmbeddings` class.
See a [usage example](/docs/integrations/text_embedding/google_generative_ai).
```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings
-embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-exp-03-07")
+embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
vector = embeddings.embed_query("What are embeddings?")
print(vector[:5])
```
@@ -83,7 +83,7 @@ See a [usage example](/docs/integrations/llms/google_ai).
```python
from langchain_google_genai import GoogleGenerativeAI
-llm = GoogleGenerativeAI(model="gemini-2.0-flash")
+llm = GoogleGenerativeAI(model="gemini-2.5-flash")
result = llm.invoke("Sing a ballad of LangChain.")
print(result)
```
@@ -105,7 +105,7 @@ Google Cloud integrations typically use Application Default Credentials (ADC). R
#### Vertex AI
-Access chat models like `Gemini` via the Vertex AI platform.
+Access chat models like Gemini via the Vertex AI platform.
See a [usage example](/docs/integrations/chat/google_vertex_ai_palm).
@@ -135,7 +135,7 @@ from langchain_google_vertexai.model_garden_maas.mistral import VertexModelGarde
#### Gemma local from Hugging Face
->Local `Gemma` model loaded from `HuggingFace`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from HuggingFace. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaChatLocalHF
@@ -143,7 +143,7 @@ from langchain_google_vertexai.gemma import GemmaChatLocalHF
#### Gemma local from Kaggle
->Local `Gemma` model loaded from `Kaggle`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from Kaggle. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaChatLocalKaggle
@@ -159,7 +159,7 @@ from langchain_google_vertexai.gemma import GemmaChatVertexAIModelGarden
#### Vertex AI image captioning
->Implementation of the `Image Captioning model` as a chat. Requires `langchain-google-vertexai`.
+>Implementation of the Image Captioning model as a chat. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.vision_models import VertexAIImageCaptioningChat
@@ -196,7 +196,7 @@ interface.
#### Vertex AI Model Garden
-Access `Gemini`, and hundreds of OSS models via `Vertex AI Model Garden` service. Requires `langchain-google-vertexai`.
+Access Gemini, and hundreds of OSS models via Vertex AI Model Garden service. Requires `langchain-google-vertexai`.
See a [usage example](/docs/integrations/llms/google_vertex_ai_palm#vertex-model-garden).
@@ -206,7 +206,7 @@ from langchain_google_vertexai import VertexAIModelGarden
#### Gemma local from Hugging Face
->Local `Gemma` model loaded from `HuggingFace`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from HuggingFace. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaLocalHF
@@ -214,7 +214,7 @@ from langchain_google_vertexai.gemma import GemmaLocalHF
#### Gemma local from Kaggle
->Local `Gemma` model loaded from `Kaggle`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from Kaggle. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.gemma import GemmaLocalKaggle
@@ -230,7 +230,7 @@ from langchain_google_vertexai.gemma import GemmaVertexAIModelGarden
#### Vertex AI image captioning
->Implementation of the `Image Captioning model` as an LLM. Requires `langchain-google-vertexai`.
+>Implementation of the Image Captioning model as an LLM. Requires `langchain-google-vertexai`.
```python
from langchain_google_vertexai.vision_models import VertexAIImageCaptioning
@@ -1138,7 +1138,7 @@ Integrations with various Google services beyond the core Cloud Platform.
#### Google Drive
->[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) file storage. Currently supports `Google Docs`.
+>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) file storage. Currently supports Google Docs.
Install with Drive dependencies:
@@ -1416,7 +1416,7 @@ from langchain_community.utilities import GoogleSerperAPIWrapper
#### YouTube Search Tool
->Search `YouTube` videos without the official API. Requires `youtube_search` package.
+>Search YouTube videos without the official API. Requires `youtube_search` package.
```bash
pip install youtube_search langchain # Requires base langchain

View File

@@ -101,7 +101,7 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": null,
"id": "eedc551e-a1f3-4fd8-8d65-4e0784c4441b",
"metadata": {},
"outputs": [
@@ -123,7 +123,7 @@
"source": [
"from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
"\n",
- "embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-exp-03-07\")\n",
+ "embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-001\")\n",
"vector = embeddings.embed_query(\"hello, world!\")\n",
"vector[:5]"
]
@@ -245,7 +245,7 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": null,
"id": "f1f077db-8eb4-49f7-8866-471a8528dcdb",
"metadata": {},
"outputs": [
@@ -267,10 +267,10 @@
"from sklearn.metrics.pairwise import cosine_similarity\n",
"\n",
"query_embeddings = GoogleGenerativeAIEmbeddings(\n",
- " model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_QUERY\"\n",
+ " model=\"models/gemini-embedding-001\", task_type=\"RETRIEVAL_QUERY\"\n",
")\n",
"doc_embeddings = GoogleGenerativeAIEmbeddings(\n",
- " model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
+ " model=\"models/gemini-embedding-001\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
")\n",
"\n",
"q_embed = query_embeddings.embed_query(\"What is the capital of France?\")\n",

View File

@@ -253,7 +253,7 @@
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Initialize the LLM\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
"\n",
"# Initialize the Bright Data Web Scraper API tool\n",
"scraper_tool = BrightDataWebScraperAPI(bright_data_api_key=\"your-api-key\")\n",

View File

@@ -233,7 +233,7 @@
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Initialize the LLM\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
"\n",
"# Initialize the Bright Data SERP tool\n",
"serp_tool = BrightDataSERP(\n",

View File

@@ -275,7 +275,7 @@
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"# Initialize the LLM\n",
- "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
"\n",
"# Initialize the tool\n",
"bright_data_tool = BrightDataUnlocker(bright_data_api_key=\"your-api-key\")\n",

View File

@@ -36,17 +36,17 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
return (
<div style={{ display: 'flex', alignItems: 'center', marginBottom: '1rem', gap: '0.75rem' }}>
<span style={{
fontSize: '1rem',
fontWeight: '500',
}}>
Select <a href={link}>{text}</a>:
</span>
<div className={`dropdown ${isOpen ? 'dropdown--show' : ''}`}>
<button
className="button button--secondary"
onClick={() => setIsOpen(!isOpen)}
style={{
backgroundColor: 'var(--ifm-background-color)',
border: '1px solid var(--ifm-color-emphasis-300)',
fontWeight: 'normal',
@@ -56,7 +56,7 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
}}
>
{selectedOption.label}
<span style={{
marginLeft: '0.4rem',
fontSize: '0.875rem'
}}></span>
@@ -69,9 +69,9 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
}}>
{options.map((option) => (
<li key={option.value}>
<a
className={`dropdown__link ${option.value === selectedOption.value ? 'dropdown__link--active' : ''}`}
href="#"
onClick={(e) => {
e.preventDefault();
onSelect(option.value);
@@ -138,14 +138,14 @@ ${llmVarName} = AzureChatOpenAI(
{
value: "google_genai",
label: "Google Gemini",
- model: "gemini-2.0-flash",
+ model: "gemini-2.5-flash",
apiKeyName: "GOOGLE_API_KEY",
packageName: "langchain[google-genai]",
},
{
value: "google_vertexai",
label: "Google Vertex",
- model: "gemini-2.0-flash-001",
+ model: "gemini-2.5-flash",
apiKeyText: "# Ensure your VertexAI credentials are configured",
packageName: "langchain[google-vertexai]",
},
@@ -204,8 +204,8 @@ ${llmVarName} = AzureChatOpenAI(
text: `from langchain_ibm import ChatWatsonx
${llmVarName} = ChatWatsonx(
model_id="ibm/granite-34b-code-instruct",
url="https://us-south.ml.cloud.ibm.com",
project_id="<WATSONX PROJECT_ID>"
)`,
apiKeyName: "WATSONX_APIKEY",
@@ -238,18 +238,18 @@ ${llmVarName} = ChatWatsonx(
}));
const modelOptions = tabItems
.map((item) => ({
value: item.value,
label: item.label,
}));
const selectedTabItem = tabItems.find(
(option) => option.value === selectedModel
);
let apiKeyText = "";
if (selectedTabItem.apiKeyName) {
apiKeyText = `import getpass
import os
if not os.environ.get("${selectedTabItem.apiKeyName}"):
@@ -264,7 +264,7 @@ ${llmVarName} = init_chat_model("${selectedTabItem.model}", model_provider="${se
return (
<div>
<CustomDropdown
selectedOption={selectedTabItem}
options={modelOptions}
onSelect={setSelectedModel}
@@ -279,4 +279,4 @@ ${llmVarName} = init_chat_model("${selectedTabItem.model}", model_provider="${se
</CodeBlock>
</div>
);
}

View File

@@ -3,206 +3,206 @@ import CodeBlock from "@theme-original/CodeBlock";
import { CustomDropdown } from './ChatModelTabs';
export default function EmbeddingTabs(props) {
const [selectedModel, setSelectedModel] = useState("OpenAI");
const {
openaiParams,
hideOpenai,
azureOpenaiParams,
hideAzureOpenai,
googleGenAIParams,
hideGoogleGenAI,
googleVertexAIParams,
hideGoogleVertexAI,
awsParams,
hideAws,
huggingFaceParams,
hideHuggingFace,
ollamaParams,
hideOllama,
cohereParams,
hideCohere,
mistralParams,
hideMistral,
nomicParams,
hideNomic,
nvidiaParams,
hideNvidia,
voyageaiParams,
hideVoyageai,
ibmParams,
hideIBM,
fakeEmbeddingParams,
hideFakeEmbedding,
customVarName,
} = props;
const openAIParamsOrDefault = openaiParams ?? `model="text-embedding-3-large"`;
const azureParamsOrDefault =
azureOpenaiParams ??
`\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`;
- const googleGenAIParamsOrDefault = googleGenAIParams ?? `model="models/embedding-001"`;
- const googleVertexAIParamsOrDefault = googleVertexAIParams ?? `model="text-embedding-004"`;
+ const googleGenAIParamsOrDefault = googleGenAIParams ?? `model="models/gemini-embedding-001"`;
+ const googleVertexAIParamsOrDefault = googleVertexAIParams ?? `model="text-embedding-005"`;
const awsParamsOrDefault = awsParams ?? `model_id="amazon.titan-embed-text-v2:0"`;
const huggingFaceParamsOrDefault = huggingFaceParams ?? `model_name="sentence-transformers/all-mpnet-base-v2"`;
const ollamaParamsOrDefault = ollamaParams ?? `model="llama3"`;
const cohereParamsOrDefault = cohereParams ?? `model="embed-english-v3.0"`;
const mistralParamsOrDefault = mistralParams ?? `model="mistral-embed"`;
const nomicsParamsOrDefault = nomicParams ?? `model="nomic-embed-text-v1.5"`;
const nvidiaParamsOrDefault = nvidiaParams ?? `model="NV-Embed-QA"`;
const voyageaiParamsOrDefault = voyageaiParams ?? `model="voyage-3"`;
const ibmParamsOrDefault = ibmParams ??
`\n model_id="ibm/slate-125m-english-rtrvr",\n url="https://us-south.ml.cloud.ibm.com",\n project_id="<WATSONX PROJECT_ID>",\n`;
const fakeEmbeddingParamsOrDefault = fakeEmbeddingParams ?? `size=4096`;
const embeddingVarName = customVarName ?? "embeddings";
const tabItems = [
{
value: "OpenAI",
label: "OpenAI",
text: `from langchain_openai import OpenAIEmbeddings\n\n${embeddingVarName} = OpenAIEmbeddings(${openAIParamsOrDefault})`,
apiKeyName: "OPENAI_API_KEY",
packageName: "langchain-openai",
default: true,
shouldHide: hideOpenai,
},
{
value: "Azure",
label: "Azure",
text: `from langchain_openai import AzureOpenAIEmbeddings\n\n${embeddingVarName} = AzureOpenAIEmbeddings(${azureParamsOrDefault})`,
apiKeyName: "AZURE_OPENAI_API_KEY",
packageName: "langchain-openai",
default: false,
shouldHide: hideAzureOpenai,
},
{
value: "GoogleGenAI",
label: "Google Gemini",
text: `from langchain_google_genai import GoogleGenerativeAIEmbeddings\n\n${embeddingVarName} = GoogleGenerativeAIEmbeddings(${googleGenAIParamsOrDefault})`,
apiKeyName: "GOOGLE_API_KEY",
packageName: "langchain-google-genai",
default: false,
shouldHide: hideGoogleGenAI,
},
{
value: "GoogleVertexAI",
label: "Google Vertex",
text: `from langchain_google_vertexai import VertexAIEmbeddings\n\n${embeddingVarName} = VertexAIEmbeddings(${googleVertexAIParamsOrDefault})`,
apiKeyName: undefined,
packageName: "langchain-google-vertexai",
default: false,
shouldHide: hideGoogleVertexAI,
},
{
value: "AWS",
label: "AWS",
text: `from langchain_aws import BedrockEmbeddings\n\n${embeddingVarName} = BedrockEmbeddings(${awsParamsOrDefault})`,
apiKeyName: undefined,
packageName: "langchain-aws",
default: false,
shouldHide: hideAws,
},
{
value: "HuggingFace",
label: "HuggingFace",
text: `from langchain_huggingface import HuggingFaceEmbeddings\n\n${embeddingVarName} = HuggingFaceEmbeddings(${huggingFaceParamsOrDefault})`,
apiKeyName: undefined,
packageName: "langchain-huggingface",
default: false,
shouldHide: hideHuggingFace,
},
{
value: "Ollama",
label: "Ollama",
text: `from langchain_ollama import OllamaEmbeddings\n\n${embeddingVarName} = OllamaEmbeddings(${ollamaParamsOrDefault})`,
apiKeyName: undefined,
packageName: "langchain-ollama",
default: false,
shouldHide: hideOllama,
},
{
value: "Cohere",
label: "Cohere",
text: `from langchain_cohere import CohereEmbeddings\n\n${embeddingVarName} = CohereEmbeddings(${cohereParamsOrDefault})`,
apiKeyName: "COHERE_API_KEY",
packageName: "langchain-cohere",
default: false,
shouldHide: hideCohere,
},
{
value: "MistralAI",
label: "MistralAI",
text: `from langchain_mistralai import MistralAIEmbeddings\n\n${embeddingVarName} = MistralAIEmbeddings(${mistralParamsOrDefault})`,
apiKeyName: "MISTRALAI_API_KEY",
packageName: "langchain-mistralai",
default: false,
shouldHide: hideMistral,
},
{
value: "Nomic",
label: "Nomic",
text: `from langchain_nomic import NomicEmbeddings\n\n${embeddingVarName} = NomicEmbeddings(${nomicsParamsOrDefault})`,
apiKeyName: "NOMIC_API_KEY",
packageName: "langchain-nomic",
default: false,
shouldHide: hideNomic,
},
{
value: "NVIDIA",
label: "NVIDIA",
text: `from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings\n\n${embeddingVarName} = NVIDIAEmbeddings(${nvidiaParamsOrDefault})`,
apiKeyName: "NVIDIA_API_KEY",
packageName: "langchain-nvidia-ai-endpoints",
default: false,
shouldHide: hideNvidia,
},
{
value: "Voyage AI",
label: "Voyage AI",
text: `from langchain_voyageai import VoyageAIEmbeddings\n\n${embeddingVarName} = VoyageAIEmbeddings(${voyageaiParamsOrDefault})`,
apiKeyName: "VOYAGE_API_KEY",
packageName: "langchain-voyageai",
default: false,
shouldHide: hideVoyageai,
},
{
value: "IBM",
label: "IBM watsonx",
text: `from langchain_ibm import WatsonxEmbeddings\n\n${embeddingVarName} = WatsonxEmbeddings(${ibmParamsOrDefault})`,
apiKeyName: "WATSONX_APIKEY",
packageName: "langchain-ibm",
default: false,
shouldHide: hideIBM,
},
{
value: "Fake",
label: "Fake",
text: `from langchain_core.embeddings import DeterministicFakeEmbedding\n\n${embeddingVarName} = DeterministicFakeEmbedding(${fakeEmbeddingParamsOrDefault})`,
apiKeyName: undefined,
packageName: "langchain-core",
default: false,
shouldHide: hideFakeEmbedding,
},
];
const modelOptions = tabItems
.filter((item) => !item.shouldHide)
.map((item) => ({
value: item.value,
label: item.label,
text: item.text,
apiKeyName: item.apiKeyName,
apiKeyText: item.apiKeyText,
packageName: item.packageName,
}));
const selectedOption = modelOptions.find(
(option) => option.value === selectedModel
);
let apiKeyText = "";
if (selectedOption.apiKeyName) {
apiKeyText = `import getpass
import os
if not os.environ.get("${selectedOption.apiKeyName}"):
@@ -211,21 +211,21 @@ if not os.environ.get("${selectedOption.apiKeyName}"):
apiKeyText = selectedOption.apiKeyText;
}
return (
<div>
<CustomDropdown
selectedOption={selectedOption}
options={modelOptions}
onSelect={setSelectedModel}
modelType="embeddings"
/>
<CodeBlock language="bash">
{`pip install -qU ${selectedOption.packageName}`}
</CodeBlock>
<CodeBlock language="python">
{apiKeyText ? apiKeyText + "\n\n" + selectedOption.text : selectedOption.text}
</CodeBlock>
</div>
);
}

View File

@@ -3,15 +3,7 @@ from __future__ import annotations
import warnings
from collections.abc import AsyncIterator, Iterator, Sequence
from importlib import util
-from typing import (
-    Any,
-    Callable,
-    Literal,
-    Optional,
-    Union,
-    cast,
-    overload,
-)
+from typing import Any, Callable, Literal, Optional, Union, cast, overload
from langchain_core.language_models import (
BaseChatModel,
@@ -188,7 +180,7 @@ def init_chat_model(
o3_mini = init_chat_model("openai:o3-mini", temperature=0)
claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-gemini_2_flash = init_chat_model("google_vertexai:gemini-2.0-flash", temperature=0)
+gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
o3_mini.invoke("what's your name")
claude_sonnet.invoke("what's your name")

View File

@@ -13,10 +13,7 @@ from typing import (
overload,
)
-from langchain_core.language_models import (
-    BaseChatModel,
-    LanguageModelInput,
-)
+from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import AnyMessage, BaseMessage
from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
from typing_extensions import TypeAlias, override
@@ -177,7 +174,7 @@ def init_chat_model(
o3_mini = init_chat_model("openai:o3-mini", temperature=0)
claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-gemini_2_flash = init_chat_model("google_vertexai:gemini-2.0-flash", temperature=0)
+gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
o3_mini.invoke("what's your name")
claude_sonnet.invoke("what's your name")