Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-31 16:39:20 +00:00

fix: use new Google model names in examples (#32288)

This commit is contained in:
parent 6f10160a45
commit 8db16b5633
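The change is mechanical across the docs: retired Gemini 1.x and 2.0 model ids are swapped for current ones such as `gemini-2.5-flash` and `gemini-2.5-pro`, and stale notebook execution counts are cleared. A minimal sketch of the updated baseline usage, assuming `langchain-google-genai` is installed and `GOOGLE_API_KEY` is set:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

# Current model id per this commit; older ids like "gemini-2.0-flash" are phased out
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=0)
print(llm.invoke("Hello!").content)
```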
@@ -144,7 +144,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": null,
 "id": "kWDWfSDBMPl8",
 "metadata": {},
 "outputs": [
@@ -185,7 +185,7 @@
 " )\n",
 " # Text summary chain\n",
 " model = VertexAI(\n",
-" temperature=0, model_name=\"gemini-2.0-flash-lite-001\", max_tokens=1024\n",
+" temperature=0, model_name=\"gemini-2.5-flash\", max_tokens=1024\n",
 " ).with_fallbacks([empty_response])\n",
 " summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
 "\n",
@@ -235,7 +235,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "id": "PeK9bzXv3olF",
 "metadata": {},
 "outputs": [],
@@ -254,7 +254,7 @@
 "\n",
 "def image_summarize(img_base64, prompt):\n",
 " \"\"\"Make image summary\"\"\"\n",
-" model = ChatVertexAI(model=\"gemini-2.0-flash\", max_tokens=1024)\n",
+" model = ChatVertexAI(model=\"gemini-2.5-flash\", max_tokens=1024)\n",
 "\n",
 " msg = model.invoke(\n",
 " [\n",
@@ -431,7 +431,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "id": "GlwCErBaCKQW",
 "metadata": {},
 "outputs": [],
@@ -553,7 +553,7 @@
 " \"\"\"\n",
 "\n",
 " # Multi-modal LLM\n",
-" model = ChatVertexAI(temperature=0, model_name=\"gemini-2.0-flash\", max_tokens=1024)\n",
+" model = ChatVertexAI(temperature=0, model_name=\"gemini-2.5-flash\", max_tokens=1024)\n",
 "\n",
 " # RAG pipeline\n",
 " chain = (\n",
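The cookbook hunks above update the text-summary chain. A condensed sketch of that chain with the new model name; the prompt text and `empty_response` fallback are assumptions here (the notebook defines its own), with `empty_response` standing in for any Runnable that yields a placeholder message:

```python
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_google_vertexai import VertexAI

# Assumed placeholder used when the primary model call fails
empty_response = RunnableLambda(lambda x: AIMessage(content="Error processing document"))

prompt = ChatPromptTemplate.from_template("Summarize the following element:\n\n{element}")
model = VertexAI(
    temperature=0, model_name="gemini-2.5-flash", max_tokens=1024
).with_fallbacks([empty_response])
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
```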
@@ -373,7 +373,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "id": "a0b91b29-dbd6-4c94-8f24-05471adc7598",
 "metadata": {},
 "outputs": [
@@ -397,7 +397,7 @@
 "\n",
 "\n",
 "# Pass to LLM\n",
-"llm = init_chat_model(\"google_genai:gemini-2.0-flash-001\")\n",
+"llm = init_chat_model(\"google_genai:gemini-2.5-flash\")\n",
 "\n",
 "message = {\n",
 " \"role\": \"user\",\n",
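The `init_chat_model` helper takes a provider-prefixed model string, so this rename is a one-token change. A minimal sketch, assuming `langchain[google-genai]` is installed:

```python
from langchain.chat_models import init_chat_model

# "provider:model" form, as in the notebook above
llm = init_chat_model("google_genai:gemini-2.5-flash")
response = llm.invoke("Hello!")
print(response.content)
```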
@@ -23,9 +23,9 @@
 {
 "data": {
 "text/plain": [
-"{'token_usage': {'completion_tokens': 93,\n",
+"{'token_usage': {'completion_tokens': 88,\n",
 " 'prompt_tokens': 16,\n",
-" 'total_tokens': 109,\n",
+" 'total_tokens': 104,\n",
 " 'completion_tokens_details': {'accepted_prediction_tokens': 0,\n",
 " 'audio_tokens': 0,\n",
 " 'reasoning_tokens': 0,\n",
@@ -33,7 +33,7 @@
 " 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}},\n",
 " 'model_name': 'gpt-4o-mini-2024-07-18',\n",
 " 'system_fingerprint': 'fp_34a54ae93c',\n",
-" 'id': 'chatcmpl-ByJtse6I3U1lmVyPscLCjzydCvfDO',\n",
+" 'id': 'chatcmpl-ByN1Qkvqb5fAGKKzXXxZ3rBlnqkWs',\n",
 " 'service_tier': 'default',\n",
 " 'finish_reason': 'stop',\n",
 " 'logprobs': None}"
@@ -69,14 +69,14 @@
 {
 "data": {
 "text/plain": [
-"{'id': 'msg_017S9H7GMwA5RdZ1wHxzXoeX',\n",
+"{'id': 'msg_01NTWnqvbNKSjGfqQL7xikau',\n",
 " 'model': 'claude-3-7-sonnet-20250219',\n",
 " 'stop_reason': 'end_turn',\n",
 " 'stop_sequence': None,\n",
 " 'usage': {'cache_creation_input_tokens': 0,\n",
 " 'cache_read_input_tokens': 0,\n",
 " 'input_tokens': 17,\n",
-" 'output_tokens': 180,\n",
+" 'output_tokens': 197,\n",
 " 'server_tool_use': None,\n",
 " 'service_tier': 'standard'},\n",
 " 'model_name': 'claude-3-7-sonnet-20250219'}"
@@ -100,30 +100,22 @@
 "id": "c1f24f69-18f6-43c1-8b26-3f88ec515259",
 "metadata": {},
 "source": [
-"## Google VertexAI"
+"## Google Generative AI"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "id": "39549336-25f5-4839-9846-f687cd77e59b",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"{'is_blocked': False,\n",
-" 'safety_ratings': [],\n",
-" 'usage_metadata': {'prompt_token_count': 10,\n",
-" 'candidates_token_count': 55,\n",
-" 'total_token_count': 65,\n",
-" 'prompt_tokens_details': [{'modality': 1, 'token_count': 10}],\n",
-" 'candidates_tokens_details': [{'modality': 1, 'token_count': 55}],\n",
-" 'cached_content_token_count': 0,\n",
-" 'cache_tokens_details': []},\n",
+"{'prompt_feedback': {'block_reason': 0, 'safety_ratings': []},\n",
 " 'finish_reason': 'STOP',\n",
-" 'avg_logprobs': -0.251378042047674,\n",
-" 'model_name': 'gemini-2.0-flash-001'}"
+" 'model_name': 'gemini-2.5-flash',\n",
+" 'safety_ratings': []}"
 ]
 },
 "execution_count": 1,
@@ -132,9 +124,9 @@
 }
 ],
 "source": [
-"from langchain_google_vertexai import ChatVertexAI\n",
+"from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "\n",
-"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\")\n",
+"llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\")\n",
 "msg = llm.invoke(\"What's the oldest known example of cuneiform\")\n",
 "msg.response_metadata"
 ]
@@ -199,14 +191,14 @@
 "data": {
 "text/plain": [
 "{'token_usage': {'prompt_tokens': 13,\n",
-" 'total_tokens': 219,\n",
-" 'completion_tokens': 206},\n",
+" 'total_tokens': 306,\n",
+" 'completion_tokens': 293},\n",
 " 'model_name': 'mistral-small-latest',\n",
 " 'model': 'mistral-small-latest',\n",
 " 'finish_reason': 'stop'}"
 ]
 },
-"execution_count": 5,
+"execution_count": 9,
 "metadata": {},
 "output_type": "execute_result"
 }
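The metadata cells above are regenerated outputs; the code producing the Google entry changes only the class and model id. A sketch:

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
msg = llm.invoke("What's the oldest known example of cuneiform")
# Dict with prompt_feedback, finish_reason, model_name, safety_ratings (per the output above)
print(msg.response_metadata)
```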
@@ -19,7 +19,7 @@
 "\n",
 "Access Google's Generative AI models, including the Gemini family, directly via the Gemini API or experiment rapidly using Google AI Studio. The `langchain-google-genai` package provides the LangChain integration for these models. This is often the best starting point for individual developers.\n",
 "\n",
-"For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All examples use the `gemini-2.0-flash` model. Gemini 2.5 Pro and 2.5 Flash can be used via `gemini-2.5-pro-preview-03-25` and `gemini-2.5-flash-preview-04-17`. All model ids can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
+"For information on the latest models, their features, context windows, etc. head to the [Google AI docs](https://ai.google.dev/gemini-api/docs/models/gemini). All model ids can be found in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/models).\n",
 "\n",
 "### Integration details\n",
 "\n",
@@ -117,7 +117,7 @@
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "\n",
 "llm = ChatGoogleGenerativeAI(\n",
-" model=\"gemini-2.0-flash\",\n",
+" model=\"gemini-2.5-flash\",\n",
 " temperature=0,\n",
 " max_tokens=None,\n",
 " timeout=None,\n",
@@ -242,7 +242,7 @@
 "\n",
 "### Image Input\n",
 "\n",
-"Provide image inputs along with text using a `HumanMessage` with a list content format. The `gemini-2.0-flash` model can handle images."
+"Provide image inputs along with text using a `HumanMessage` with a list content format. Make sure to use a model that supports image input, such as `gemini-2.5-flash`."
 ]
 },
 {
@@ -297,7 +297,7 @@
 "\n",
 "### Audio Input\n",
 "\n",
-"Provide audio file inputs along with text. Use a model like `gemini-2.0-flash`."
+"Provide audio file inputs along with text."
 ]
 },
 {
@@ -340,7 +340,7 @@
 "source": [
 "### Video Input\n",
 "\n",
-"Provide video file inputs along with text. Use a model like `gemini-2.0-flash`."
+"Provide video file inputs along with text."
 ]
 },
 {
@@ -384,7 +384,7 @@
 "source": [
 "### Image Generation (Multimodal Output)\n",
 "\n",
-"The `gemini-2.0-flash` model can generate text and images inline (image generation is experimental). You need to specify the desired `response_modalities`."
+"Certain models (such as `gemini-2.0-flash-preview-image-generation`) can generate text and images inline. You need to specify the desired `response_modalities`. See more information on the [Gemini API docs](https://ai.google.dev/gemini-api/docs/image-generation) for details."
 ]
 },
 {
@@ -830,7 +830,7 @@
 "source": [
 "from langchain_google_genai import ChatGoogleGenerativeAI\n",
 "\n",
-"llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
+"llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\")\n",
 "\n",
 "\n",
 "async def run_async_calls():\n",
@@ -900,7 +900,7 @@
 "source": [
 "## API reference\n",
 "\n",
-"For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html"
+"For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_genai/chat_models/langchain_google_genai.chat_models.ChatGoogleGenerativeAI.html)."
 ]
 },
 ],
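For the image-input section above, the list-content `HumanMessage` format is unchanged; only the model id moves. A sketch, with a placeholder image URL:

```python
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        # Placeholder URL; a public URL, GCS URI, or base64 data URI also works
        {"type": "image_url", "image_url": "https://example.com/picture.png"},
    ]
)
response = llm.invoke([message])
print(response.content)
```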
@@ -19,7 +19,7 @@
 "\n",
 "This page provides a quick overview for getting started with VertexAI [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatVertexAI features and configurations head to the [API reference](https://python.langchain.com/api_reference/google_vertexai/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html).\n",
 "\n",
-"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview).\n",
+"ChatVertexAI exposes all foundational models available in Google Cloud, like `gemini-2.5-pro`, `gemini-2.5-flash`, etc. For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models).\n",
 "\n",
 ":::info Google Cloud VertexAI vs Google PaLM\n",
 "\n",
@@ -60,7 +60,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "id": "a15d341e-3e26-4ca3-830b-5aab30ed66de",
 "metadata": {},
 "outputs": [],
@@ -109,7 +109,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
 "metadata": {},
 "outputs": [],
@@ -117,7 +117,7 @@
 "from langchain_google_vertexai import ChatVertexAI\n",
 "\n",
 "llm = ChatVertexAI(\n",
-" model=\"gemini-1.5-flash-001\",\n",
+" model=\"gemini-2.5-flash\",\n",
 " temperature=0,\n",
 " max_tokens=None,\n",
 " max_retries=6,\n",
@@ -210,7 +210,7 @@
 "source": [
 "from langchain_google_vertexai import ChatVertexAI\n",
 "\n",
-"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"google_search\": {}}])\n",
+"llm = ChatVertexAI(model=\"gemini-2.5-flash\").bind_tools([{\"google_search\": {}}])\n",
 "\n",
 "response = llm.invoke(\"What is today's news?\")"
 ]
@@ -237,7 +237,7 @@
 "source": [
 "from langchain_google_vertexai import ChatVertexAI\n",
 "\n",
-"llm = ChatVertexAI(model=\"gemini-2.0-flash-001\").bind_tools([{\"code_execution\": {}}])\n",
+"llm = ChatVertexAI(model=\"gemini-2.5-flash\").bind_tools([{\"code_execution\": {}}])\n",
 "\n",
 "response = llm.invoke(\"What is 3^3?\")"
 ]
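The built-in tool hunks only swap the model id; the binding pattern is unchanged. A sketch of both built-ins on the new model, assuming VertexAI credentials are configured:

```python
from langchain_google_vertexai import ChatVertexAI

# Google Search grounding
search_llm = ChatVertexAI(model="gemini-2.5-flash").bind_tools([{"google_search": {}}])
news = search_llm.invoke("What is today's news?")

# Server-side code execution
code_llm = ChatVertexAI(model="gemini-2.5-flash").bind_tools([{"code_execution": {}}])
answer = code_llm.invoke("What is 3^3?")
```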
@@ -23,13 +23,9 @@
 "\n",
 "**Note:** This is separate from the `Google Generative AI` integration, it exposes [Vertex AI Generative API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) on `Google Cloud`.\n",
 "\n",
-"VertexAI exposes all foundational models available in google cloud:\n",
-"- Gemini for Text ( `gemini-1.0-pro` )\n",
-"- Gemini with Multimodality ( `gemini-1.5-pro-001` and `gemini-pro-vision`)\n",
-"- Palm 2 for Text (`text-bison`)\n",
-"- Codey for Code Generation (`code-bison`)\n",
+"VertexAI exposes all foundational models available in google cloud.\n",
 "\n",
-"For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/overview)"
+"For a full and updated list of available models visit [VertexAI documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/models)"
 ]
 },
 {
@@ -47,7 +43,7 @@
 "\n",
 "To use `Vertex AI Generative AI` you must have the `langchain-google-vertexai` Python package installed and either:\n",
 "- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
-"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
+"- Store the path to a service account JSON file as the `GOOGLE_APPLICATION_CREDENTIALS` environment variable\n",
 "\n",
 "This codebase uses the `google.auth` library which first looks for the application credentials variable mentioned above, and then looks for system-level auth.\n",
 "\n",
@@ -84,31 +80,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "from langchain_google_vertexai import VertexAI\n",
 "\n",
-"# To use model\n",
-"model = VertexAI(model_name=\"gemini-pro\")"
-]
-},
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"NOTE : You can also specify a [Gemini Version](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#gemini-model-versions)"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 2,
-"metadata": {},
-"outputs": [],
-"source": [
 "# To specify a particular model version\n",
-"model = VertexAI(model_name=\"gemini-1.0-pro-002\")"
+"model = VertexAI(model_name=\"gemini-2.5-pro\")"
 ]
 },
 {
@@ -285,7 +264,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -301,7 +280,7 @@
 ],
 "source": [
 "# You may also pass safety_settings to generate method\n",
-"llm = VertexAI(model_name=\"gemini-1.0-pro-001\")\n",
+"llm = VertexAI(model_name=\"gemini-2.5-pro\")\n",
 "\n",
 "# invoke a model response\n",
 "output = llm.invoke(\n",
@@ -622,15 +601,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_vertexai import ChatVertexAI\n",
 "\n",
-"# Use Gemini 1.5 Pro\n",
-"llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+"llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
 ]
 },
 {
@@ -683,15 +661,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_vertexai import ChatVertexAI\n",
 "\n",
-"# Use Gemini 1.5 Pro\n",
-"llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+"llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
 ]
 },
 {
@@ -741,20 +718,19 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Using Audio with Gemini 1.5 Pro"
+"### Using Audio with Gemini Models"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 20,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_google_vertexai import ChatVertexAI\n",
 "\n",
-"# Use Gemini 1.5 Pro\n",
-"llm = ChatVertexAI(model=\"gemini-1.5-pro-001\")"
+"llm = ChatVertexAI(model=\"gemini-2.5-pro\")"
 ]
 },
 {
@@ -1226,9 +1202,6 @@
 "metadata": {},
 "source": [
 "NOTE : Specify the correct [Claude 3 Model Versions](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#claude-opus)\n",
-"- For Claude 3 Opus (Preview), use `claude-3-opus@20240229`.\n",
-"- For Claude 3 Sonnet, use `claude-3-sonnet@20240229`.\n",
-"- For Claude 3 Haiku, use `claude-3-haiku@20240307`.\n",
 "\n",
 "We don't recommend using the Anthropic Claude 3 model versions that don't include a suffix that starts with an @ symbol (claude-3-opus, claude-3-sonnet, or claude-3-haiku)."
 ]
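After the consolidation above, the VertexAI quickstart reduces to one cell. A sketch, assuming Application Default Credentials are configured:

```python
from langchain_google_vertexai import VertexAI

# To specify a particular model version
model = VertexAI(model_name="gemini-2.5-pro")
print(model.invoke("Give me a one-line summary of Vertex AI."))
```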
@@ -29,14 +29,14 @@ export GOOGLE_API_KEY="YOUR_API_KEY"
 
 ### Chat Models
 
-Use the `ChatGoogleGenerativeAI` class to interact with Gemini 2.0 and 2.5 models. See
+Use the `ChatGoogleGenerativeAI` class to interact with Gemini models. See
 details in [this guide](/docs/integrations/chat/google_generative_ai).
 
 ```python
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_core.messages import HumanMessage
 
-llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
 
 # Simple text invocation
 result = llm.invoke("Sing a ballad of LangChain.")
@@ -61,14 +61,14 @@ The `image_url` can be a public URL, a GCS URI (`gs://...`), a local file path,
 
 ### Embedding Models
 
-Generate text embeddings using models like `gemini-embedding-exp-03-07` with the `GoogleGenerativeAIEmbeddings` class.
+Generate text embeddings using models like `gemini-embedding-001` with the `GoogleGenerativeAIEmbeddings` class.
 
 See a [usage example](/docs/integrations/text_embedding/google_generative_ai).
 
 ```python
 from langchain_google_genai import GoogleGenerativeAIEmbeddings
 
-embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-exp-03-07")
+embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
 vector = embeddings.embed_query("What are embeddings?")
 print(vector[:5])
 ```
@@ -83,7 +83,7 @@ See a [usage example](/docs/integrations/llms/google_ai).
 ```python
 from langchain_google_genai import GoogleGenerativeAI
 
-llm = GoogleGenerativeAI(model="gemini-2.0-flash")
+llm = GoogleGenerativeAI(model="gemini-2.5-flash")
 result = llm.invoke("Sing a ballad of LangChain.")
 print(result)
 ```
@@ -105,7 +105,7 @@ Google Cloud integrations typically use Application Default Credentials (ADC). R
 
 #### Vertex AI
 
-Access chat models like `Gemini` via the Vertex AI platform.
+Access chat models like Gemini via the Vertex AI platform.
 
 See a [usage example](/docs/integrations/chat/google_vertex_ai_palm).
 
@@ -135,7 +135,7 @@ from langchain_google_vertexai.model_garden_maas.mistral import VertexModelGarde
 
 #### Gemma local from Hugging Face
 
->Local `Gemma` model loaded from `HuggingFace`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from HuggingFace. Requires `langchain-google-vertexai`.
 
 ```python
 from langchain_google_vertexai.gemma import GemmaChatLocalHF
@@ -143,7 +143,7 @@ from langchain_google_vertexai.gemma import GemmaChatLocalHF
 
 #### Gemma local from Kaggle
 
->Local `Gemma` model loaded from `Kaggle`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from Kaggle. Requires `langchain-google-vertexai`.
 
 ```python
 from langchain_google_vertexai.gemma import GemmaChatLocalKaggle
@@ -159,7 +159,7 @@ from langchain_google_vertexai.gemma import GemmaChatVertexAIModelGarden
 
 #### Vertex AI image captioning
 
->Implementation of the `Image Captioning model` as a chat. Requires `langchain-google-vertexai`.
+>Implementation of the Image Captioning model as a chat. Requires `langchain-google-vertexai`.
 
 ```python
 from langchain_google_vertexai.vision_models import VertexAIImageCaptioningChat
@@ -196,7 +196,7 @@ interface.
 
 #### Vertex AI Model Garden
 
-Access `Gemini`, and hundreds of OSS models via `Vertex AI Model Garden` service. Requires `langchain-google-vertexai`.
+Access Gemini, and hundreds of OSS models via Vertex AI Model Garden service. Requires `langchain-google-vertexai`.
 
 See a [usage example](/docs/integrations/llms/google_vertex_ai_palm#vertex-model-garden).
 
@@ -206,7 +206,7 @@ from langchain_google_vertexai import VertexAIModelGarden
 
 #### Gemma local from Hugging Face
 
->Local `Gemma` model loaded from `HuggingFace`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from HuggingFace. Requires `langchain-google-vertexai`.
 
 ```python
 from langchain_google_vertexai.gemma import GemmaLocalHF
@@ -214,7 +214,7 @@ from langchain_google_vertexai.gemma import GemmaLocalHF
 
 #### Gemma local from Kaggle
 
->Local `Gemma` model loaded from `Kaggle`. Requires `langchain-google-vertexai`.
+>Local Gemma model loaded from Kaggle. Requires `langchain-google-vertexai`.
 
 ```python
 from langchain_google_vertexai.gemma import GemmaLocalKaggle
@@ -230,7 +230,7 @@ from langchain_google_vertexai.gemma import GemmaVertexAIModelGarden
 
 #### Vertex AI image captioning
 
->Implementation of the `Image Captioning model` as an LLM. Requires `langchain-google-vertexai`.
+>Implementation of the Image Captioning model as an LLM. Requires `langchain-google-vertexai`.
 
 ```python
 from langchain_google_vertexai.vision_models import VertexAIImageCaptioning
@@ -1138,7 +1138,7 @@ Integrations with various Google services beyond the core Cloud Platform.
 
 #### Google Drive
 
->[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) file storage. Currently supports `Google Docs`.
+>[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) file storage. Currently supports Google Docs.
 
 Install with Drive dependencies:
 
@@ -1416,7 +1416,7 @@ from langchain_community.utilities import GoogleSerperAPIWrapper
 
 #### YouTube Search Tool
 
->Search `YouTube` videos without the official API. Requires `youtube_search` package.
+>Search YouTube videos without the official API. Requires `youtube_search` package.
 
 ```bash
 pip install youtube_search langchain # Requires base langchain
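The provider page assumes `GOOGLE_API_KEY` is exported before any of its snippets run. The interactive equivalent used elsewhere in these docs:

```python
import getpass
import os

# Prompt only if the key is not already in the environment
if not os.environ.get("GOOGLE_API_KEY"):
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter your Google API key: ")
```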
@@ -101,7 +101,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 20,
+"execution_count": null,
 "id": "eedc551e-a1f3-4fd8-8d65-4e0784c4441b",
 "metadata": {},
 "outputs": [
@@ -123,7 +123,7 @@
 "source": [
 "from langchain_google_genai import GoogleGenerativeAIEmbeddings\n",
 "\n",
-"embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-exp-03-07\")\n",
+"embeddings = GoogleGenerativeAIEmbeddings(model=\"models/gemini-embedding-001\")\n",
 "vector = embeddings.embed_query(\"hello, world!\")\n",
 "vector[:5]"
 ]
@@ -245,7 +245,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 19,
+"execution_count": null,
 "id": "f1f077db-8eb4-49f7-8866-471a8528dcdb",
 "metadata": {},
 "outputs": [
@@ -267,10 +267,10 @@
 "from sklearn.metrics.pairwise import cosine_similarity\n",
 "\n",
 "query_embeddings = GoogleGenerativeAIEmbeddings(\n",
-" model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_QUERY\"\n",
+" model=\"models/gemini-embedding-001\", task_type=\"RETRIEVAL_QUERY\"\n",
 ")\n",
 "doc_embeddings = GoogleGenerativeAIEmbeddings(\n",
-" model=\"models/gemini-embedding-exp-03-07\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
+" model=\"models/gemini-embedding-001\", task_type=\"RETRIEVAL_DOCUMENT\"\n",
 ")\n",
 "\n",
 "q_embed = query_embeddings.embed_query(\"What is the capital of France?\")\n",
@ -253,7 +253,7 @@
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"# Initialize the LLM\n",
|
||||
"llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
|
||||
"llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
|
||||
"\n",
|
||||
"# Initialize the Bright Data Web Scraper API tool\n",
|
||||
"scraper_tool = BrightDataWebScraperAPI(bright_data_api_key=\"your-api-key\")\n",
|
||||
|
@@ -233,7 +233,7 @@
 "from langgraph.prebuilt import create_react_agent\n",
 "\n",
 "# Initialize the LLM\n",
-"llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
+"llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
 "\n",
 "# Initialize the Bright Data SERP tool\n",
 "serp_tool = BrightDataSERP(\n",
|
||||
"from langgraph.prebuilt import create_react_agent\n",
|
||||
"\n",
|
||||
"# Initialize the LLM\n",
|
||||
"llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\", google_api_key=\"your-api-key\")\n",
|
||||
"llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash\", google_api_key=\"your-api-key\")\n",
|
||||
"\n",
|
||||
"# Initialize the tool\n",
|
||||
"bright_data_tool = BrightDataUnlocker(bright_data_api_key=\"your-api-key\")\n",
|
||||
|
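All three Bright Data notebooks share the same agent skeleton; only the tool differs. A sketch of one variant, assuming the tool classes import from the `langchain-brightdata` package (the notebooks' import cells are not part of these hunks):

```python
from langchain_brightdata import BrightDataSERP  # assumed import path
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.prebuilt import create_react_agent

# Initialize the LLM (placeholder keys, as in the notebooks)
llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key="your-api-key")

# Initialize the Bright Data SERP tool
serp_tool = BrightDataSERP(bright_data_api_key="your-api-key")

# Wire the tool into a ReAct-style agent
agent = create_react_agent(llm, [serp_tool])
```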
@@ -36,17 +36,17 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
 
 return (
 <div style={{ display: 'flex', alignItems: 'center', marginBottom: '1rem', gap: '0.75rem' }}>
 <span style={{
 fontSize: '1rem',
 fontWeight: '500',
 }}>
 Select <a href={link}>{text}</a>:
 </span>
 <div className={`dropdown ${isOpen ? 'dropdown--show' : ''}`}>
 <button
 className="button button--secondary"
 onClick={() => setIsOpen(!isOpen)}
 style={{
 backgroundColor: 'var(--ifm-background-color)',
 border: '1px solid var(--ifm-color-emphasis-300)',
 fontWeight: 'normal',
@@ -56,7 +56,7 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
 }}
 >
 {selectedOption.label}
 <span style={{
 marginLeft: '0.4rem',
 fontSize: '0.875rem'
 }}>▾</span>
@@ -69,9 +69,9 @@ export const CustomDropdown = ({ selectedOption, options, onSelect, modelType })
 }}>
 {options.map((option) => (
 <li key={option.value}>
 <a
 className={`dropdown__link ${option.value === selectedOption.value ? 'dropdown__link--active' : ''}`}
 href="#"
 onClick={(e) => {
 e.preventDefault();
 onSelect(option.value);
@@ -138,14 +138,14 @@ ${llmVarName} = AzureChatOpenAI(
 {
 value: "google_genai",
 label: "Google Gemini",
-model: "gemini-2.0-flash",
+model: "gemini-2.5-flash",
 apiKeyName: "GOOGLE_API_KEY",
 packageName: "langchain[google-genai]",
 },
 {
 value: "google_vertexai",
 label: "Google Vertex",
-model: "gemini-2.0-flash-001",
+model: "gemini-2.5-flash",
 apiKeyText: "# Ensure your VertexAI credentials are configured",
 packageName: "langchain[google-vertexai]",
 },
@@ -204,8 +204,8 @@
 text: `from langchain_ibm import ChatWatsonx
 
 ${llmVarName} = ChatWatsonx(
 model_id="ibm/granite-34b-code-instruct",
 url="https://us-south.ml.cloud.ibm.com",
 project_id="<WATSONX PROJECT_ID>"
 )`,
 apiKeyName: "WATSONX_APIKEY",
@@ -238,18 +238,18 @@ ${llmVarName} = ChatWatsonx(
 }));
 
 const modelOptions = tabItems
 .map((item) => ({
 value: item.value,
 label: item.label,
 }));
 
 const selectedTabItem = tabItems.find(
 (option) => option.value === selectedModel
 );
 
 let apiKeyText = "";
 if (selectedTabItem.apiKeyName) {
 apiKeyText = `import getpass
 import os
 
 if not os.environ.get("${selectedTabItem.apiKeyName}"):
@@ -264,7 +264,7 @@ ${llmVarName} = init_chat_model("${selectedTabItem.model}", model_provider="${se
 
 return (
 <div>
 <CustomDropdown
 selectedOption={selectedTabItem}
 options={modelOptions}
 onSelect={setSelectedModel}
@@ -279,4 +279,4 @@ ${llmVarName} = init_chat_model("${selectedTabItem.model}", model_provider="${se
 </CodeBlock>
 </div>
 );
 }
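The component above renders, for the Google entries, an `init_chat_model` call built from each tab's `model` and `value` fields. Roughly what the updated defaults emit:

```python
from langchain.chat_models import init_chat_model

# Google Gemini tab (pip install -qU "langchain[google-genai]")
llm = init_chat_model("gemini-2.5-flash", model_provider="google_genai")

# Google Vertex tab (pip install -qU "langchain[google-vertexai]")
llm = init_chat_model("gemini-2.5-flash", model_provider="google_vertexai")
```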
@@ -3,206 +3,206 @@ import CodeBlock from "@theme-original/CodeBlock";
 import { CustomDropdown } from './ChatModelTabs';
 
 export default function EmbeddingTabs(props) {
 const [selectedModel, setSelectedModel] = useState("OpenAI");
 const {
 openaiParams,
 hideOpenai,
 azureOpenaiParams,
 hideAzureOpenai,
 googleGenAIParams,
 hideGoogleGenAI,
 googleVertexAIParams,
 hideGoogleVertexAI,
 awsParams,
 hideAws,
 huggingFaceParams,
 hideHuggingFace,
 ollamaParams,
 hideOllama,
 cohereParams,
 hideCohere,
 mistralParams,
 hideMistral,
 nomicParams,
 hideNomic,
 nvidiaParams,
 hideNvidia,
 voyageaiParams,
 hideVoyageai,
 ibmParams,
 hideIBM,
 fakeEmbeddingParams,
 hideFakeEmbedding,
 customVarName,
 } = props;
 
 const openAIParamsOrDefault = openaiParams ?? `model="text-embedding-3-large"`;
 const azureParamsOrDefault =
 azureOpenaiParams ??
 `\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`;
-const googleGenAIParamsOrDefault = googleGenAIParams ?? `model="models/embedding-001"`;
-const googleVertexAIParamsOrDefault = googleVertexAIParams ?? `model="text-embedding-004"`;
+const googleGenAIParamsOrDefault = googleGenAIParams ?? `model="models/gemini-embedding-001"`;
+const googleVertexAIParamsOrDefault = googleVertexAIParams ?? `model="text-embedding-005"`;
 const awsParamsOrDefault = awsParams ?? `model_id="amazon.titan-embed-text-v2:0"`;
 const huggingFaceParamsOrDefault = huggingFaceParams ?? `model_name="sentence-transformers/all-mpnet-base-v2"`;
 const ollamaParamsOrDefault = ollamaParams ?? `model="llama3"`;
 const cohereParamsOrDefault = cohereParams ?? `model="embed-english-v3.0"`;
 const mistralParamsOrDefault = mistralParams ?? `model="mistral-embed"`;
 const nomicsParamsOrDefault = nomicParams ?? `model="nomic-embed-text-v1.5"`;
 const nvidiaParamsOrDefault = nvidiaParams ?? `model="NV-Embed-QA"`;
 const voyageaiParamsOrDefault = voyageaiParams ?? `model="voyage-3"`;
 const ibmParamsOrDefault = ibmParams ??
 `\n model_id="ibm/slate-125m-english-rtrvr",\n url="https://us-south.ml.cloud.ibm.com",\n project_id="<WATSONX PROJECT_ID>",\n`;
 const fakeEmbeddingParamsOrDefault = fakeEmbeddingParams ?? `size=4096`;
 
 const embeddingVarName = customVarName ?? "embeddings";
 
 const tabItems = [
 {
 value: "OpenAI",
 label: "OpenAI",
 text: `from langchain_openai import OpenAIEmbeddings\n\n${embeddingVarName} = OpenAIEmbeddings(${openAIParamsOrDefault})`,
 apiKeyName: "OPENAI_API_KEY",
 packageName: "langchain-openai",
 default: true,
 shouldHide: hideOpenai,
 },
 {
 value: "Azure",
 label: "Azure",
 text: `from langchain_openai import AzureOpenAIEmbeddings\n\n${embeddingVarName} = AzureOpenAIEmbeddings(${azureParamsOrDefault})`,
 apiKeyName: "AZURE_OPENAI_API_KEY",
 packageName: "langchain-openai",
 default: false,
 shouldHide: hideAzureOpenai,
 },
 {
 value: "GoogleGenAI",
 label: "Google Gemini",
 text: `from langchain_google_genai import GoogleGenerativeAIEmbeddings\n\n${embeddingVarName} = GoogleGenerativeAIEmbeddings(${googleGenAIParamsOrDefault})`,
 apiKeyName: "GOOGLE_API_KEY",
 packageName: "langchain-google-genai",
 default: false,
 shouldHide: hideGoogleGenAI,
 },
 {
 value: "GoogleVertexAI",
 label: "Google Vertex",
 text: `from langchain_google_vertexai import VertexAIEmbeddings\n\n${embeddingVarName} = VertexAIEmbeddings(${googleVertexAIParamsOrDefault})`,
 apiKeyName: undefined,
 packageName: "langchain-google-vertexai",
 default: false,
 shouldHide: hideGoogleVertexAI,
 },
 {
 value: "AWS",
 label: "AWS",
 text: `from langchain_aws import BedrockEmbeddings\n\n${embeddingVarName} = BedrockEmbeddings(${awsParamsOrDefault})`,
 apiKeyName: undefined,
 packageName: "langchain-aws",
 default: false,
 shouldHide: hideAws,
 },
 {
 value: "HuggingFace",
 label: "HuggingFace",
 text: `from langchain_huggingface import HuggingFaceEmbeddings\n\n${embeddingVarName} = HuggingFaceEmbeddings(${huggingFaceParamsOrDefault})`,
 apiKeyName: undefined,
 packageName: "langchain-huggingface",
 default: false,
 shouldHide: hideHuggingFace,
 },
 {
 value: "Ollama",
 label: "Ollama",
 text: `from langchain_ollama import OllamaEmbeddings\n\n${embeddingVarName} = OllamaEmbeddings(${ollamaParamsOrDefault})`,
 apiKeyName: undefined,
 packageName: "langchain-ollama",
 default: false,
 shouldHide: hideOllama,
 },
 {
 value: "Cohere",
 label: "Cohere",
 text: `from langchain_cohere import CohereEmbeddings\n\n${embeddingVarName} = CohereEmbeddings(${cohereParamsOrDefault})`,
 apiKeyName: "COHERE_API_KEY",
 packageName: "langchain-cohere",
 default: false,
 shouldHide: hideCohere,
 },
 {
 value: "MistralAI",
 label: "MistralAI",
 text: `from langchain_mistralai import MistralAIEmbeddings\n\n${embeddingVarName} = MistralAIEmbeddings(${mistralParamsOrDefault})`,
 apiKeyName: "MISTRALAI_API_KEY",
 packageName: "langchain-mistralai",
 default: false,
 shouldHide: hideMistral,
 },
 {
 value: "Nomic",
 label: "Nomic",
 text: `from langchain_nomic import NomicEmbeddings\n\n${embeddingVarName} = NomicEmbeddings(${nomicsParamsOrDefault})`,
 apiKeyName: "NOMIC_API_KEY",
 packageName: "langchain-nomic",
 default: false,
 shouldHide: hideNomic,
 },
 {
 value: "NVIDIA",
 label: "NVIDIA",
 text: `from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings\n\n${embeddingVarName} = NVIDIAEmbeddings(${nvidiaParamsOrDefault})`,
 apiKeyName: "NVIDIA_API_KEY",
 packageName: "langchain-nvidia-ai-endpoints",
 default: false,
 shouldHide: hideNvidia,
 },
 {
 value: "Voyage AI",
 label: "Voyage AI",
 text: `from langchain_voyageai import VoyageAIEmbeddings\n\n${embeddingVarName} = VoyageAIEmbeddings(${voyageaiParamsOrDefault})`,
 apiKeyName: "VOYAGE_API_KEY",
 packageName: "langchain-voyageai",
 default: false,
 shouldHide: hideVoyageai,
 },
 {
 value: "IBM",
 label: "IBM watsonx",
 text: `from langchain_ibm import WatsonxEmbeddings\n\n${embeddingVarName} = WatsonxEmbeddings(${ibmParamsOrDefault})`,
 apiKeyName: "WATSONX_APIKEY",
 packageName: "langchain-ibm",
 default: false,
 shouldHide: hideIBM,
 },
 {
 value: "Fake",
 label: "Fake",
 text: `from langchain_core.embeddings import DeterministicFakeEmbedding\n\n${embeddingVarName} = DeterministicFakeEmbedding(${fakeEmbeddingParamsOrDefault})`,
 apiKeyName: undefined,
 packageName: "langchain-core",
 default: false,
 shouldHide: hideFakeEmbedding,
 },
 ];
 
 const modelOptions = tabItems
 .filter((item) => !item.shouldHide)
 .map((item) => ({
 value: item.value,
 label: item.label,
 text: item.text,
 apiKeyName: item.apiKeyName,
 apiKeyText: item.apiKeyText,
 packageName: item.packageName,
 }));
 
 const selectedOption = modelOptions.find(
 (option) => option.value === selectedModel
 );
 
 let apiKeyText = "";
 if (selectedOption.apiKeyName) {
 apiKeyText = `import getpass
 import os
 
 if not os.environ.get("${selectedOption.apiKeyName}"):
@@ -211,21 +211,21 @@ if not os.environ.get("${selectedOption.apiKeyName}"):
 apiKeyText = selectedOption.apiKeyText;
 }
 
 return (
 <div>
 <CustomDropdown
 selectedOption={selectedOption}
 options={modelOptions}
 onSelect={setSelectedModel}
 modelType="embeddings"
 />
 
 <CodeBlock language="bash">
 {`pip install -qU ${selectedOption.packageName}`}
 </CodeBlock>
 <CodeBlock language="python">
 {apiKeyText ? apiKeyText + "\n\n" + selectedOption.text : selectedOption.text}
 </CodeBlock>
 </div>
 );
 }
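The updated defaults in `EmbeddingTabs` expand to snippets like the following for the two Google entries:

```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_google_vertexai import VertexAIEmbeddings

# Google Gemini default after this change
embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")

# Google Vertex default after this change
vertex_embeddings = VertexAIEmbeddings(model="text-embedding-005")
```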
@@ -3,15 +3,7 @@ from __future__ import annotations
 import warnings
 from collections.abc import AsyncIterator, Iterator, Sequence
 from importlib import util
-from typing import (
-    Any,
-    Callable,
-    Literal,
-    Optional,
-    Union,
-    cast,
-    overload,
-)
+from typing import Any, Callable, Literal, Optional, Union, cast, overload
 
 from langchain_core.language_models import (
     BaseChatModel,
@@ -188,7 +180,7 @@ def init_chat_model(
 
 o3_mini = init_chat_model("openai:o3-mini", temperature=0)
 claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-gemini_2_flash = init_chat_model("google_vertexai:gemini-2.0-flash", temperature=0)
+gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
 
 o3_mini.invoke("what's your name")
 claude_sonnet.invoke("what's your name")
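The docstring example stays runnable as-is with the new id; a trimmed version:

```python
from langchain.chat_models import init_chat_model

o3_mini = init_chat_model("openai:o3-mini", temperature=0)
gemini_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)

o3_mini.invoke("what's your name")
gemini_flash.invoke("what's your name")
```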
@@ -13,10 +13,7 @@ from typing import (
 overload,
 )
 
-from langchain_core.language_models import (
-    BaseChatModel,
-    LanguageModelInput,
-)
+from langchain_core.language_models import BaseChatModel, LanguageModelInput
 from langchain_core.messages import AnyMessage, BaseMessage
 from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
 from typing_extensions import TypeAlias, override
@@ -177,7 +174,7 @@ def init_chat_model(
 
 o3_mini = init_chat_model("openai:o3-mini", temperature=0)
 claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-gemini_2_flash = init_chat_model("google_vertexai:gemini-2.0-flash", temperature=0)
+gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
 
 o3_mini.invoke("what's your name")
 claude_sonnet.invoke("what's your name")