mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-29 18:08:36 +00:00)

docs[minor]: Add chat model tabs to docs pages (#19589)

commit ce0a588ae6 (parent bd02b83acd)
@@ -40,6 +40,33 @@
 "%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
 ]
 },
+{
+ "cell_type": "markdown",
+ "id": "c3d54f72",
+ "metadata": {},
+ "source": [
+  "```{=mdx}\n",
+  "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+  "\n",
+  "<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
+  "```"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f9eed8e8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "# | output: false\n",
+  "# | echo: false\n",
+  "\n",
+  "from langchain_openai import ChatOpenAI\n",
+  "\n",
+  "model = ChatOpenAI(model=\"gpt-4\")"
+ ]
+},
 {
 "cell_type": "code",
 "execution_count": 1,
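Throughout this commit the same two-cell pattern recurs: a markdown cell emits the MDX `<ChatModelTabs />` provider picker, and a hidden code cell (the `# | output: false` / `# | echo: false` Quarto directives) defines the variable so the rest of the notebook still executes. A minimal sketch of what a reader who picks the OpenAI tab effectively runs (other tabs substitute their own import and constructor):

```python
# Equivalent of the hidden setup cell plus the rendered OpenAI tab.
from langchain_openai import ChatOpenAI

# Later cells reference `model` regardless of which provider was chosen.
model = ChatOpenAI(model="gpt-4")
```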
@@ -60,10 +87,8 @@
 "source": [
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.prompts import ChatPromptTemplate\n",
-"from langchain_openai import ChatOpenAI\n",
 "\n",
 "prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n",
-"model = ChatOpenAI(model=\"gpt-4\")\n",
 "output_parser = StrOutputParser()\n",
 "\n",
 "chain = prompt | model | output_parser\n",
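After this hunk the cell no longer constructs its own model; `model` comes from the tabbed setup above. Assembled from the context lines, the cell now reads as below (the final `invoke` is an illustrative call, not part of this hunk):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("tell me a short joke about {topic}")
output_parser = StrOutputParser()

# `model` is defined by the hidden per-provider setup cell.
chain = prompt | model | output_parser
chain.invoke({"topic": "bears"})  # returns a plain string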
@@ -324,6 +349,16 @@
 "For our next example, we want to run a retrieval-augmented generation chain to add some context when responding to questions."
 ]
 },
+{
+ "cell_type": "markdown",
+ "id": "b8fe8eb4",
+ "metadata": {},
+ "source": [
+  "```{=mdx}\n",
+  "<ChatModelTabs />\n",
+  "```"
+ ]
+},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -338,7 +373,7 @@
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
-"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
+"from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "vectorstore = DocArrayInMemorySearch.from_texts(\n",
 "    [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",
@@ -352,7 +387,6 @@
 "Question: {question}\n",
 "\"\"\"\n",
 "prompt = ChatPromptTemplate.from_template(template)\n",
-"model = ChatOpenAI()\n",
 "output_parser = StrOutputParser()\n",
 "\n",
 "setup_and_retrieval = RunnableParallel(\n",
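The `RunnableParallel(` line is where this hunk cuts off. As a hedged sketch only (the mapping below follows the usual pattern for this example in the LCEL docs and is not visible in this diff), the cell presumably continues roughly as:

```python
# Hypothetical continuation: `retriever`, `prompt`, and `output_parser` are
# defined earlier in the cell, and `model` by the tabbed setup.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | model | output_parser
chain.invoke("where did harrison work?")
```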
@@ -495,7 +529,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.11.4"
+"version": "3.11.0"
 }
 },
 "nbformat": 4,
@@ -10,7 +10,9 @@
 "title: Why use LCEL\n",
 "---\n",
 "\n",
-"import { ColumnContainer, Column } from \\\"@theme/Columns\\\";"
+"```{=mdx}\n",
+"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
+"```"
 ]
 },
 {
@@ -53,10 +55,13 @@
 "## Invoke\n",
 "In the simplest case, we just want to pass in a topic string and get back a joke string:\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "\n",
 "<Column>\n",
 "\n",
+"```\n",
+"\n",
 "#### Without LCEL\n"
 ]
 },
@@ -95,9 +100,12 @@
 "id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
 "metadata": {},
 "source": [
+"\n",
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -136,14 +144,19 @@
 "id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
 "metadata": {},
 "source": [
+"\n",
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
-"\n",
+"```\n",
 "## Stream\n",
 "If we want to stream results instead, we'll need to change our function:\n",
 "\n",
+"```{=mdx}\n",
+"\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -184,10 +197,11 @@
 "id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
-"\n",
+"```\n",
 "#### LCEL\n",
 "\n"
 ]
@@ -208,15 +222,19 @@
 "id": "b9b41e78-ddeb-44d0-a58b-a0ea0c99a761",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
+"```\n",
 "\n",
 "## Batch\n",
 "\n",
 "If we want to run on a batch of inputs in parallel, we'll again need a new function:\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -244,10 +262,11 @@
 "id": "9b3e9d34-6775-43c1-93d8-684b58e341ab",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
-"\n",
+"```\n",
 "#### LCEL\n",
 "\n"
 ]
@@ -267,15 +286,18 @@
 "id": "cc5ba36f-eec1-4fc1-8cfe-fa242a7f7809",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
-"\n",
+"```\n",
 "## Async\n",
 "\n",
 "If we need an asynchronous version:\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -311,9 +333,11 @@
 "id": "2f209290-498c-4c17-839e-ee9002919846",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -334,13 +358,16 @@
 "id": "1f282129-99a3-40f4-b67f-2d0718b1bea9",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
-"\n",
+"```\n",
 "## Async Batch\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -370,9 +397,11 @@
 "id": "90691048-17ae-479d-83c2-859e33ddf3eb",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -393,15 +422,19 @@
 "id": "f6888245-1ebe-4768-a53b-e1fef6a8b379",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
+"```\n",
 "\n",
 "## LLM instead of chat model\n",
 "\n",
 "If we want to use a completion endpoint instead of a chat endpoint: \n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -433,9 +466,11 @@
 "id": "45342cd6-58c2-4543-9392-773e05ef06e7",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -466,15 +501,19 @@
 "id": "ca115eaf-59ef-45c1-aac1-e8b0ce7db250",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
+"```\n",
 "\n",
 "## Different model provider\n",
 "\n",
 "If we want to use Anthropic instead of OpenAI: \n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -512,9 +551,11 @@
 "id": "52a0c9f8-e316-42e1-af85-cabeba4b7059",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -545,15 +586,19 @@
 "id": "d7a91eee-d017-420d-b215-f663dcbf8ed2",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
+"```\n",
 "\n",
 "## Runtime configurability\n",
 "\n",
 "If we wanted to make the choice of chat model or LLM configurable at runtime:\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -634,9 +679,11 @@
 "id": "d1530c5c-6635-4599-9483-6df357ca2d64",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### With LCEL\n",
 "\n"
@@ -694,15 +741,19 @@
 "id": "370dd4d7-b825-40c4-ae3c-2693cba2f22a",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
+"```\n",
 "\n",
 "## Logging\n",
 "\n",
 "If we want to log our intermediate results:\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n",
@@ -733,9 +784,11 @@
 "id": "16bd20fd-43cd-4aaf-866f-a53d1f20312d",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "Every component has built-in integrations with LangSmith. If we set the following two environment variables, all chain traces are logged to LangSmith.\n",
@@ -770,16 +823,19 @@
 "id": "e25ce3c5-27a7-4954-9f0e-b94313597135",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "</ColumnContainer>\n",
+"```\n",
 "\n",
 "## Fallbacks\n",
 "\n",
 "If we wanted to add fallback logic, in case one model API is down:\n",
 "\n",
-"\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n",
@@ -823,9 +879,11 @@
 "id": "f7ef59b5-2ce3-479e-a7ac-79e1e2f30e9c",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -850,8 +908,10 @@
 "id": "3af52d36-37c6-4d89-b515-95d7270bb96a",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
-"</ColumnContainer>"
+"</ColumnContainer>\n",
+"```"
 ]
 },
 {
@@ -863,8 +923,10 @@
 "\n",
 "Even in this simple case, our LCEL chain succinctly packs in a lot of functionality. As chains become more complex, this becomes especially valuable.\n",
 "\n",
+"```{=mdx}\n",
 "<ColumnContainer>\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### Without LCEL\n",
 "\n"
@@ -1044,9 +1106,11 @@
 "id": "9fb3d71d-8c69-4dc4-81b7-95cd46b271c2",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
 "\n",
 "<Column>\n",
+"```\n",
 "\n",
 "#### LCEL\n",
 "\n"
@@ -1101,8 +1165,10 @@
 "id": "e3637d39",
 "metadata": {},
 "source": [
+"```{=mdx}\n",
 "</Column>\n",
-"</ColumnContainer>"
+"</ColumnContainer>\n",
+"```"
 ]
 },
 {
@@ -1141,3 +1207,4 @@
 "nbformat": 4,
 "nbformat_minor": 5
 }
@@ -12,6 +12,33 @@
 "It can speed up your application by reducing the number of API calls you make to the LLM provider.\n"
 ]
 },
+{
+ "cell_type": "markdown",
+ "id": "289b31de",
+ "metadata": {},
+ "source": [
+  "```{=mdx}\n",
+  "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+  "\n",
+  "<ChatModelTabs customVarName=\"llm\" />\n",
+  "```"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c6641f37",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "# | output: false\n",
+  "# | echo: false\n",
+  "\n",
+  "from langchain_openai import ChatOpenAI\n",
+  "\n",
+  "llm = ChatOpenAI()"
+ ]
+},
 {
 "cell_type": "code",
 "execution_count": 1,
@@ -19,10 +46,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.globals import set_llm_cache\n",
-"from langchain_openai import ChatOpenAI\n",
-"\n",
-"llm = ChatOpenAI()"
+"# <!-- ruff: noqa: F821 -->\n",
+"from langchain.globals import set_llm_cache"
 ]
 },
 {
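The rewritten cell imports only `set_llm_cache`; the `llm` it will wrap now comes from the tabbed setup, and the `ruff: noqa: F821` comment suppresses the resulting undefined-name lint. For orientation, a hedged sketch of how the cache is then activated (the `InMemoryCache` import path is an assumption based on the caching docs of this period, not shown in this diff):

```python
from langchain.cache import InMemoryCache  # assumed import; other backends exist
from langchain.globals import set_llm_cache

set_llm_cache(InMemoryCache())

llm.invoke("Tell me a joke")  # first call reaches the provider API
llm.invoke("Tell me a joke")  # identical repeat is served from the cache
```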
@@ -38,7 +38,6 @@ Before getting started make sure you have `langchain-core` installed.
 %pip install -qU langchain-core langchain-openai
 ```
 
-
 ```python
 import getpass
 import os
@@ -64,33 +63,21 @@
 b: int = Field(..., description="Second integer")
 ```
 
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
 
-<Tabs>
-<TabItem value="openai" label="OpenAI" default>
+import ChatModelTabs from "@theme/ChatModelTabs";
 
-Set up dependencies and API keys:
-
-```python
-%pip install -qU langchain-openai
-```
-
-
-```python
-os.environ["OPENAI_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatOpenAI.bind_tools()` method to handle converting
-`Multiply` to an OpenAI function and binding it to the model (i.e.,
+<ChatModelTabs
+  customVarName="llm"
+  fireworksParams={`model="accounts/fireworks/models/firefunction-v1", temperature=0`}
+/>
+
+We can use the `bind_tools()` method to handle converting
+`Multiply` to a "function" and binding it to the model (i.e.,
 passing it in each time the model is invoked).
 
 ```python
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
 llm_with_tools = llm.bind_tools([Multiply])
 llm_with_tools.invoke("what's 3 * 12")
 ```
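Pieced together, the page now teaches this provider-agnostic flow. A runnable sketch with `ChatOpenAI` standing in for whichever tab the reader selects (the field definitions are quoted from the page; the class docstring is an assumption):

```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI  # stand-in for the chosen provider tab


class Multiply(BaseModel):
    """Multiply two integers together."""  # assumed docstring

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([Multiply])
llm_with_tools.invoke("what's 3 * 12")
```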
@@ -126,8 +113,21 @@ tool_chain.invoke("what's 3 * 12")
 [Multiply(a=3, b=12)]
 ```
 
+If our model isn’t using the tool, as is the case here, we can force
+tool usage by specifying `tool_choice="any"` or by specifying the name
+of the specific tool we want used:
+
+```python
+llm_with_tools = llm.bind_tools([Multiply], tool_choice="Multiply")
+llm_with_tools.invoke("what's 3 * 12")
+```
+
+```text
+AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_qIP2bJugb67LGvc6Zhwkvfqc', 'type': 'function', 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
+```
+
 If we wanted to force that a tool is used (and that it is used only
-once), we can set the `tool_choice` argument:
+once), we can set the `tool_choice` argument to the name of the tool:
 
 ```python
 llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")
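The per-provider tabs removed in the next hunk all demonstrated the same two output parsers; since that guidance disappears from the page, a compact recap quoted from the deleted text:

```python
from langchain_core.output_parsers.openai_tools import (
    JsonOutputToolsParser,
    PydanticToolsParser,
)

# Extract tool calls from the generated message as JSON-style dicts...
tool_chain = llm_with_tools | JsonOutputToolsParser()
tool_chain.invoke("what's 3 * 12")  # -> [{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]

# ...or rehydrate them back into the original Pydantic class.
tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
tool_chain.invoke("what's 3 * 12")  # -> [Multiply(a=3, b=12)]
```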
@@ -143,232 +143,6 @@ AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_f3DApOzb60i
 For more see the [ChatOpenAI API
 reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.bind_tools).
 
-</TabItem>
-<TabItem value="fireworks" label="Fireworks">
-
-Install dependencies and set API keys:
-
-```python
-%pip install -qU langchain-fireworks
-```
-
-
-```python
-os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatFireworks.bind_tools()` method to handle converting
-`Multiply` to a valid function schema and binding it to the model (i.e.,
-passing it in each time the model is invoked).
-
-```python
-from langchain_fireworks import ChatFireworks
-
-llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)
-llm_with_tools = llm.bind_tools([Multiply])
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='Three multiplied by twelve is 36.')
-```
-
-If our model isn’t using the tool, as is the case here, we can force
-tool usage by specifying `tool_choice="any"` or by specifying the name
-of the specific tool we want used:
-
-```python
-llm_with_tools = llm.bind_tools([Multiply], tool_choice="Multiply")
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_qIP2bJugb67LGvc6Zhwkvfqc', 'type': 'function', 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
-```
-
-We can add a tool parser to extract the tool calls from the generated
-message to JSON:
-
-```python
-from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
-
-tool_chain = llm_with_tools | JsonOutputToolsParser()
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
-```
-
-Or back to the original Pydantic class:
-
-```python
-from langchain_core.output_parsers.openai_tools import PydanticToolsParser
-
-tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[Multiply(a=3, b=12)]
-```
-
-For more see the [ChatFireworks](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html#langchain_fireworks.chat_models.ChatFireworks.bind_tools) reference.
-
-</TabItem>
-<TabItem value="mistral" label="Mistral">
-
-Install dependencies and set API keys:
-
-```python
-%pip install -qU langchain-mistralai
-```
-
-
-```python
-os.environ["MISTRAL_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatMistralAI.bind_tools()` method to handle converting
-`Multiply` to a valid function schema and binding it to the model (i.e.,
-passing it in each time the model is invoked).
-
-```python
-from langchain_mistralai import ChatMistralAI
-
-llm = ChatMistralAI(model="mistral-large-latest", temperature=0)
-llm_with_tools = llm.bind_tools([Multiply])
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'null', 'type': <ToolType.function: 'function'>, 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
-```
-
-We can add a tool parser to extract the tool calls from the generated
-message to JSON:
-
-```python
-from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
-
-tool_chain = llm_with_tools | JsonOutputToolsParser()
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
-```
-
-Or back to the original Pydantic class:
-
-```python
-from langchain_core.output_parsers.openai_tools import PydanticToolsParser
-
-tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[Multiply(a=3, b=12)]
-```
-
-We can force tool usage by specifying `tool_choice="any"`:
-
-```python
-llm_with_tools = llm.bind_tools([Multiply], tool_choice="any")
-llm_with_tools.invoke("I don't even want you to use the tool")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'null', 'type': <ToolType.function: 'function'>, 'function': {'name': 'Multiply', 'arguments': '{"a": 5, "b": 7}'}}]})
-```
-
-For more see the [ChatMistralAI API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html#langchain_mistralai.chat_models.ChatMistralAI).
-
-</TabItem>
-<TabItem value="together" label="Together">
-
-Since TogetherAI is a drop-in replacement for OpenAI, we can just use
-the OpenAI integration.
-
-Install dependencies and set API keys:
-
-```python
-%pip install -qU langchain-openai
-```
-
-
-```python
-os.environ["TOGETHER_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatOpenAI.bind_tools()` method to handle converting
-`Multiply` to a valid function schema and binding it to the model (i.e.,
-passing it in each time the model is invoked).
-
-```python
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(
-    base_url="https://api.together.xyz/v1",
-    api_key=os.environ["TOGETHER_API_KEY"],
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
-)
-llm_with_tools = llm.bind_tools([Multiply])
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_4tc61dp0478zafqe33hfriee', 'function': {'arguments': '{"a":3,"b":12}', 'name': 'Multiply'}, 'type': 'function'}]})
-```
-
-We can add a tool parser to extract the tool calls from the generated
-message to JSON:
-
-```python
-from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
-
-tool_chain = llm_with_tools | JsonOutputToolsParser()
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
-```
-
-Or back to the original Pydantic class:
-
-```python
-from langchain_core.output_parsers.openai_tools import PydanticToolsParser
-
-tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[Multiply(a=3, b=12)]
-```
-
-If we wanted to force that a tool is used (and that it is used only
-once), we can set the `tool_choice` argument:
-
-```python
-llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")
-llm_with_multiply.invoke(
-    "make up some numbers if you really want but I'm not forcing you"
-)
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6k6d0gr3jhqil2kqf7sgeusl', 'function': {'arguments': '{"a":5,"b":7}', 'name': 'Multiply'}, 'type': 'function'}]})
-```
-
-For more see the [ChatOpenAI API
-reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.bind_tools).
-
-</TabItem>
-</Tabs>
-
 ## Defining functions schemas
 
 In case you need to access function schemas directly, LangChain has a built-in converter that can turn
@@ -22,32 +22,19 @@
 "While chat models use language models under the hood, the interface they use is a bit different.\n",
 "Rather than using a \"text in, text out\" API, they use an interface where \"chat messages\" are the inputs and outputs.\n",
 "\n",
-"## Setup\n",
-"\n",
-"For this example we'll need to install the OpenAI partner package:\n",
-"\n",
-"```bash\n",
-"pip install langchain-openai\n",
-"```\n",
-"\n",
-"Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:\n",
-"\n",
-"```bash\n",
-"export OPENAI_API_KEY=\"...\"\n",
-"```\n",
-"If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:\n"
+"## Setup\n"
 ]
 },
 {
-"cell_type": "code",
-"execution_count": null,
+"cell_type": "markdown",
 "id": "e230abb2-bc84-438b-b9ff-dd124acb1375",
 "metadata": {},
-"outputs": [],
 "source": [
-"from langchain_openai import ChatOpenAI\n",
+"```{=mdx}\n",
+"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
 "\n",
-"chat = ChatOpenAI(openai_api_key=\"...\")"
+"<ChatModelTabs customVarName=\"chat\" />\n",
+"```"
 ]
 },
 {
@@ -55,19 +42,25 @@
 "id": "609bbd5c-e5a1-4166-89e1-d6c52054860d",
 "metadata": {},
 "source": [
-"Otherwise you can initialize without any params:"
+"If you'd prefer not to set an environment variable you can pass the key in directly via the api key arg named parameter when initiating the chat model class:"
 ]
 },
 {
-"cell_type": "code",
-"execution_count": 1,
+"cell_type": "markdown",
 "id": "3d9dbf70-2397-4d6b-87ec-3e6d4699f3df",
 "metadata": {},
-"outputs": [],
 "source": [
-"from langchain_openai import ChatOpenAI\n",
-"\n",
-"chat = ChatOpenAI()"
+"```{=mdx}\n",
+"<ChatModelTabs\n",
+"  openaiParams={`model=\"gpt-3.5-turbo-0125\", openai_api_key=\"...\"`}\n",
+"  anthropicParams={`model=\"claude-3-sonnet-20240229\", anthropic_api_key=\"...\"`}\n",
+"  fireworksParams={`model=\"accounts/fireworks/models/mixtral-8x7b-instruct\", fireworks_api_key=\"...\"`}\n",
+"  mistralParams={`model=\"mistral-large-latest\", mistral_api_key=\"...\"`}\n",
+"  googleParams={`model=\"gemini-pro\", google_api_key=\"...\"`}\n",
+"  togetherParams={`, together_api_key=\"...\"`}\n",
+"  customVarName=\"chat\"\n",
+"/>\n",
+"```"
 ]
 },
 {
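For concreteness, the OpenAI tab of the `<ChatModelTabs>` above renders roughly the following; every provider tab has the same shape (the `"..."` placeholder stands for a real key and is kept as-is from the diff):

```python
from langchain_openai import ChatOpenAI

# Pass the key directly instead of reading it from the environment.
chat = ChatOpenAI(model="gpt-3.5-turbo-0125", openai_api_key="...")
```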
@@ -108,6 +101,21 @@
 "]"
 ]
 },
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "570dae71",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "# | output: false\n",
+  "# | echo: false\n",
+  "\n",
+  "from langchain_openai import ChatOpenAI\n",
+  "\n",
+  "chat = ChatOpenAI()"
+ ]
+},
 {
 "cell_type": "code",
 "execution_count": 11,
@@ -516,7 +516,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.10.4"
+"version": "3.11.0"
 }
 },
 "nbformat": 4,
@@ -24,7 +24,7 @@ introduction](../../../docs/use_cases/question_answering/), which has
 two main components:
 
 **Indexing**: a pipeline for ingesting data from a source and indexing
-it. *This usually happens offline.*
+it. _This usually happens offline._
 
 **Retrieval and generation**: the actual RAG chain, which takes the user
 query at run time and retrieves the relevant data from the index, then
@@ -77,7 +77,7 @@ We’ll use the following packages:
 %pip install --upgrade --quiet langchain langchain-community langchainhub langchain-openai chromadb bs4
 ```
 
-We need to set environment variable `OPENAI_API_KEY`, which can be done
+We need to set environment variable `OPENAI_API_KEY` for the embeddings model, which can be done
 directly or loaded from a `.env` file like so:
 
 ```python
@@ -125,10 +125,13 @@ from langchain_community.document_loaders import WebBaseLoader
 from langchain_community.vectorstores import Chroma
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 ```
 
+import ChatModelTabs from "@theme/ChatModelTabs";
+
+<ChatModelTabs customVarName="llm" />
 
 ```python
 # Load, chunk and index the contents of the blog.
@@ -149,8 +152,6 @@ vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings
 # Retrieve and generate using the relevant snippets of the blog.
 retriever = vectorstore.as_retriever()
 prompt = hub.pull("rlm/rag-prompt")
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
-
 
 def format_docs(docs):
     return "\n\n".join(doc.page_content for doc in docs)
@@ -164,7 +165,6 @@ rag_chain = (
 )
 ```
 
-
 ```python
 rag_chain.invoke("What is Task Decomposition?")
 ```
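The `rag_chain = (` in the hunk header above is context; the chain body itself sits outside this hunk. A hedged sketch of the standard composition this page builds (names like `retriever`, `format_docs`, and `prompt` are defined earlier on the page, and `llm` now comes from the tabs):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
rag_chain.invoke("What is Task Decomposition?")
```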
@@ -219,7 +219,6 @@ loader = WebBaseLoader(
 docs = loader.load()
 ```
 
-
 ```python
 len(docs[0].page_content)
 ```
@@ -249,6 +248,7 @@ In
 
 `DocumentLoader`: Object that loads data from a source as list of
 `Documents`.
+
 - [Docs](../../../docs/modules/data_connection/document_loaders/):
   Detailed documentation on how to use `DocumentLoaders`.
 - [Integrations](../../../docs/integrations/document_loaders/): 160+
@@ -289,7 +289,6 @@ text_splitter = RecursiveCharacterTextSplitter(
 all_splits = text_splitter.split_documents(docs)
 ```
 
-
 ```python
 len(all_splits)
 ```
@@ -319,6 +318,7 @@ all_splits[10].metadata
 
 `TextSplitter`: Object that splits a list of `Document`s into smaller
 chunks. Subclass of `DocumentTransformer`s.
+
 - Explore `Context-aware splitters`, which keep the location (“context”) of each
   split in the original `Document`: - [Markdown
   files](../../../docs/modules/data_connection/document_transformers/markdown_header_metadata)
@@ -328,6 +328,7 @@ files](../../../docs/modules/data_connection/document_transformers/markdown_head
 
 `DocumentTransformer`: Object that performs a transformation on a list
 of `Document`s.
+
 - [Docs](../../../docs/modules/data_connection/document_transformers/): Detailed documentation on how to use `DocumentTransformers`
 - [Integrations](../../../docs/integrations/document_transformers/)
 - [Interface](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.transformers.BaseDocumentTransformer.html): API reference for the base interface.
@@ -361,12 +362,14 @@ vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbedd
 
 `Embeddings`: Wrapper around a text embedding model, used for converting
 text to embeddings.
+
 - [Docs](../../../docs/modules/data_connection/text_embedding): Detailed documentation on how to use embeddings.
 - [Integrations](../../../docs/integrations/text_embedding/): 30+ integrations to choose from.
 - [Interface](https://api.python.langchain.com/en/latest/embeddings/langchain_core.embeddings.Embeddings.html): API reference for the base interface.
+
 `VectorStore`: Wrapper around a vector database, used for storing and
 querying embeddings.
 
 - [Docs](../../../docs/modules/data_connection/vectorstores/): Detailed documentation on how to use vector stores.
 - [Integrations](../../../docs/integrations/vectorstores/): 40+ integrations to choose from.
 - [Interface](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html): API reference for the base interface.
@@ -399,12 +402,10 @@ facilitate retrieval. Any `VectorStore` can easily be turned into a
 retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})
 ```
 
-
 ```python
 retrieved_docs = retriever.invoke("What are the approaches to Task Decomposition?")
 ```
 
-
 ```python
 len(retrieved_docs)
 ```
@@ -460,34 +461,13 @@ parses the output.
 We’ll use the gpt-3.5-turbo OpenAI chat model, but any LangChain `LLM`
 or `ChatModel` could be substituted in.
 
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
 
-<Tabs>
-<TabItem value="openai" label="OpenAI" default>
-
-```python
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(model_name="gpt-3.5-turbo-0125", temperature=0)
-```
-
-</TabItem>
-<TabItem value="local" label="Anthropic">
-
-```python
-%pip install -qU langchain-anthropic
-```
-
-
-```python
-from langchain_anthropic import ChatAnthropic
-
-llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024)
-```
-
-</TabItem>
-</Tabs>
-
+<ChatModelTabs
+  customVarName="llm"
+  anthropicParams={`"model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024"`}
+/>
+
 We’ll use a prompt for RAG that is checked into the LangChain prompt hub
 ([here](https://smith.langchain.com/hub/rlm/rag-prompt)).
@@ -498,7 +478,6 @@ from langchain import hub
 prompt = hub.pull("rlm/rag-prompt")
 ```
 
-
 ```python
 example_messages = prompt.invoke(
     {"context": "filler context", "question": "filler question"}
@@ -543,7 +522,6 @@ rag_chain = (
 )
 ```
 
-
 ```python
 for chunk in rag_chain.stream("What is Task Decomposition?"):
     print(chunk, end="", flush=True)
@@ -562,11 +540,13 @@ trace](https://smith.langchain.com/public/1799e8db-8a6d-4eb2-84d5-46e8d7d5a99b/r
 
 `ChatModel`: An LLM-backed chat model. Takes in a sequence of messages
 and returns a message.
+
 - [Docs](../../../docs/modules/model_io/chat/)
 - [Integrations](../../../docs/integrations/chat/): 25+ integrations to choose from.
 - [Interface](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html): API reference for the base interface.
+
 `LLM`: A text-in-text-out LLM. Takes in a string and returns a string.
 
 - [Docs](../../../docs/modules/model_io/llms)
 - [Integrations](../../../docs/integrations/llms): 75+ integrations to choose from.
 - [Interface](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.llms.BaseLLM.html): API reference for the base interface.
|
@ -1,4 +1,4 @@
|
|||||||
/* eslint-disable react/jsx-props-no-spreading */
|
/* eslint-disable react/jsx-props-no-spreading, react/destructuring-assignment */
|
||||||
import React from "react";
|
import React from "react";
|
||||||
import Tabs from "@theme/Tabs";
|
import Tabs from "@theme/Tabs";
|
||||||
import TabItem from "@theme/TabItem";
|
import TabItem from "@theme/TabItem";
|
||||||
@@ -20,7 +20,24 @@ os.environ["${apiKeyName}"] = getpass.getpass()`;
 }
 
 /**
- * @param {{ openaiParams?: string, anthropicParams?: string, fireworksParams?: string, mistralParams?: string, googleParams?: string, hideOpenai?: boolean, hideAnthropic?: boolean, hideFireworks?: boolean, hideMistral?: boolean, hideGoogle?: boolean }} props
+ * @typedef {Object} ChatModelTabsProps - Component props.
+ * @property {string} [openaiParams] - Parameters for OpenAI chat model. Defaults to `model="gpt-3.5-turbo-0125"`
+ * @property {string} [anthropicParams] - Parameters for Anthropic chat model. Defaults to `model="claude-3-sonnet-20240229"`
+ * @property {string} [fireworksParams] - Parameters for Fireworks chat model. Defaults to `model="accounts/fireworks/models/mixtral-8x7b-instruct"`
+ * @property {string} [mistralParams] - Parameters for Mistral chat model. Defaults to `model="mistral-large-latest"`
+ * @property {string} [googleParams] - Parameters for Google chat model. Defaults to `model="gemini-pro"`
+ * @property {string} [togetherParams] - Parameters for Together chat model. Defaults to Together's OpenAI-compatible endpoint with `model="mistralai/Mixtral-8x7B-Instruct-v0.1"`
+ * @property {boolean} [hideOpenai] - Whether or not to hide OpenAI chat model.
+ * @property {boolean} [hideAnthropic] - Whether or not to hide Anthropic chat model.
+ * @property {boolean} [hideFireworks] - Whether or not to hide Fireworks chat model.
+ * @property {boolean} [hideMistral] - Whether or not to hide Mistral chat model.
+ * @property {boolean} [hideGoogle] - Whether or not to hide Google chat model.
+ * @property {boolean} [hideTogether] - Whether or not to hide Together chat model.
+ * @property {string} [customVarName] - Custom variable name for the model. Defaults to `model`.
+ */
+
+/**
+ * @param {ChatModelTabsProps} props - Component props.
  */
 export default function ChatModelTabs(props) {
   const {
@@ -29,24 +46,36 @@ export default function ChatModelTabs(props) {
     fireworksParams,
     mistralParams,
     googleParams,
+    togetherParams,
     hideOpenai,
     hideAnthropic,
     hideFireworks,
     hideMistral,
     hideGoogle,
+    hideTogether,
+    customVarName,
   } = props;
 
-  const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`
-  const anthropicParamsOrDefault = anthropicParams ?? `model="claude-3-sonnet-20240229"`
-  const fireworksParamsOrDefault = fireworksParams ?? `model="accounts/fireworks/models/mixtral-8x7b-instruct"`
-  const mistralParamsOrDefault = mistralParams ?? `model="mistral-large-latest"`
-  const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`
+  const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`;
+  const anthropicParamsOrDefault =
+    anthropicParams ?? `model="claude-3-sonnet-20240229"`;
+  const fireworksParamsOrDefault =
+    fireworksParams ??
+    `model="accounts/fireworks/models/mixtral-8x7b-instruct"`;
+  const mistralParamsOrDefault =
+    mistralParams ?? `model="mistral-large-latest"`;
+  const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`;
+  const togetherParamsOrDefault =
+    togetherParams ??
+    `\n base_url="https://api.together.xyz/v1",\n api_key=os.environ["TOGETHER_API_KEY"],\n model="mistralai/Mixtral-8x7B-Instruct-v0.1",`;
+
+  const llmVarName = customVarName ?? "model";
 
   const tabItems = [
     {
       value: "OpenAI",
       label: "OpenAI",
-      text: `from langchain_openai import ChatOpenAI\n\nmodel = ChatOpenAI(${openAIParamsOrDefault})`,
+      text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${openAIParamsOrDefault})`,
      apiKeyName: "OPENAI_API_KEY",
      packageName: "langchain-openai",
      default: true,
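Putting the new defaults together: with no overrides, the TogetherAI tab's `text` template expands to roughly the following Python snippet. Together exposes an OpenAI-compatible API, which is why the tab reuses `langchain-openai`:

```python
import os

from langchain_openai import ChatOpenAI

# What the TogetherAI tab renders with the default togetherParams.
model = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
```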
@@ -55,7 +84,7 @@ export default function ChatModelTabs(props) {
     {
       value: "Anthropic",
       label: "Anthropic",
-      text: `from langchain_anthropic import ChatAnthropic\n\nmodel = ChatAnthropic(${anthropicParamsOrDefault})`,
+      text: `from langchain_anthropic import ChatAnthropic\n\n${llmVarName} = ChatAnthropic(${anthropicParamsOrDefault})`,
       apiKeyName: "ANTHROPIC_API_KEY",
       packageName: "langchain-anthropic",
       default: false,
@@ -64,7 +93,7 @@
     {
       value: "FireworksAI",
       label: "FireworksAI",
-      text: `from langchain_fireworks import ChatFireworks\n\nmodel = ChatFireworks(${fireworksParamsOrDefault})`,
+      text: `from langchain_fireworks import ChatFireworks\n\n${llmVarName} = ChatFireworks(${fireworksParamsOrDefault})`,
       apiKeyName: "FIREWORKS_API_KEY",
       packageName: "langchain-fireworks",
       default: false,
@@ -73,7 +102,7 @@
     {
       value: "MistralAI",
       label: "MistralAI",
-      text: `from langchain_mistralai import ChatMistralAI\n\nmodel = ChatMistralAI(${mistralParamsOrDefault})`,
+      text: `from langchain_mistralai import ChatMistralAI\n\n${llmVarName} = ChatMistralAI(${mistralParamsOrDefault})`,
       apiKeyName: "MISTRAL_API_KEY",
       packageName: "langchain-mistralai",
       default: false,
@@ -82,19 +111,37 @@
     {
       value: "Google",
       label: "Google",
-      text: `from langchain_google_genai import ChatGoogleGenerativeAI\n\nmodel = ChatGoogleGenerativeAI(${googleParamsOrDefault})`,
+      text: `from langchain_google_genai import ChatGoogleGenerativeAI\n\n${llmVarName} = ChatGoogleGenerativeAI(${googleParamsOrDefault})`,
       apiKeyName: "GOOGLE_API_KEY",
       packageName: "langchain-google-genai",
       default: false,
       shouldHide: hideGoogle,
-    }
-  ]
+    },
+    {
+      value: "TogetherAI",
+      label: "TogetherAI",
+      text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${togetherParamsOrDefault})`,
+      apiKeyName: "TOGETHER_API_KEY",
+      packageName: "langchain-openai",
+      default: false,
+      shouldHide: hideTogether,
+    },
+  ];
 
   return (
     <Tabs groupId="modelTabs">
-      {tabItems.filter((tabItem) => !tabItem.shouldHide).map((tabItem) => (
-        <TabItem value={tabItem.value} label={tabItem.label} default={tabItem.default}>
-          <Setup apiKeyName={tabItem.apiKeyName} packageName={tabItem.packageName} />
+      {tabItems
+        .filter((tabItem) => !tabItem.shouldHide)
+        .map((tabItem) => (
+          <TabItem
+            value={tabItem.value}
+            label={tabItem.label}
+            default={tabItem.default}
+          >
+            <Setup
+              apiKeyName={tabItem.apiKeyName}
+              packageName={tabItem.packageName}
+            />
           <CodeBlock language="python">{tabItem.text}</CodeBlock>
         </TabItem>
       ))}
@@ -4,9 +4,9 @@ yum -y update
 yum install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip -y
 
 # install quarto
-wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.3.450/quarto-1.3.450-linux-amd64.tar.gz
-tar -xzf quarto-1.3.450-linux-amd64.tar.gz
-export PATH=$PATH:$(pwd)/quarto-1.3.450/bin/
+wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.4.552/quarto-1.4.552-linux-amd64.tar.gz
+tar -xzf quarto-1.4.552-linux-amd64.tar.gz
+export PATH=$PATH:$(pwd)/quarto-1.4.552/bin/
 
 
 # setup python env