Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-28 17:38:36 +00:00.

docs[minor]: Add chat model tabs to docs pages (#19589)

commit ce0a588ae6, parent bd02b83acd
@ -40,6 +40,33 @@
"%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "c3d54f72",
"metadata": {},
"source": [
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9eed8e8",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4\")"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -60,10 +87,8 @@
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n",
"model = ChatOpenAI(model=\"gpt-4\")\n",
"output_parser = StrOutputParser()\n",
"\n",
"chain = prompt | model | output_parser\n",

@ -324,6 +349,16 @@
"For our next example, we want to run a retrieval-augmented generation chain to add some context when responding to questions."
]
},
{
"cell_type": "markdown",
"id": "b8fe8eb4",
"metadata": {},
"source": [
"```{=mdx}\n",
"<ChatModelTabs />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,

@ -338,7 +373,7 @@
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"vectorstore = DocArrayInMemorySearch.from_texts(\n",
" [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",

@ -352,7 +387,6 @@
"Question: {question}\n",
"\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"model = ChatOpenAI()\n",
"output_parser = StrOutputParser()\n",
"\n",
"setup_and_retrieval = RunnableParallel(\n",

@ -495,7 +529,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.11.0"
}
},
"nbformat": 4,
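Taken together, these hunks swap a hard-coded `ChatOpenAI` setup for a provider tab selector plus a hidden bootstrap cell (`# | output: false` / `# | echo: false` keep the notebook executable without rendering the cell). For reference, a minimal runnable sketch of the chain this notebook builds, assuming `OPENAI_API_KEY` is set; the `{"topic": ...}` input value is illustrative:

```python
# Sketch of the chain assembled in this notebook; requires langchain-openai
# and an OPENAI_API_KEY environment variable.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("tell me a short joke about {topic}")
model = ChatOpenAI(model="gpt-4")
output_parser = StrOutputParser()

chain = prompt | model | output_parser
# The input key matches the prompt variable; "bears" is an example topic.
print(chain.invoke({"topic": "bears"}))
```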
@ -10,7 +10,9 @@
"title: Why use LCEL\n",
"---\n",
"\n",
"import { ColumnContainer, Column } from \\\"@theme/Columns\\\";"
"```{=mdx}\n",
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
"```"
]
},
{

@ -53,10 +55,13 @@
"## Invoke\n",
"In the simplest case, we just want to pass in a topic string and get back a joke string:\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"```\n",
"\n",
"#### Without LCEL\n"
]
},

@ -95,9 +100,12 @@
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
"metadata": {},
"source": [
"\n",
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"\n"

@ -136,14 +144,19 @@
"id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
"metadata": {},
"source": [
"\n",
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"```\n",
"## Stream\n",
"If we want to stream results instead, we'll need to change our function:\n",
"\n",
"```{=mdx}\n",
"\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -184,10 +197,11 @@
"id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"\n",
"```\n",
"#### LCEL\n",
"\n"
]

@ -208,15 +222,19 @@
"id": "b9b41e78-ddeb-44d0-a58b-a0ea0c99a761",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"```\n",
"\n",
"## Batch\n",
"\n",
"If we want to run on a batch of inputs in parallel, we'll again need a new function:\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -244,10 +262,11 @@
"id": "9b3e9d34-6775-43c1-93d8-684b58e341ab",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"\n",
"```\n",
"#### LCEL\n",
"\n"
]

@ -267,15 +286,18 @@
"id": "cc5ba36f-eec1-4fc1-8cfe-fa242a7f7809",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"```\n",
"## Async\n",
"\n",
"If we need an asynchronous version:\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -311,10 +333,12 @@
"id": "2f209290-498c-4c17-839e-ee9002919846",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
" \n",
"```\n",
"\n",
"#### LCEL\n",
"\n"
]

@ -334,13 +358,16 @@
"id": "1f282129-99a3-40f4-b67f-2d0718b1bea9",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"```\n",
"## Async Batch\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -370,9 +397,11 @@
"id": "90691048-17ae-479d-83c2-859e33ddf3eb",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"\n"

@ -393,15 +422,19 @@
"id": "f6888245-1ebe-4768-a53b-e1fef6a8b379",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"```\n",
"\n",
"## LLM instead of chat model\n",
"\n",
"If we want to use a completion endpoint instead of a chat endpoint: \n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -433,9 +466,11 @@
"id": "45342cd6-58c2-4543-9392-773e05ef06e7",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"\n"

@ -466,15 +501,19 @@
"id": "ca115eaf-59ef-45c1-aac1-e8b0ce7db250",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"```\n",
"\n",
"## Different model provider\n",
"\n",
"If we want to use Anthropic instead of OpenAI: \n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -512,9 +551,11 @@
"id": "52a0c9f8-e316-42e1-af85-cabeba4b7059",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"\n"

@ -545,15 +586,19 @@
"id": "d7a91eee-d017-420d-b215-f663dcbf8ed2",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"```\n",
"\n",
"## Runtime configurability\n",
"\n",
"If we wanted to make the choice of chat model or LLM configurable at runtime:\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -634,9 +679,11 @@
"id": "d1530c5c-6635-4599-9483-6df357ca2d64",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### With LCEL\n",
"\n"

@ -694,15 +741,19 @@
"id": "370dd4d7-b825-40c4-ae3c-2693cba2f22a",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"```\n",
"\n",
"## Logging\n",
"\n",
"If we want to log our intermediate results:\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n",

@ -733,9 +784,11 @@
"id": "16bd20fd-43cd-4aaf-866f-a53d1f20312d",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"Every component has built-in integrations with LangSmith. If we set the following two environment variables, all chain traces are logged to LangSmith.\n",

@ -770,16 +823,19 @@
"id": "e25ce3c5-27a7-4954-9f0e-b94313597135",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>\n",
"```\n",
"\n",
"## Fallbacks\n",
"\n",
"If we wanted to add fallback logic, in case one model API is down:\n",
"\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n",

@ -823,9 +879,11 @@
"id": "f7ef59b5-2ce3-479e-a7ac-79e1e2f30e9c",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"\n"

@ -850,8 +908,10 @@
"id": "3af52d36-37c6-4d89-b515-95d7270bb96a",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>"
"</ColumnContainer>\n",
"```"
]
},
{

@ -863,8 +923,10 @@
"\n",
"Even in this simple case, our LCEL chain succinctly packs in a lot of functionality. As chains become more complex, this becomes especially valuable.\n",
"\n",
"```{=mdx}\n",
"<ColumnContainer>\n",
"<Column>\n",
"```\n",
"\n",
"#### Without LCEL\n",
"\n"

@ -1044,9 +1106,11 @@
"id": "9fb3d71d-8c69-4dc4-81b7-95cd46b271c2",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"\n",
"<Column>\n",
"```\n",
"\n",
"#### LCEL\n",
"\n"

@ -1101,8 +1165,10 @@
"id": "e3637d39",
"metadata": {},
"source": [
"```{=mdx}\n",
"</Column>\n",
"</ColumnContainer>"
"</ColumnContainer>\n",
"```"
]
},
{

@ -1140,4 +1206,5 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}
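Every hunk in this file applies the same mechanical change: wrap the raw `<Column>`/`<ColumnContainer>` JSX tags in `{=mdx}` fenced blocks so the notebook renders correctly in Docusaurus. On the LCEL side of each Without/With comparison the point is that one chain definition covers every calling style; a hedged sketch, reusing the `prompt`, `model`, and `output_parser` objects the notebook defines:

```python
# Sketch only: the LCEL column of this page in one place. Assumes
# prompt, model, and output_parser are defined as earlier in the notebook.
chain = prompt | model | output_parser

chain.invoke({"topic": "bears"})                      # Invoke
for chunk in chain.stream({"topic": "bears"}):        # Stream
    print(chunk, end="", flush=True)
chain.batch([{"topic": "bears"}, {"topic": "cats"}])  # Batch (parallel)
# await chain.ainvoke({"topic": "bears"})             # Async, inside a coroutine
```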
@ -12,6 +12,33 @@
"It can speed up your application by reducing the number of API calls you make to the LLM provider.\n"
]
},
{
"cell_type": "markdown",
"id": "289b31de",
"metadata": {},
"source": [
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6641f37",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -19,10 +46,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.globals import set_llm_cache\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI()"
"# <!-- ruff: noqa: F821 -->\n",
"from langchain.globals import set_llm_cache"
]
},
{
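For context on what the edited cell enables: once a global cache is registered, repeated identical calls skip the provider API entirely. A hedged sketch, assuming the era-appropriate `langchain.cache.InMemoryCache` import path and an OpenAI key in the environment:

```python
# Sketch: global LLM caching as this page sets it up. The InMemoryCache
# import path is an assumption about this LangChain version.
from langchain.cache import InMemoryCache
from langchain.globals import set_llm_cache
from langchain_openai import ChatOpenAI

set_llm_cache(InMemoryCache())
llm = ChatOpenAI()

llm.invoke("Tell me a joke")  # first call goes to the provider
llm.invoke("Tell me a joke")  # identical call is answered from the cache
```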
@ -38,7 +38,6 @@ Before getting started make sure you have `langchain-core` installed.
%pip install -qU langchain-core langchain-openai
```

```python
import getpass
import os

@ -64,38 +63,26 @@ class Multiply(BaseModel):
b: int = Field(..., description="Second integer")
```

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

<Tabs>
<TabItem value="openai" label="OpenAI" default>
import ChatModelTabs from "@theme/ChatModelTabs";

Set up dependencies and API keys:
<ChatModelTabs
  customVarName="llm"
  fireworksParams={`model="accounts/fireworks/models/firefunction-v1", temperature=0`}
/>

```python
%pip install -qU langchain-openai
```

```python
os.environ["OPENAI_API_KEY"] = getpass.getpass()
```

We can use the `ChatOpenAI.bind_tools()` method to handle converting
`Multiply` to an OpenAI function and binding it to the model (i.e.,
We can use the `bind_tools()` method to handle converting
`Multiply` to a "function" and binding it to the model (i.e.,
passing it in each time the model is invoked).

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([Multiply])
llm_with_tools.invoke("what's 3 * 12")
```

``` text
```text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Q8ZQ97Qrj5zalugSkYMGV1Uo', 'function': {'arguments': '{"a":3,"b":12}', 'name': 'Multiply'}, 'type': 'function'}]})
```

@ -109,7 +96,7 @@ tool_chain = llm_with_tools | JsonOutputToolsParser()
tool_chain.invoke("what's 3 * 12")
```

``` text
```text
[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
```

@ -122,57 +109,10 @@ tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
tool_chain.invoke("what's 3 * 12")
```

``` text
```text
[Multiply(a=3, b=12)]
```

If we wanted to force that a tool is used (and that it is used only
once), we can set the `tool_choice` argument:

```python
llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")
llm_with_multiply.invoke(
    "make up some numbers if you really want but I'm not forcing you"
)
```

``` text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_f3DApOzb60iYjTfOhVFhDRMI', 'function': {'arguments': '{"a":5,"b":10}', 'name': 'Multiply'}, 'type': 'function'}]})
```

For more see the [ChatOpenAI API
reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.bind_tools).

</TabItem>
<TabItem value="fireworks" label="Fireworks">

Install dependencies and set API keys:

```python
%pip install -qU langchain-fireworks
```

```python
os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
```

We can use the `ChatFireworks.bind_tools()` method to handle converting
`Multiply` to a valid function schema and binding it to the model (i.e.,
passing it in each time the model is invoked).

```python
from langchain_fireworks import ChatFireworks

llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)
llm_with_tools = llm.bind_tools([Multiply])
llm_with_tools.invoke("what's 3 * 12")
```

``` text
AIMessage(content='Three multiplied by twelve is 36.')
```

If our model isn’t using the tool, as is the case here, we can force
tool usage by specifying `tool_choice="any"` or by specifying the name
of the specific tool we want used:

@ -182,175 +122,12 @@ llm_with_tools = llm.bind_tools([Multiply], tool_choice="Multiply")
llm_with_tools.invoke("what's 3 * 12")
```

``` text
```text
AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_qIP2bJugb67LGvc6Zhwkvfqc', 'type': 'function', 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
```

We can add a tool parser to extract the tool calls from the generated
message to JSON:

```python
from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser

tool_chain = llm_with_tools | JsonOutputToolsParser()
tool_chain.invoke("what's 3 * 12")
```

``` text
[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
```

Or back to the original Pydantic class:

```python
from langchain_core.output_parsers.openai_tools import PydanticToolsParser

tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
tool_chain.invoke("what's 3 * 12")
```

``` text
[Multiply(a=3, b=12)]
```

For more see the [ChatFireworks](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html#langchain_fireworks.chat_models.ChatFireworks.bind_tools) reference.

</TabItem>
<TabItem value="mistral" label="Mistral">

Install dependencies and set API keys:

```python
%pip install -qU langchain-mistralai
```

```python
os.environ["MISTRAL_API_KEY"] = getpass.getpass()
```

We can use the `ChatMistralAI.bind_tools()` method to handle converting
`Multiply` to a valid function schema and binding it to the model (i.e.,
passing it in each time the model is invoked).

```python
from langchain_mistralai import ChatMistralAI

llm = ChatMistralAI(model="mistral-large-latest", temperature=0)
llm_with_tools = llm.bind_tools([Multiply])
llm_with_tools.invoke("what's 3 * 12")
```

``` text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'null', 'type': <ToolType.function: 'function'>, 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
```

We can add a tool parser to extract the tool calls from the generated
message to JSON:

```python
from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser

tool_chain = llm_with_tools | JsonOutputToolsParser()
tool_chain.invoke("what's 3 * 12")
```

``` text
[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
```

Or back to the original Pydantic class:

```python
from langchain_core.output_parsers.openai_tools import PydanticToolsParser

tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
tool_chain.invoke("what's 3 * 12")
```

``` text
[Multiply(a=3, b=12)]
```

We can force tool usage by specifying `tool_choice="any"`:

```python
llm_with_tools = llm.bind_tools([Multiply], tool_choice="any")
llm_with_tools.invoke("I don't even want you to use the tool")
```

``` text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'null', 'type': <ToolType.function: 'function'>, 'function': {'name': 'Multiply', 'arguments': '{"a": 5, "b": 7}'}}]})
```

For more see the [ChatMistralAI API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html#langchain_mistralai.chat_models.ChatMistralAI).

</TabItem>
<TabItem value="together" label="Together">

Since TogetherAI is a drop-in replacement for OpenAI, we can just use
the OpenAI integration.

Install dependencies and set API keys:

```python
%pip install -qU langchain-openai
```

```python
os.environ["TOGETHER_API_KEY"] = getpass.getpass()
```

We can use the `ChatOpenAI.bind_tools()` method to handle converting
`Multiply` to a valid function schema and binding it to the model (i.e.,
passing it in each time the model is invoked).

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
llm_with_tools = llm.bind_tools([Multiply])
llm_with_tools.invoke("what's 3 * 12")
```

``` text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_4tc61dp0478zafqe33hfriee', 'function': {'arguments': '{"a":3,"b":12}', 'name': 'Multiply'}, 'type': 'function'}]})
```

We can add a tool parser to extract the tool calls from the generated
message to JSON:

```python
from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser

tool_chain = llm_with_tools | JsonOutputToolsParser()
tool_chain.invoke("what's 3 * 12")
```

``` text
[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
```

Or back to the original Pydantic class:

```python
from langchain_core.output_parsers.openai_tools import PydanticToolsParser

tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
tool_chain.invoke("what's 3 * 12")
```

``` text
[Multiply(a=3, b=12)]
```

If we wanted to force that a tool is used (and that it is used only
once), we can set the `tool_choice` argument:
once), we can set the `tool_choice` argument to the name of the tool:

```python
llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")

@ -359,16 +136,13 @@ llm_with_multiply.invoke(
)
```

``` text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6k6d0gr3jhqil2kqf7sgeusl', 'function': {'arguments': '{"a":5,"b":7}', 'name': 'Multiply'}, 'type': 'function'}]})
```text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_f3DApOzb60iYjTfOhVFhDRMI', 'function': {'arguments': '{"a":5,"b":10}', 'name': 'Multiply'}, 'type': 'function'}]})
```

For more see the [ChatOpenAI API
reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.bind_tools).

</TabItem>
</Tabs>
## Defining function schemas
In case you need to access function schemas directly, LangChain has a built-in converter that can turn

@ -395,7 +169,7 @@ def multiply(a: int, b: int) -> int:
print(json.dumps(convert_to_openai_tool(multiply), indent=2))
```

``` text
```text
{
  "type": "function",
  "function": {

@ -438,7 +212,7 @@ class multiply(BaseModel):
print(json.dumps(convert_to_openai_tool(multiply), indent=2))
```

``` text
```text
{
  "type": "function",
  "function": {

@ -493,7 +267,7 @@ class Multiply(BaseTool):
print(json.dumps(convert_to_openai_tool(Multiply()), indent=2))
```

``` text
```text
{
  "type": "function",
  "function": {
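The per-provider walkthroughs removed above all reduce to one flow, which survives the tab consolidation. A hedged sketch of that flow with an OpenAI-style model; the `pydantic_v1` import path is an assumption about this LangChain version:

```python
# Sketch: the end-to-end tool-calling flow this page demonstrates.
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.pydantic_v1 import BaseModel, Field  # assumed import path
from langchain_openai import ChatOpenAI

class Multiply(BaseModel):
    """Multiply two integers together."""
    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")

llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
# tool_choice forces the model to call Multiply exactly once.
llm_with_tools = llm.bind_tools([Multiply], tool_choice="Multiply")
tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
print(tool_chain.invoke("what's 3 * 12"))  # -> [Multiply(a=3, b=12)]
```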
@ -22,32 +22,19 @@
"While chat models use language models under the hood, the interface they use is a bit different.\n",
"Rather than using a \"text in, text out\" API, they use an interface where \"chat messages\" are the inputs and outputs.\n",
"\n",
"## Setup\n",
"\n",
"For this example we'll need to install the OpenAI partner package:\n",
"\n",
"```bash\n",
"pip install langchain-openai\n",
"```\n",
"\n",
"Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:\n",
"\n",
"```bash\n",
"export OPENAI_API_KEY=\"...\"\n",
"```\n",
"If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:\n"
"## Setup\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"id": "e230abb2-bc84-438b-b9ff-dd124acb1375",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"chat = ChatOpenAI(openai_api_key=\"...\")"
"<ChatModelTabs customVarName=\"chat\" />\n",
"```"
]
},
{

@ -55,19 +42,25 @@
"id": "609bbd5c-e5a1-4166-89e1-d6c52054860d",
"metadata": {},
"source": [
"Otherwise you can initialize without any params:"
"If you'd prefer not to set an environment variable you can pass the key in directly via the api key arg named parameter when initiating the chat model class:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"cell_type": "markdown",
"id": "3d9dbf70-2397-4d6b-87ec-3e6d4699f3df",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI()"
"```{=mdx}\n",
"<ChatModelTabs\n",
" openaiParams={`model=\"gpt-3.5-turbo-0125\", openai_api_key=\"...\"`}\n",
" anthropicParams={`model=\"claude-3-sonnet-20240229\", anthropic_api_key=\"...\"`}\n",
" fireworksParams={`model=\"accounts/fireworks/models/mixtral-8x7b-instruct\", fireworks_api_key=\"...\"`}\n",
" mistralParams={`model=\"mistral-large-latest\", mistral_api_key=\"...\"`}\n",
" googleParams={`model=\"gemini-pro\", google_api_key=\"...\"`}\n",
" togetherParams={`, together_api_key=\"...\"`}\n",
" customVarName=\"chat\"\n",
"/>\n",
"```"
]
},
{

@ -108,6 +101,21 @@
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "570dae71",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 11,
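The new tabbed cell above demonstrates per-provider key arguments. Concretely, the OpenAI tab renders to roughly the following; the model pin comes from the tab parameters shown above and is illustrative:

```python
# Sketch of what the OpenAI tab expands to, with the key passed directly
# instead of read from the environment.
from langchain_openai import ChatOpenAI

chat = ChatOpenAI(model="gpt-3.5-turbo-0125", openai_api_key="...")
```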
@ -516,7 +516,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.0"
}
},
"nbformat": 4,
@ -24,7 +24,7 @@ introduction](../../../docs/use_cases/question_answering/), which has
two main components:

**Indexing**: a pipeline for ingesting data from a source and indexing
it. *This usually happens offline.*
it. _This usually happens offline._

**Retrieval and generation**: the actual RAG chain, which takes the user
query at run time and retrieves the relevant data from the index, then

@ -77,7 +77,7 @@ We’ll use the following packages:
%pip install --upgrade --quiet langchain langchain-community langchainhub langchain-openai chromadb bs4
```

We need to set environment variable `OPENAI_API_KEY`, which can be done
We need to set environment variable `OPENAI_API_KEY` for the embeddings model, which can be done
directly or loaded from a `.env` file like so:

```python

@ -125,10 +125,13 @@ from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
```

import ChatModelTabs from "@theme/ChatModelTabs";

<ChatModelTabs customVarName="llm" />

```python
# Load, chunk and index the contents of the blog.

@ -149,8 +152,6 @@ vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings
# Retrieve and generate using the relevant snippets of the blog.
retriever = vectorstore.as_retriever()
prompt = hub.pull("rlm/rag-prompt")
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

@ -164,12 +165,11 @@ rag_chain = (
)
```

```python
rag_chain.invoke("What is Task Decomposition?")
```

``` text
```text
'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It can be done through prompting techniques like Chain of Thought or Tree of Thoughts, or by using task-specific instructions or human inputs. Task decomposition helps agents plan ahead and manage complicated tasks more effectively.'
```

@ -219,12 +219,11 @@ loader = WebBaseLoader(
docs = loader.load()
```

```python
len(docs[0].page_content)
```

``` text
```text
42824
```

@ -232,7 +231,7 @@
print(docs[0].page_content[:500])
```

``` text
```text

LLM Powered Autonomous Agents

@ -249,12 +248,13 @@ In

`DocumentLoader`: Object that loads data from a source as list of
`Documents`.

- [Docs](../../../docs/modules/data_connection/document_loaders/):
Detailed documentation on how to use `DocumentLoaders`.
  Detailed documentation on how to use `DocumentLoaders`.
- [Integrations](../../../docs/integrations/document_loaders/): 160+
integrations to choose from.
  integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseLoader.html):
API reference for the base interface.
  API reference for the base interface.

## 2. Indexing: Split {#indexing-split}

@ -289,12 +289,11 @@ text_splitter = RecursiveCharacterTextSplitter(
all_splits = text_splitter.split_documents(docs)
```

```python
len(all_splits)
```

``` text
```text
66
```

@ -302,7 +301,7 @@ len(all_splits)
len(all_splits[0].page_content)
```

``` text
```text
969
```

@ -310,7 +309,7 @@ len(all_splits[0].page_content)
all_splits[10].metadata
```

``` text
```text
{'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/',
 'start_index': 7056}
```

@ -319,15 +318,17 @@ all_splits[10].metadata

`TextSplitter`: Object that splits a list of `Document`s into smaller
chunks. Subclass of `DocumentTransformer`s.

- Explore `Context-aware splitters`, which keep the location (“context”) of each
split in the original `Document`: - [Markdown
files](../../../docs/modules/data_connection/document_transformers/markdown_header_metadata)
  split in the original `Document`: - [Markdown
  files](../../../docs/modules/data_connection/document_transformers/markdown_header_metadata)
- [Code (py or js)](../../../docs/integrations/document_loaders/source_code)
- [Scientific papers](../../../docs/integrations/document_loaders/grobid)
- [Interface](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.TextSplitter.html): API reference for the base interface.

`DocumentTransformer`: Object that performs a transformation on a list
of `Document`s.

- [Docs](../../../docs/modules/data_connection/document_transformers/): Detailed documentation on how to use `DocumentTransformers`
- [Integrations](../../../docs/integrations/document_transformers/)
- [Interface](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.transformers.BaseDocumentTransformer.html): API reference for the base interface.

@ -361,12 +362,14 @@ vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbedd

`Embeddings`: Wrapper around a text embedding model, used for converting
text to embeddings.

- [Docs](../../../docs/modules/data_connection/text_embedding): Detailed documentation on how to use embeddings.
- [Integrations](../../../docs/integrations/text_embedding/): 30+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/embeddings/langchain_core.embeddings.Embeddings.html): API reference for the base interface.

`VectorStore`: Wrapper around a vector database, used for storing and
querying embeddings.

- [Docs](../../../docs/modules/data_connection/vectorstores/): Detailed documentation on how to use vector stores.
- [Integrations](../../../docs/integrations/vectorstores/): 40+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html): API reference for the base interface.

@ -399,17 +402,15 @@ facilitate retrieval. Any `VectorStore` can easily be turned into a
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})
```

```python
retrieved_docs = retriever.invoke("What are the approaches to Task Decomposition?")
```

```python
len(retrieved_docs)
```

``` text
```text
6
```

@ -417,7 +418,7 @@ len(retrieved_docs)
print(retrieved_docs[0].page_content)
```

``` text
```text
Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.
Task decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.
```

@ -460,34 +461,13 @@ parses the output.
We’ll use the gpt-3.5-turbo OpenAI chat model, but any LangChain `LLM`
or `ChatModel` could be substituted in.

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

<Tabs>
<TabItem value="openai" label="OpenAI" default>

```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-3.5-turbo-0125", temperature=0)
```

</TabItem>
<TabItem value="local" label="Anthropic">

```python
%pip install -qU langchain-anthropic
```

```python
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024)
```

</TabItem>
</Tabs>
<ChatModelTabs
  customVarName="llm"
  anthropicParams={`model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024`}
/>

We’ll use a prompt for RAG that is checked into the LangChain prompt hub
([here](https://smith.langchain.com/hub/rlm/rag-prompt)).

@ -498,7 +478,6 @@ from langchain import hub
prompt = hub.pull("rlm/rag-prompt")
```

```python
example_messages = prompt.invoke(
    {"context": "filler context", "question": "filler question"}

@ -506,7 +485,7 @@ example_messages = prompt.invoke(
example_messages
```

``` text
```text
[HumanMessage(content="You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\nQuestion: filler question \nContext: filler context \nAnswer:")]
```

@ -514,7 +493,7 @@ example_messages
print(example_messages[0].content)
```

``` text
```text
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: filler question
Context: filler context

@ -543,13 +522,12 @@ rag_chain = (
)
```

```python
for chunk in rag_chain.stream("What is Task Decomposition?"):
    print(chunk, end="", flush=True)
```

``` text
```text
Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks, allowing for easier interpretation and execution by autonomous agents or models. Task decomposition can be done through various methods, such as using prompting techniques, task-specific instructions, or human inputs.
```

@ -562,11 +540,13 @@ trace](https://smith.langchain.com/public/1799e8db-8a6d-4eb2-84d5-46e8d7d5a99b/r

`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages
and returns a message.

- [Docs](../../../docs/modules/model_io/chat/)
- [Integrations](../../../docs/integrations/chat/): 25+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html): API reference for the base interface.

`LLM`: A text-in-text-out LLM. Takes in a string and returns a string.

- [Docs](../../../docs/modules/model_io/llms)
- [Integrations](../../../docs/integrations/llms): 75+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.llms.BaseLLM.html): API reference for the base interface.

@ -605,7 +585,7 @@ rag_chain = (
rag_chain.invoke("What is Task Decomposition?")
```

``` text
```text
'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks, allowing for a more systematic and organized approach to problem-solving. Thanks for asking!'
```
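Pulling the retained pieces together, the post-change RAG cell reads roughly like the sketch below; `retriever`, `prompt`, and `llm` come from earlier hunks on this page, with `llm` now defined by whichever provider tab the reader picks:

```python
# Sketch: the RAG chain as assembled after this change. Assumes retriever,
# prompt, and llm are already defined as shown earlier on the page.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

def format_docs(docs):
    # Join retrieved documents into a single context string.
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
rag_chain.invoke("What is Task Decomposition?")
```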
@ -1,4 +1,4 @@
/* eslint-disable react/jsx-props-no-spreading */
/* eslint-disable react/jsx-props-no-spreading, react/destructuring-assignment */
import React from "react";
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

@ -20,7 +20,24 @@ os.environ["${apiKeyName}"] = getpass.getpass()`;
}

/**
 * @param {{ openaiParams?: string, anthropicParams?: string, fireworksParams?: string, mistralParams?: string, googleParams?: string, hideOpenai?: boolean, hideAnthropic?: boolean, hideFireworks?: boolean, hideMistral?: boolean, hideGoogle?: boolean }} props
 * @typedef {Object} ChatModelTabsProps - Component props.
 * @property {string} [openaiParams] - Parameters for OpenAI chat model. Defaults to `model="gpt-3.5-turbo-0125"`
 * @property {string} [anthropicParams] - Parameters for Anthropic chat model. Defaults to `model="claude-3-sonnet-20240229"`
 * @property {string} [fireworksParams] - Parameters for Fireworks chat model. Defaults to `model="accounts/fireworks/models/mixtral-8x7b-instruct"`
 * @property {string} [mistralParams] - Parameters for Mistral chat model. Defaults to `model="mistral-large-latest"`
 * @property {string} [googleParams] - Parameters for Google chat model. Defaults to `model="gemini-pro"`
 * @property {string} [togetherParams] - Parameters for Together chat model. Defaults to the Together base URL, `TOGETHER_API_KEY`, and `model="mistralai/Mixtral-8x7B-Instruct-v0.1"`
 * @property {boolean} [hideOpenai] - Whether or not to hide OpenAI chat model.
 * @property {boolean} [hideAnthropic] - Whether or not to hide Anthropic chat model.
 * @property {boolean} [hideFireworks] - Whether or not to hide Fireworks chat model.
 * @property {boolean} [hideMistral] - Whether or not to hide Mistral chat model.
 * @property {boolean} [hideGoogle] - Whether or not to hide Google chat model.
 * @property {boolean} [hideTogether] - Whether or not to hide Together chat model.
 * @property {string} [customVarName] - Custom variable name for the model. Defaults to `model`.
 */

/**
 * @param {ChatModelTabsProps} props - Component props.
 */
export default function ChatModelTabs(props) {
  const {

@ -29,24 +46,36 @@ export default function ChatModelTabs(props) {
    fireworksParams,
    mistralParams,
    googleParams,
    togetherParams,
    hideOpenai,
    hideAnthropic,
    hideFireworks,
    hideMistral,
    hideGoogle,
    hideTogether,
    customVarName,
  } = props;

  const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`
  const anthropicParamsOrDefault = anthropicParams ?? `model="claude-3-sonnet-20240229"`
  const fireworksParamsOrDefault = fireworksParams ?? `model="accounts/fireworks/models/mixtral-8x7b-instruct"`
  const mistralParamsOrDefault = mistralParams ?? `model="mistral-large-latest"`
  const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`
  const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`;
  const anthropicParamsOrDefault =
    anthropicParams ?? `model="claude-3-sonnet-20240229"`;
  const fireworksParamsOrDefault =
    fireworksParams ??
    `model="accounts/fireworks/models/mixtral-8x7b-instruct"`;
  const mistralParamsOrDefault =
    mistralParams ?? `model="mistral-large-latest"`;
  const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`;
  const togetherParamsOrDefault =
    togetherParams ??
    `\n    base_url="https://api.together.xyz/v1",\n    api_key=os.environ["TOGETHER_API_KEY"],\n    model="mistralai/Mixtral-8x7B-Instruct-v0.1",`;

  const llmVarName = customVarName ?? "model";

  const tabItems = [
    {
      value: "OpenAI",
      label: "OpenAI",
      text: `from langchain_openai import ChatOpenAI\n\nmodel = ChatOpenAI(${openAIParamsOrDefault})`,
      text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${openAIParamsOrDefault})`,
      apiKeyName: "OPENAI_API_KEY",
      packageName: "langchain-openai",
      default: true,

@ -55,7 +84,7 @@ export default function ChatModelTabs(props) {
    {
      value: "Anthropic",
      label: "Anthropic",
      text: `from langchain_anthropic import ChatAnthropic\n\nmodel = ChatAnthropic(${anthropicParamsOrDefault})`,
      text: `from langchain_anthropic import ChatAnthropic\n\n${llmVarName} = ChatAnthropic(${anthropicParamsOrDefault})`,
      apiKeyName: "ANTHROPIC_API_KEY",
      packageName: "langchain-anthropic",
      default: false,

@ -64,7 +93,7 @@ export default function ChatModelTabs(props) {
    {
      value: "FireworksAI",
      label: "FireworksAI",
      text: `from langchain_fireworks import ChatFireworks\n\nmodel = ChatFireworks(${fireworksParamsOrDefault})`,
      text: `from langchain_fireworks import ChatFireworks\n\n${llmVarName} = ChatFireworks(${fireworksParamsOrDefault})`,
      apiKeyName: "FIREWORKS_API_KEY",
      packageName: "langchain-fireworks",
      default: false,

@ -73,7 +102,7 @@ export default function ChatModelTabs(props) {
    {
      value: "MistralAI",
      label: "MistralAI",
      text: `from langchain_mistralai import ChatMistralAI\n\nmodel = ChatMistralAI(${mistralParamsOrDefault})`,
      text: `from langchain_mistralai import ChatMistralAI\n\n${llmVarName} = ChatMistralAI(${mistralParamsOrDefault})`,
      apiKeyName: "MISTRAL_API_KEY",
      packageName: "langchain-mistralai",
      default: false,

@ -82,19 +111,37 @@ export default function ChatModelTabs(props) {
    {
      value: "Google",
      label: "Google",
      text: `from langchain_google_genai import ChatGoogleGenerativeAI\n\nmodel = ChatGoogleGenerativeAI(${googleParamsOrDefault})`,
      text: `from langchain_google_genai import ChatGoogleGenerativeAI\n\n${llmVarName} = ChatGoogleGenerativeAI(${googleParamsOrDefault})`,
      apiKeyName: "GOOGLE_API_KEY",
      packageName: "langchain-google-genai",
      default: false,
      shouldHide: hideGoogle,
    }
  ]
    },
    {
      value: "TogetherAI",
      label: "TogetherAI",
      text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${togetherParamsOrDefault})`,
      apiKeyName: "TOGETHER_API_KEY",
      packageName: "langchain-openai",
      default: false,
      shouldHide: hideTogether,
    },
  ];

  return (
    <Tabs groupId="modelTabs">
      {tabItems.filter((tabItem) => !tabItem.shouldHide).map((tabItem) => (
        <TabItem value={tabItem.value} label={tabItem.label} default={tabItem.default}>
          <Setup apiKeyName={tabItem.apiKeyName} packageName={tabItem.packageName} />
      {tabItems
        .filter((tabItem) => !tabItem.shouldHide)
        .map((tabItem) => (
          <TabItem
            value={tabItem.value}
            label={tabItem.label}
            default={tabItem.default}
          >
            <Setup
              apiKeyName={tabItem.apiKeyName}
              packageName={tabItem.packageName}
            />
          <CodeBlock language="python">{tabItem.text}</CodeBlock>
        </TabItem>
      ))}
@ -4,9 +4,9 @@ yum -y update
yum install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip -y

# install quarto
wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.3.450/quarto-1.3.450-linux-amd64.tar.gz
tar -xzf quarto-1.3.450-linux-amd64.tar.gz
export PATH=$PATH:$(pwd)/quarto-1.3.450/bin/
wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.4.552/quarto-1.4.552-linux-amd64.tar.gz
tar -xzf quarto-1.4.552-linux-amd64.tar.gz
export PATH=$PATH:$(pwd)/quarto-1.4.552/bin/


# setup python env