mirror of https://github.com/hwchase17/langchain.git
synced 2026-02-06 17:20:16 +00:00

Compare commits

59 Commits

| Author | SHA1 | Date |
|---|---|---|
| | b9045f7e0d | |
| | 7b4882a2f4 | |
| | 5d4b6e4d4e | |
| | 94ae126747 | |
| | ae5695ad32 | |
| | cacf4091c0 | |
| | 54f9e4287f | |
| | c331009440 | |
| | 6086292252 | |
| | b3916f74a7 | |
| | f46f1d28af | |
| | 7728a848d0 | |
| | f3da4dc6ba | |
| | ae1b589f60 | |
| | 6a20f07f0d | |
| | fb2d7afe71 | |
| | 1ad7973cc6 | |
| | 5f73d06502 | |
| | 248c297f1b | |
| | 213c2e33e5 | |
| | 2e0219cac0 | |
| | 966611bbfa | |
| | 7198a1cb22 | |
| | 5bb2952860 | |
| | c658f0aed3 | |
| | 309d86e339 | |
| | 6ad360bdef | |
| | 5198d6f541 | |
| | a5d003f0c9 | |
| | 924b7ecf89 | |
| | fc19d14a65 | |
| | b9ad214801 | |
| | be7de427ca | |
| | e2a7fed890 | |
| | 12dc7f26cc | |
| | 7129f23511 | |
| | f273c50d62 | |
| | 1b89a438cf | |
| | cc70565886 | |
| | 374e510f94 | |
| | 28efbb05bf | |
| | d2f882158f | |
| | a80897478e | |
| | 57609845df | |
| | 7f76a1189c | |
| | 2ba1128095 | |
| | f9ddcb5705 | |
| | fa6826e417 | |
| | bd0bf4e0a9 | |
| | 9194a8be89 | |
| | e3df8ab6dc | |
| | 0ffeabd14f | |
| | 499e54edda | |
| | f62dbb018b | |
| | 18b1466893 | |
| | 2824f36401 | |
| | d4f719c34b | |
| | 97c3544a1e | |
| | b69b551c8b | |
@@ -47,7 +47,7 @@ good code into the codebase.

### 🏭 Release process

 As of now, LangChain has an ad hoc release process: releases are cut with high frequency by
-a developer and published to [PyPI](https://pypi.org/project/ruff/).
+a developer and published to [PyPI](https://pypi.org/project/langchain/).

 LangChain follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
 even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).

README.md (20 changed lines)
@@ -4,6 +4,9 @@

 [](https://github.com/hwchase17/langchain/actions/workflows/lint.yml) [](https://github.com/hwchase17/langchain/actions/workflows/test.yml) [](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml) [](https://opensource.org/licenses/MIT) [](https://twitter.com/langchainai) [](https://discord.gg/6adMQxSpJS)

+**Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
+Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
+
 ## Quick Install

 `pip install langchain`

@@ -15,7 +18,22 @@ developers to build applications that they previously could not.

 But using these LLMs in isolation is often not enough to
 create a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.

-This library is aimed at assisting in the development of those types of applications.
+This library is aimed at assisting in the development of those types of applications. Common examples of these types of applications include:
+
+**❓ Question Answering over specific documents**
+
+- [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/question_answering.html)
+- End-to-end Example: [Question Answering over Notion Database](https://github.com/hwchase17/notion-qa)
+
+**💬 Chatbots**
+
+- [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/chatbots.html)
+- End-to-end Example: [Chat-LangChain](https://github.com/hwchase17/chat-langchain)
+
+**🤖 Agents**
+
+- [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/agents.html)
+- End-to-end Example: [GPT+WolframAlpha](https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain)

 ## 📖 Documentation
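To make the Quick Install above concrete, here is a minimal sketch of the basic pattern the README describes — an LLM combined with a prompt inside a chain. It uses the `PromptTemplate`, `OpenAI`, and `LLMChain` API that appears verbatim in the serialization notebook later in this diff; the question string is only illustrative, and an `OPENAI_API_KEY` is assumed to be set in the environment.

```python
# Minimal sketch of the LLM-plus-prompt pattern; API taken from the
# serialization notebook later in this diff. Requires OPENAI_API_KEY.
from langchain import PromptTemplate, OpenAI, LLMChain

template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0))

# Run the chain on a single question (illustrative input).
print(chain.run("What is 2 raised to the 0.12 power?"))
```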
@@ -7,7 +7,22 @@ But using these LLMs in isolation is often not enough to
 create a truly powerful app - the real power comes when you are able to
 combine them with other sources of computation or knowledge.

-This library is aimed at assisting in the development of those types of applications.
+This library is aimed at assisting in the development of those types of applications. Common examples of these types of applications include:
+
+**❓ Question Answering over specific documents**
+
+- `Documentation <./use_cases/question_answering.html>`_
+- End-to-end Example: `Question Answering over Notion Database <https://github.com/hwchase17/notion-qa>`_
+
+**💬 Chatbots**
+
+- `Documentation <./use_cases/chatbots.html>`_
+- End-to-end Example: `Chat-LangChain <https://github.com/hwchase17/chat-langchain>`_
+
+**🤖 Agents**
+
+- `Documentation <./use_cases/agents.html>`_
+- End-to-end Example: `GPT+WolframAlpha <https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain>`_

 Getting Started
 ----------------

@@ -147,6 +162,10 @@ Additional collection of resources we think may be useful as you develop your application!

 - `Discord <https://discord.gg/6adMQxSpJS>`_: Join us on our Discord to discuss all things LangChain!

+- `Tracing <./tracing.html>`_: A guide on using tracing in LangChain to visualize the execution of chains and agents.
+
+- `Production Support <https://forms.gle/57d8AmXBYp8PP8tZA>`_: As you move your LangChains into production, we'd love to offer more comprehensive support. Please fill out this form and we'll set up a dedicated support Slack channel.

 .. toctree::
    :maxdepth: 1

@@ -158,3 +177,6 @@ Additional collection of resources we think may be useful as you develop your application!
    ./glossary.md
    ./gallery.rst
    ./deployments.md
+   ./tracing.md
+   Discord <https://discord.gg/6adMQxSpJS>
+   Production Support <https://forms.gle/57d8AmXBYp8PP8tZA>
@@ -53,7 +53,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 2,
 "id": "becda2a1",
 "metadata": {},
 "outputs": [],
@@ -70,7 +70,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 3,
 "id": "339b1bb8",
 "metadata": {},
 "outputs": [],
@@ -99,7 +99,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 4,
 "id": "e21d2098",
 "metadata": {},
 "outputs": [
@@ -134,7 +134,6 @@
 ]
 },
 {
-"attachments": {},
 "cell_type": "markdown",
 "id": "5e028e6d",
 "metadata": {},
@@ -146,7 +145,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
+"execution_count": 5,
 "id": "9b1cc2a2",
 "metadata": {},
 "outputs": [],
@@ -156,17 +155,18 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": 7,
 "id": "e4f5092f",
 "metadata": {},
 "outputs": [],
 "source": [
-"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools)"
+"tool_names = [tool.name for tool in tools]\n",
+"agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 18,
+"execution_count": 8,
 "id": "490604e9",
 "metadata": {},
 "outputs": [],
@@ -176,7 +176,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 19,
+"execution_count": 9,
 "id": "653b1617",
 "metadata": {},
 "outputs": [
@@ -191,22 +191,23 @@
 "Action: Search\n",
 "Action Input: Population of Canada\u001b[0m\n",
 "Observation: \u001b[36;1m\u001b[1;3mCanada is a country in North America. Its ten provinces and three territories extend from the Atlantic Ocean to the Pacific Ocean and northward into the Arctic Ocean, covering over 9.98 million square kilometres, making it the world's second-largest country by total area.\u001b[0m\n",
-"Thought:\u001b[32;1m\u001b[1;3m I need to find out the exact population of Canada\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find out the population of Canada\n",
 "Action: Search\n",
-"Action Input: Population of Canada 2020\u001b[0m\n",
+"Action Input: Population of Canada\u001b[0m\n",
 "Observation: \u001b[36;1m\u001b[1;3mCanada is a country in North America. Its ten provinces and three territories extend from the Atlantic Ocean to the Pacific Ocean and northward into the Arctic Ocean, covering over 9.98 million square kilometres, making it the world's second-largest country by total area.\u001b[0m\n",
 "Thought:\u001b[32;1m\u001b[1;3m I now know the population of Canada\n",
-"Final Answer: Arrr, Canada be home to 37.59 million people!\u001b[0m\n",
-"\u001b[1m> Finished AgentExecutor chain.\u001b[0m\n"
+"Final Answer: Arrr, Canada be home to over 37 million people!\u001b[0m\n",
+"\n",
+"\u001b[1m> Finished chain.\u001b[0m\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"'Arrr, Canada be home to 37.59 million people!'"
+"'Arrr, Canada be home to over 37 million people!'"
 ]
 },
-"execution_count": 19,
+"execution_count": 9,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -361,7 +362,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 3",
+"display_name": "Python 3 (ipykernel)",
 "language": "python",
 "name": "python3"
 },
@@ -375,7 +376,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.8.12 (default, Feb 15 2022, 17:41:09) \n[Clang 12.0.5 (clang-1205.0.22.11)]"
+"version": "3.10.9"
 },
 "vscode": {
 "interpreter": {
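The hunks above replace `ZeroShotAgent(llm_chain=llm_chain, tools=tools)` with the `allowed_tools=` keyword, with the tools themselves now handed to a separate executor. As a hedged sketch of the full updated wiring: the `AgentExecutor.from_agent_and_tools` constructor is an assumption inferred from the "AgentExecutor chain" output shown above, not something this diff spells out, and `llm_chain`/`tools` are the objects built earlier in that notebook.

```python
# Sketch of the construction pattern implied by this diff.
# AgentExecutor.from_agent_and_tools is assumed from the "AgentExecutor
# chain" output above; llm_chain and tools come from earlier cells.
from langchain.agents import ZeroShotAgent, AgentExecutor

tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)

# The executor pairs the (serializable) agent with the concrete tools.
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.run("How many people live in Canada?")
```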
@@ -10,15 +10,17 @@
 "When constructing your own agent, you will need to provide it with a list of Tools that it can use. A Tool is defined as below.\n",
 "\n",
 "```python\n",
-"class Tool(NamedTuple):\n",
+"@dataclass \n",
+"class Tool:\n",
+"    \"\"\"Interface for tools.\"\"\"\n",
+"\n",
 "    name: str\n",
 "    func: Callable[[str], str]\n",
 "    description: Optional[str] = None\n",
 "    return_direct: bool = True\n",
 "```\n",
 "\n",
-"The two required components of a Tool are the name and then the tool itself. A tool description is optional, as it is needed for some agents but not all."
+"The two required components of a Tool are the name and then the tool itself. A tool description is optional, as it is needed for some agents but not all. You can create these tools directly, but we also provide a decorator to easily convert any function into a tool."
 ]
 },
 {
@@ -151,6 +153,94 @@
 "agent.run(\"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\")"
 ]
 },
+{
+"cell_type": "markdown",
+"id": "824eaf74",
+"metadata": {},
+"source": [
+"## Using the `tool` decorator\n",
+"\n",
+"To make it easier to define custom tools, a `@tool` decorator is provided. This decorator can be used to quickly create a `Tool` from a simple function. The decorator uses the function name as the tool name by default, but this can be overridden by passing a string as the first argument. Additionally, the decorator will use the function's docstring as the tool's description."
+]
+},
+{
+"cell_type": "code",
+"execution_count": 1,
+"id": "8f15307d",
+"metadata": {},
+"outputs": [],
+"source": [
+"from langchain.agents import tool\n",
+"\n",
+"@tool\n",
+"def search_api(query: str) -> str:\n",
+"    \"\"\"Searches the API for the query.\"\"\"\n",
+"    return \"Results\""
+]
+},
+{
+"cell_type": "code",
+"execution_count": 2,
+"id": "0a23b91b",
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"Tool(name='search_api', func=<function search_api at 0x10dad7d90>, description='search_api(query: str) -> str - Searches the API for the query.', return_direct=False)"
+]
+},
+"execution_count": 2,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"search_api"
+]
+},
+{
+"cell_type": "markdown",
+"id": "cc6ee8c1",
+"metadata": {},
+"source": [
+"You can also provide arguments like the tool name and whether to return directly."
+]
+},
+{
+"cell_type": "code",
+"execution_count": 3,
+"id": "28cdf04d",
+"metadata": {},
+"outputs": [],
+"source": [
+"@tool(\"search\", return_direct=True)\n",
+"def search_api(query: str) -> str:\n",
+"    \"\"\"Searches the API for the query.\"\"\"\n",
+"    return \"Results\""
+]
+},
+{
+"cell_type": "code",
+"execution_count": 4,
+"id": "1085a4bd",
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"Tool(name='search', func=<function search_api at 0x112301bd0>, description='search(query: str) -> str - Searches the API for the query.', return_direct=True)"
+]
+},
+"execution_count": 4,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"search_api"
+]
+},
 {
 "cell_type": "markdown",
 "id": "1d0430d6",
@@ -432,7 +522,7 @@
 },
 "vscode": {
 "interpreter": {
-"hash": "cb23c3a7a387ab03496baa08507270f8e0861b23170e79d5edc545893cdca840"
+"hash": "e90c8aa204a57276aa905271aff2d11799d0acb3547adabc5892e639a5e45e34"
 }
 }
 },
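The markdown cell above notes that tools can also be created directly rather than via the decorator. A minimal sketch of the direct construction, using the `Tool` fields from the dataclass shown above and the `from langchain.agents import Tool` import that appears later in this diff; the function body is a placeholder:

```python
# Sketch: constructing a Tool directly instead of via @tool. The fields
# (name, func, description) come from the Tool dataclass above; the
# function body is a placeholder.
from langchain.agents import Tool

def search_api(query: str) -> str:
    return "Results"

search_tool = Tool(
    name="Search",
    func=search_api,
    description="Searches the API for the query.",
)
```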
docs/modules/agents/examples/load_from_hub.ipynb (new file, 108 lines)
@@ -0,0 +1,108 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "991b1cc1",
"metadata": {},
"source": [
"# Loading from LangChainHub\n",
"\n",
"This notebook covers how to load agents from [LangChainHub](https://github.com/hwchase17/langchain-hub)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "bd4450a2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3m2016 · SUI · Stan Wawrinka ; 2017 · ESP · Rafael Nadal ; 2018 · SRB · Novak Djokovic ; 2019 · ESP · Rafael Nadal.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mSo the reigning men's U.S. Open champion is Rafael Nadal.\n",
"Follow up: What is Rafael Nadal's hometown?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mIn 2016, he once again showed his deep ties to Mallorca and opened the Rafa Nadal Academy in his hometown of Manacor.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mSo the final answer is: Manacor, Mallorca, Spain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Manacor, Mallorca, Spain.'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import OpenAI, SerpAPIWrapper\n",
"from langchain.agents import initialize_agent, Tool\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
"    Tool(\n",
"        name=\"Intermediate Answer\",\n",
"        func=search.run\n",
"    )\n",
"]\n",
"\n",
"self_ask_with_search = initialize_agent(tools, llm, agent_path=\"lc://agents/self-ask-with-search/agent.json\", verbose=True)\n",
"self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "3aede965",
"metadata": {},
"source": [
"# Pinning Dependencies\n",
"\n",
"Specific versions of LangChainHub agents can be pinned with the `lc@<ref>://` syntax."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e679f7b6",
"metadata": {},
"outputs": [],
"source": [
"self_ask_with_search = initialize_agent(tools, llm, agent_path=\"lc@2826ef9e8acdf88465e1e5fc8a7bf59e0f9d0a85://agents/self-ask-with-search/agent.json\", verbose=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

docs/modules/agents/examples/serialization.ipynb (new file, 148 lines)
@@ -0,0 +1,148 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "bfe18e28",
"metadata": {},
"source": [
"# Serialization\n",
"\n",
"This notebook goes over how to serialize agents. For this notebook, it is important to understand the distinction we draw between `agents` and `tools`. An agent is the LLM powered decision maker that decides which actions to take and in which order. Tools are various instruments (functions) an agent has access to, through which an agent can interact with the outside world. When people generally use agents, they primarily talk about using an agent WITH tools. However, when we talk about serialization of agents, we are talking about the agent by itself. We plan to add support for serializing an agent WITH tools sometime in the future.\n",
"\n",
"Let's start by creating an agent with tools as we normally do:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "eb729f16",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import load_tools\n",
"from langchain.agents import initialize_agent\n",
"from langchain.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
"agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)"
]
},
{
"cell_type": "markdown",
"id": "0578f566",
"metadata": {},
"source": [
"Let's now serialize the agent. To be explicit that we are serializing ONLY the agent, we will call the `save_agent` method."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "dc544de6",
"metadata": {},
"outputs": [],
"source": [
"agent.save_agent('agent.json')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "62dd45bf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
"    \"llm_chain\": {\r\n",
"        \"memory\": null,\r\n",
"        \"verbose\": false,\r\n",
"        \"prompt\": {\r\n",
"            \"input_variables\": [\r\n",
"                \"input\",\r\n",
"                \"agent_scratchpad\"\r\n",
"            ],\r\n",
"            \"output_parser\": null,\r\n",
"            \"template\": \"Answer the following questions as best you can. You have access to the following tools:\\n\\nSearch: A search engine. Useful for when you need to answer questions about current events. Input should be a search query.\\nCalculator: Useful for when you need to answer questions about math.\\n\\nUse the following format:\\n\\nQuestion: the input question you must answer\\nThought: you should always think about what to do\\nAction: the action to take, should be one of [Search, Calculator]\\nAction Input: the input to the action\\nObservation: the result of the action\\n... (this Thought/Action/Action Input/Observation can repeat N times)\\nThought: I now know the final answer\\nFinal Answer: the final answer to the original input question\\n\\nBegin!\\n\\nQuestion: {input}\\nThought:{agent_scratchpad}\",\r\n",
"            \"template_format\": \"f-string\"\r\n",
"        },\r\n",
"        \"llm\": {\r\n",
"            \"model_name\": \"text-davinci-003\",\r\n",
"            \"temperature\": 0.0,\r\n",
"            \"max_tokens\": 256,\r\n",
"            \"top_p\": 1,\r\n",
"            \"frequency_penalty\": 0,\r\n",
"            \"presence_penalty\": 0,\r\n",
"            \"n\": 1,\r\n",
"            \"best_of\": 1,\r\n",
"            \"request_timeout\": null,\r\n",
"            \"logit_bias\": {},\r\n",
"            \"_type\": \"openai\"\r\n",
"        },\r\n",
"        \"output_key\": \"text\",\r\n",
"        \"_type\": \"llm_chain\"\r\n",
"    },\r\n",
"    \"return_values\": [\r\n",
"        \"output\"\r\n",
"    ],\r\n",
"    \"_type\": \"zero-shot-react-description\"\r\n",
"}"
]
}
],
"source": [
"!cat agent.json"
]
},
{
"cell_type": "markdown",
"id": "0eb72510",
"metadata": {},
"source": [
"We can now load the agent back in"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "eb660b76",
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent_path=\"agent.json\", verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa624ea5",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
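Putting the cells of that notebook together, a short sketch of the full save/load round trip it describes — note, as the notebook stresses, that only the agent is serialized, so the tools must be re-supplied at load time:

```python
# Round-trip sketch assembled from the serialization notebook above:
# save only the agent, then reload it with the same tools (the tools
# themselves are not serialized).
from langchain.agents import load_tools, initialize_agent
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)

agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.save_agent("agent.json")  # serializes the agent only

# Reload: the same tools and llm are passed back in alongside the saved agent.
agent = initialize_agent(tools, llm, agent_path="agent.json", verbose=True)
```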
@@ -3,6 +3,8 @@ How-To Guides

 The first category of how-to guides here covers specific parts of working with agents.

+`Load From Hub <./examples/load_from_hub.html>`_: This notebook covers how to load agents from `LangChainHub <https://github.com/hwchase17/langchain-hub>`_.
+
 `Custom Tools <./examples/custom_tools.html>`_: How to create custom tools that an agent can use.

 `Intermediate Steps <./examples/intermediate_steps.html>`_: How to access and use intermediate steps to get more visibility into the internals of an agent.

@@ -2,7 +2,7 @@
 import time

 from langchain.chains.natbot.base import NatBotChain
-from langchain.chains.natbot.crawler import Crawler  # type: ignore
+from langchain.chains.natbot.crawler import Crawler


 def run_cmd(cmd: str, _crawler: Crawler) -> None:
New file (199 lines)
@@ -0,0 +1,199 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Vector DB Text Generation\n",
"\n",
"This notebook walks through how to use LangChain for text generation over a vector index. This is useful if we want to generate text that is able to draw from a large body of custom text, for example, generating blog posts that have an understanding of previous blog posts written, or product tutorials that can refer to product documentation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prepare Data\n",
"\n",
"First, we prepare the data. For this example, we fetch a documentation site that consists of markdown files hosted on Github and split them into small enough Documents."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.docstore.document import Document\n",
"import requests\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.vectorstores.faiss import FAISS\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.prompts import PromptTemplate\n",
"import pathlib\n",
"import subprocess\n",
"import tempfile"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Cloning into '.'...\n"
]
}
],
"source": [
"def get_github_docs(repo_owner, repo_name):\n",
"    with tempfile.TemporaryDirectory() as d:\n",
"        subprocess.check_call(\n",
"            f\"git clone --depth 1 https://github.com/{repo_owner}/{repo_name}.git .\",\n",
"            cwd=d,\n",
"            shell=True,\n",
"        )\n",
"        git_sha = (\n",
"            subprocess.check_output(\"git rev-parse HEAD\", shell=True, cwd=d)\n",
"            .decode(\"utf-8\")\n",
"            .strip()\n",
"        )\n",
"        repo_path = pathlib.Path(d)\n",
"        markdown_files = list(repo_path.glob(\"*/*.md\")) + list(\n",
"            repo_path.glob(\"*/*.mdx\")\n",
"        )\n",
"        for markdown_file in markdown_files:\n",
"            with open(markdown_file, \"r\") as f:\n",
"                relative_path = markdown_file.relative_to(repo_path)\n",
"                github_url = f\"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}\"\n",
"                yield Document(page_content=f.read(), metadata={\"source\": github_url})\n",
"\n",
"sources = get_github_docs(\"yirenlu92\", \"deno-manual-forked\")\n",
"\n",
"source_chunks = []\n",
"splitter = CharacterTextSplitter(separator=\" \", chunk_size=1024, chunk_overlap=0)\n",
"for source in sources:\n",
"    for chunk in splitter.split_text(source.page_content):\n",
"        source_chunks.append(Document(page_content=chunk, metadata=source.metadata))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set Up Vector DB\n",
"\n",
"Now that we have the documentation content in chunks, let's put all this information in a vector index for easy retrieval."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"search_index = FAISS.from_documents(source_chunks, OpenAIEmbeddings())"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set Up LLM Chain with Custom Prompt\n",
"\n",
"Next, let's set up a simple LLM chain but give it a custom prompt for blog post generation. Note that the custom prompt is parameterized and takes two inputs: `context`, which will be the documents fetched from the vector search, and `topic`, which is given by the user."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"prompt_template = \"\"\"Use the context below to write a 400 word blog post about the topic below:\n",
"    Context: {context}\n",
"    Topic: {topic}\n",
"    Blog post:\"\"\"\n",
"\n",
"PROMPT = PromptTemplate(\n",
"    template=prompt_template, input_variables=[\"context\", \"topic\"]\n",
")\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"\n",
"chain = LLMChain(llm=llm, prompt=PROMPT)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate Text\n",
"\n",
"Finally, we write a function to apply our inputs to the chain. The function takes an input parameter `topic`. We find the documents in the vector index that correspond to that `topic`, and use them as additional context in our simple LLM chain."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def generate_blog_post(topic):\n",
"    docs = search_index.similarity_search(topic, k=4)\n",
"    inputs = [{\"context\": doc.page_content, \"topic\": topic} for doc in docs]\n",
"    print(chain.apply(inputs))"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'text': '\\n\\nEnvironment variables are a great way to store and access sensitive information in your Deno applications. Deno offers built-in support for environment variables with `Deno.env`, and you can also use a `.env` file to store and access environment variables.\\n\\nUsing `Deno.env` is simple. It has getter and setter methods, so you can easily set and retrieve environment variables. For example, you can set the `FIREBASE_API_KEY` and `FIREBASE_AUTH_DOMAIN` environment variables like this:\\n\\n```ts\\nDeno.env.set(\"FIREBASE_API_KEY\", \"examplekey123\");\\nDeno.env.set(\"FIREBASE_AUTH_DOMAIN\", \"firebasedomain.com\");\\n\\nconsole.log(Deno.env.get(\"FIREBASE_API_KEY\")); // examplekey123\\nconsole.log(Deno.env.get(\"FIREBASE_AUTH_DOMAIN\")); // firebasedomain.com\\n```\\n\\nYou can also store environment variables in a `.env` file. This is a great'}, {'text': '\\n\\nEnvironment variables are a powerful tool for managing configuration settings in a program. They allow us to set values that can be used by the program, without having to hard-code them into the code. This makes it easier to change settings without having to modify the code.\\n\\nIn Deno, environment variables can be set in a few different ways. The most common way is to use the `VAR=value` syntax. This will set the environment variable `VAR` to the value `value`. This can be used to set any number of environment variables before running a command. For example, if we wanted to set the environment variable `VAR` to `hello` before running a Deno command, we could do so like this:\\n\\n```\\nVAR=hello deno run main.ts\\n```\\n\\nThis will set the environment variable `VAR` to `hello` before running the command. We can then access this variable in our code using the `Deno.env.get()` function. For example, if we ran the following command:\\n\\n```\\nVAR=hello && deno eval \"console.log(\\'Deno: \\' + Deno.env.get(\\'VAR'}, {'text': '\\n\\nEnvironment variables are a powerful tool for developers, allowing them to store and access data without having to hard-code it into their applications. In Deno, you can access environment variables using the `Deno.env.get()` function.\\n\\nFor example, if you wanted to access the `HOME` environment variable, you could do so like this:\\n\\n```js\\n// env.js\\nDeno.env.get(\"HOME\");\\n```\\n\\nWhen running this code, you\\'ll need to grant the Deno process access to environment variables. This can be done by passing the `--allow-env` flag to the `deno run` command. You can also specify which environment variables you want to grant access to, like this:\\n\\n```shell\\n# Allow access to only the HOME env var\\ndeno run --allow-env=HOME env.js\\n```\\n\\nIt\\'s important to note that environment variables are case insensitive on Windows, so Deno also matches them case insensitively (on Windows only).\\n\\nAnother thing to be aware of when using environment variables is subprocess permissions. Subprocesses are powerful and can access system resources regardless of the permissions you granted to the Den'}, {'text': '\\n\\nEnvironment variables are an important part of any programming language, and Deno is no exception. Deno is a secure JavaScript and TypeScript runtime built on the V8 JavaScript engine, and it recently added support for environment variables. This feature was added in Deno version 1.6.0, and it is now available for use in Deno applications.\\n\\nEnvironment variables are used to store information that can be used by programs. They are typically used to store configuration information, such as the location of a database or the name of a user. In Deno, environment variables are stored in the `Deno.env` object. This object is similar to the `process.env` object in Node.js, and it allows you to access and set environment variables.\\n\\nThe `Deno.env` object is a read-only object, meaning that you cannot directly modify the environment variables. Instead, you must use the `Deno.env.set()` function to set environment variables. This function takes two arguments: the name of the environment variable and the value to set it to. For example, if you wanted to set the `FOO` environment variable to `bar`, you would use the following code:\\n\\n```'}]\n"
]
}
],
"source": [
"generate_blog_post(\"environment variables\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
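The `generate_blog_post` function above prints the raw list of dicts returned by `chain.apply`. A small sketch of a variant that returns a single string instead, assuming the `[{'text': ...}, ...]` result shape visible in the output above:

```python
# Variant of generate_blog_post that joins the generations into one string.
# Assumes chain.apply returns a list of {"text": ...} dicts, matching the
# output shown above.
def generate_blog_post_text(topic: str) -> str:
    docs = search_index.similarity_search(topic, k=4)
    inputs = [{"context": doc.page_content, "topic": topic} for doc in docs]
    results = chain.apply(inputs)
    return "\n\n".join(result["text"] for result in results)

post = generate_blog_post_text("environment variables")
```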
@@ -11,6 +11,8 @@ The examples here are all end-to-end chains for working with documents.

 `Summarization <./combine_docs_examples/summarize.html>`_: A walkthrough of how to use LangChain for summarization over specific documents.

+`Vector DB Text Generation <./combine_docs_examples/vector_db_text_generation.html>`_: A walkthrough of how to use LangChain for text generation over a vector database.
+
 `Vector DB Question Answering <./combine_docs_examples/vector_db_qa.html>`_: A walkthrough of how to use LangChain for question answering over a vector database.

 `Vector DB Question Answering with Sources <./combine_docs_examples/vector_db_qa_with_sources.html>`_: A walkthrough of how to use LangChain for question answering (with sources) over a vector database.
@@ -58,7 +58,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 3,
 "id": "a8fc8f23",
 "metadata": {},
 "outputs": [],
@@ -68,7 +68,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 4,
 "id": "15ff81df",
 "metadata": {
 "pycharm": {
@@ -96,7 +96,7 @@
 "' There are 9 employees.'"
 ]
 },
-"execution_count": 3,
+"execution_count": 4,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -188,6 +188,62 @@
 "db_chain.run(\"How many employees are there in the foobar table?\")"
 ]
 },
+{
+"cell_type": "markdown",
+"id": "88d8b969",
+"metadata": {},
+"source": [
+"## Return Intermediate Steps\n",
+"\n",
+"You can also return the intermediate steps of the SQLDatabaseChain. This allows you to access the SQL statement that was generated, as well as the result of running that against the SQL Database."
+]
+},
+{
+"cell_type": "code",
+"execution_count": 8,
+"id": "38559487",
+"metadata": {},
+"outputs": [],
+"source": [
+"db_chain = SQLDatabaseChain(llm=llm, database=db, prompt=PROMPT, verbose=True, return_intermediate_steps=True)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 10,
+"id": "78b6af4d",
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"\n",
+"\n",
+"\u001b[1m> Entering new SQLDatabaseChain chain...\u001b[0m\n",
+"How many employees are there in the foobar table? \n",
+"SQLQuery:\u001b[32;1m\u001b[1;3m SELECT COUNT(*) FROM Employee;\u001b[0m\n",
+"SQLResult: \u001b[33;1m\u001b[1;3m[(9,)]\u001b[0m\n",
+"Answer:\u001b[32;1m\u001b[1;3m There are 9 employees in the foobar table.\u001b[0m\n",
+"\u001b[1m> Finished chain.\u001b[0m\n"
+]
+},
+{
+"data": {
+"text/plain": [
+"[' SELECT COUNT(*) FROM Employee;', '[(9,)]']"
+]
+},
+"execution_count": 10,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"result = db_chain(\"How many employees are there in the foobar table?\")\n",
+"result[\"intermediate_steps\"]"
+]
+},
 {
 "cell_type": "markdown",
 "id": "b408f800",
@@ -242,6 +298,74 @@
 "db_chain.run(\"What are some example tracks by composer Johann Sebastian Bach?\")"
 ]
 },
+{
+"cell_type": "markdown",
+"id": "bcc5e936",
+"metadata": {},
+"source": [
+"## Adding first row of each table\n",
+"Sometimes, the format of the data is not obvious and it is optimal to include the first row of the table in the prompt to allow the LLM to understand the data before providing a final query. Here we will use this feature to let the LLM know that artists are saved with their full names."
+]
+},
+{
+"cell_type": "code",
+"execution_count": 11,
+"id": "9a22ee47",
+"metadata": {},
+"outputs": [],
+"source": [
+"db = SQLDatabase.from_uri(\n",
+"    \"sqlite:///../../../../notebooks/Chinook.db\", \n",
+"    include_tables=['Track'], # we include only one table to save tokens in the prompt :)\n",
+"    sample_row_in_table_info=True)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 12,
+"id": "bcb7a489",
+"metadata": {},
+"outputs": [],
+"source": [
+"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 13,
+"id": "81e05d82",
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"\n",
+"\n",
+"\u001b[1m> Entering new SQLDatabaseChain chain...\u001b[0m\n",
+"What are some example tracks by Bach? \n",
+"SQLQuery:Table 'Track' has columns: TrackId (INTEGER), Name (NVARCHAR(200)), AlbumId (INTEGER), MediaTypeId (INTEGER), GenreId (INTEGER), Composer (NVARCHAR(220)), Milliseconds (INTEGER), Bytes (INTEGER), UnitPrice (NUMERIC(10, 2)). Here is an example row for this table (long strings are truncated): ['1', 'For Those About To Rock (We Salute You)', '1', '1', '1', 'Angus Young, Malcolm Young, Brian Johnson', '343719', '11170334', '0.99'].\n",
+"\u001b[32;1m\u001b[1;3m SELECT TrackId, Name, Composer FROM Track WHERE Composer LIKE '%Bach%' ORDER BY Name LIMIT 5;\u001b[0m\n",
+"SQLResult: \u001b[33;1m\u001b[1;3m[(1709, 'American Woman', 'B. Cummings/G. Peterson/M.J. Kale/R. Bachman'), (3408, 'Aria Mit 30 Veränderungen, BWV 988 \"Goldberg Variations\": Aria', 'Johann Sebastian Bach'), (3433, 'Concerto No.2 in F Major, BWV1047, I. Allegro', 'Johann Sebastian Bach'), (3407, 'Concerto for 2 Violins in D Minor, BWV 1043: I. Vivace', 'Johann Sebastian Bach'), (3490, 'Partita in E Major, BWV 1006A: I. Prelude', 'Johann Sebastian Bach')]\u001b[0m\n",
+"Answer:\u001b[32;1m\u001b[1;3m Some example tracks by Bach are 'American Woman', 'Aria Mit 30 Veränderungen, BWV 988 \"Goldberg Variations\": Aria', 'Concerto No.2 in F Major, BWV1047, I. Allegro', 'Concerto for 2 Violins in D Minor, BWV 1043: I. Vivace', and 'Partita in E Major, BWV 1006A: I. Prelude'.\u001b[0m\n",
+"\u001b[1m> Finished chain.\u001b[0m\n"
+]
+},
+{
+"data": {
+"text/plain": [
+"' Some example tracks by Bach are \\'American Woman\\', \\'Aria Mit 30 Veränderungen, BWV 988 \"Goldberg Variations\": Aria\\', \\'Concerto No.2 in F Major, BWV1047, I. Allegro\\', \\'Concerto for 2 Violins in D Minor, BWV 1043: I. Vivace\\', and \\'Partita in E Major, BWV 1006A: I. Prelude\\'.'"
+]
+},
+"execution_count": 13,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"db_chain.run(\"What are some example tracks by Bach?\")"
+]
+},
 {
 "cell_type": "markdown",
 "id": "c12ae15a",
@@ -319,14 +443,6 @@
 "source": [
 "chain.run(\"How many employees are also customers?\")"
 ]
 },
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "b2998b03",
-"metadata": {},
-"outputs": [],
-"source": []
-}
 ],
 "metadata": {
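One practical use of the `return_intermediate_steps` flag added above is auditing the generated SQL before trusting the answer. A small sketch, assuming the two-element `[sql_query, sql_result]` shape of `intermediate_steps` seen in the output, and that the chain's final answer sits under its output key (assumed to be `"result"` here, which this diff does not confirm):

```python
# Sketch: inspect the generated SQL and raw result before using the answer.
# Assumes intermediate_steps is [sql_query, sql_result], matching the
# output shown above; the "result" output key is an assumption.
result = db_chain("How many employees are there in the foobar table?")
sql_query, sql_result = result["intermediate_steps"]

print("SQL generated:", sql_query)   #  SELECT COUNT(*) FROM Employee;
print("Raw result:   ", sql_result)  # [(9,)]
print("Answer:       ", result["result"])
```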
docs/modules/chains/generic/from_hub.ipynb (new file, 157 lines)
@@ -0,0 +1,157 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "25c90e9e",
"metadata": {},
"source": [
"# Loading from LangChainHub\n",
"\n",
"This notebook covers how to load chains from [LangChainHub](https://github.com/hwchase17/langchain-hub)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "8b54479e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import load_chain\n",
"\n",
"chain = load_chain(\"lc://chains/llm-math/chain.json\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "4828f31f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
"whats 2 raised to .12\u001b[32;1m\u001b[1;3m\n",
"Answer: 1.0791812460476249\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Answer: 1.0791812460476249'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.run(\"whats 2 raised to .12\")"
]
},
{
"cell_type": "markdown",
"id": "8db72cda",
"metadata": {},
"source": [
"Sometimes chains will require extra arguments that were not serialized with the chain. For example, a chain that does question answering over a vector database will require a vector database."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "aab39528",
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.vectorstores.faiss import FAISS\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain import OpenAI, VectorDBQA"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "16a85d5e",
"metadata": {},
"outputs": [],
"source": [
"with open('../../state_of_the_union.txt') as f:\n",
"    state_of_the_union = f.read()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"texts = text_splitter.split_text(state_of_the_union)\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"vectorstore = FAISS.from_texts(texts, embeddings)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "6a82e91e",
"metadata": {},
"outputs": [],
"source": [
"chain = load_chain(\"lc://chains/vector-db-qa/stuff/chain.json\", vectorstore=vectorstore)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "efe9b25b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\" The president said that Jackson is one of the nation's top legal minds, a former top litigator in private practice, a former federal public defender, and from a family of public school educators and police officers, and that she has received a broad range of support from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"chain.run(query)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f910a32f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
docs/modules/chains/generic/llm.json (new file, 13 lines)
@@ -0,0 +1,13 @@
{
    "model_name": "text-davinci-003",
    "temperature": 0.0,
    "max_tokens": 256,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
    "n": 1,
    "best_of": 1,
    "request_timeout": null,
    "logit_bias": {},
    "_type": "openai"
}
docs/modules/chains/generic/llm_chain.json (new file, 27 lines)
@@ -0,0 +1,27 @@
{
    "memory": null,
    "verbose": true,
    "prompt": {
        "input_variables": [
            "question"
        ],
        "output_parser": null,
        "template": "Question: {question}\n\nAnswer: Let's think step by step.",
        "template_format": "f-string"
    },
    "llm": {
        "model_name": "text-davinci-003",
        "temperature": 0.0,
        "max_tokens": 256,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
        "n": 1,
        "best_of": 1,
        "request_timeout": null,
        "logit_bias": {},
        "_type": "openai"
    },
    "output_key": "text",
    "_type": "llm_chain"
}
docs/modules/chains/generic/llm_chain_separate.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
    "memory": null,
    "verbose": true,
    "prompt_path": "prompt.json",
    "llm_path": "llm.json",
    "output_key": "text",
    "_type": "llm_chain"
}
docs/modules/chains/generic/prompt.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
    "input_variables": [
        "question"
    ],
    "output_parser": null,
    "template": "Question: {question}\n\nAnswer: Let's think step by step.",
    "template_format": "f-string"
}
docs/modules/chains/generic/serialization.ipynb (new file, 376 lines)
@@ -0,0 +1,376 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "cbe47c3a",
"metadata": {},
"source": [
"# Serialization\n",
"This notebook covers how to serialize chains to and from disk. The serialization format we use is json or yaml. Currently, only some chains support this type of serialization. We will grow the number of supported chains over time.\n"
]
},
{
"cell_type": "markdown",
"id": "e4a8a447",
"metadata": {},
"source": [
"## Saving a chain to disk\n",
"First, let's go over how to save a chain to disk. This can be done with the `.save` method, and specifying a file path with a json or yaml extension."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "26e28451",
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate, OpenAI, LLMChain\n",
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "bfa18e1f",
"metadata": {},
"outputs": [],
"source": [
"llm_chain.save(\"llm_chain.json\")"
]
},
{
"cell_type": "markdown",
"id": "ea82665d",
"metadata": {},
"source": [
"Let's now take a look at what's inside this saved file"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0fd33328",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
"    \"memory\": null,\r\n",
"    \"verbose\": true,\r\n",
"    \"prompt\": {\r\n",
"        \"input_variables\": [\r\n",
"            \"question\"\r\n",
"        ],\r\n",
"        \"output_parser\": null,\r\n",
"        \"template\": \"Question: {question}\\n\\nAnswer: Let's think step by step.\",\r\n",
"        \"template_format\": \"f-string\"\r\n",
"    },\r\n",
"    \"llm\": {\r\n",
"        \"model_name\": \"text-davinci-003\",\r\n",
"        \"temperature\": 0.0,\r\n",
"        \"max_tokens\": 256,\r\n",
"        \"top_p\": 1,\r\n",
"        \"frequency_penalty\": 0,\r\n",
"        \"presence_penalty\": 0,\r\n",
"        \"n\": 1,\r\n",
"        \"best_of\": 1,\r\n",
"        \"request_timeout\": null,\r\n",
"        \"logit_bias\": {},\r\n",
"        \"_type\": \"openai\"\r\n",
"    },\r\n",
"    \"output_key\": \"text\",\r\n",
"    \"_type\": \"llm_chain\"\r\n",
"}"
]
}
],
"source": [
"!cat llm_chain.json"
]
},
{
"cell_type": "markdown",
"id": "2012c724",
"metadata": {},
"source": [
"## Loading a chain from disk\n",
"We can load a chain from disk by using the `load_chain` method."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "342a1974",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import load_chain"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "394b7da8",
"metadata": {},
"outputs": [],
"source": [
"chain = load_chain(\"llm_chain.json\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "20d99787",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mQuestion: whats 2 + 2\n",
"\n",
"Answer: Let's think step by step.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' 2 + 2 = 4'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.run(\"whats 2 + 2\")"
]
},
{
"cell_type": "markdown",
"id": "14449679",
"metadata": {},
"source": [
"## Saving components separately\n",
"In the above example, we can see that the prompt and llm configuration information is saved in the same json as the overall chain. Alternatively, we can split them up and save them separately. This is often useful to make the saved components more modular. In order to do this, we just need to specify `llm_path` instead of the `llm` component, and `prompt_path` instead of the `prompt` component."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "50ec35ab",
"metadata": {},
"outputs": [],
"source": [
"llm_chain.prompt.save(\"prompt.json\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c48b39aa",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
"    \"input_variables\": [\r\n",
"        \"question\"\r\n",
"    ],\r\n",
"    \"output_parser\": null,\r\n",
"    \"template\": \"Question: {question}\\n\\nAnswer: Let's think step by step.\",\r\n",
"    \"template_format\": \"f-string\"\r\n",
"}"
]
}
],
"source": [
"!cat prompt.json"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "13c92944",
"metadata": {},
"outputs": [],
"source": [
"llm_chain.llm.save(\"llm.json\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1b815f89",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
"    \"model_name\": \"text-davinci-003\",\r\n",
"    \"temperature\": 0.0,\r\n",
"    \"max_tokens\": 256,\r\n",
"    \"top_p\": 1,\r\n",
"    \"frequency_penalty\": 0,\r\n",
"    \"presence_penalty\": 0,\r\n",
"    \"n\": 1,\r\n",
"    \"best_of\": 1,\r\n",
"    \"request_timeout\": null,\r\n",
"    \"logit_bias\": {},\r\n",
"    \"_type\": \"openai\"\r\n",
"}"
]
}
],
"source": [
"!cat llm.json"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "7e6aa9ab",
"metadata": {},
"outputs": [],
"source": [
"config = {\n",
"    \"memory\": None,\n",
"    \"verbose\": True,\n",
"    \"prompt_path\": \"prompt.json\",\n",
"    \"llm_path\": \"llm.json\",\n",
"    \"output_key\": \"text\",\n",
"    \"_type\": \"llm_chain\"\n",
"}\n",
"import json\n",
"with open(\"llm_chain_separate.json\", \"w\") as f:\n",
"    json.dump(config, f, indent=2)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "8e959ca6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
"    \"memory\": null,\r\n",
"    \"verbose\": true,\r\n",
"    \"prompt_path\": \"prompt.json\",\r\n",
"    \"llm_path\": \"llm.json\",\r\n",
"    \"output_key\": \"text\",\r\n",
"    \"_type\": \"llm_chain\"\r\n",
"}"
]
}
],
"source": [
"!cat llm_chain_separate.json"
]
},
{
"cell_type": "markdown",
"id": "662731c0",
"metadata": {},
"source": [
"We can then load it in the same way"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "d69ceb93",
"metadata": {},
"outputs": [],
"source": [
"chain = load_chain(\"llm_chain_separate.json\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "a99d61b9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mQuestion: whats 2 + 2\n",
"\n",
"Answer: Let's think step by step.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' 2 + 2 = 4'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.run(\"whats 2 + 2\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "822b7c12",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -18,3 +18,7 @@ They are broken up into three categories:

   ./generic_how_to.rst
   ./combine_docs_how_to.rst
   ./utility_how_to.rst

In addition to different types of chains, we also have the following how-to guides for working with chains in general:

`Load From Hub <./generic/from_hub.html>`_: This notebook covers how to load chains from `LangChainHub <https://github.com/hwchase17/langchain-hub>`_.

@@ -34,7 +34,7 @@ Next, we'll create a custom prompt template that takes in the function name as i

```python
from langchain.prompts import BasePromptTemplate
from pydantic import BaseModel
from pydantic import BaseModel, validator


class FunctionExplainerPromptTemplate(BasePromptTemplate, BaseModel):

@@ -77,7 +77,6 @@
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "42f76e43",
   "metadata": {},
@@ -138,7 +137,6 @@
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "ed47bb62",
   "metadata": {},
@@ -196,11 +194,79 @@
   "source": [
    "doc_result = embeddings.embed_documents([text])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fff4734f",
   "metadata": {},
   "source": [
    "## TensorflowHub\n",
    "Let's load the TensorflowHub Embedding class."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f822104b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.embeddings import TensorflowHubEmbeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "bac84e46",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2023-01-30 23:53:01.652176: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2023-01-30 23:53:34.362802: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
     ]
    }
   ],
   "source": [
    "embeddings = TensorflowHubEmbeddings()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4790d770",
   "metadata": {},
   "outputs": [],
   "source": [
    "text = \"This is a test document.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "f556dcdb",
   "metadata": {},
   "outputs": [],
   "source": [
    "query_result = embeddings.embed_query(text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "90f0db94",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cohere",
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
@@ -214,7 +280,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
   "version": "3.10.9"
  },
  "vscode": {
   "interpreter": {

@@ -21,8 +21,8 @@
   "outputs": [],
   "source": [
    "from langchain.llms import OpenAI\n",
    "from langchain.embeddings import OpenAIEmbeddings, HypotheticalDocumentEmbedder\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.embeddings import OpenAIEmbeddings\n",
    "from langchain.chains import LLMChain, HypotheticalDocumentEmbedder\n",
    "from langchain.prompts import PromptTemplate"
   ]
  },
@@ -220,7 +220,7 @@
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "display_name": "llm-env",
   "language": "python",
   "name": "python3"
  },
@@ -234,7 +234,12 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
   "version": "3.9.0 (default, Nov 15 2020, 06:25:35) \n[Clang 10.0.0 ]"
  },
  "vscode": {
   "interpreter": {
    "hash": "9dd01537e9ab68cf47cb0398488d182358f774f73101197b3bd1b5502c6ec7f9"
   }
  }
 },
 "nbformat": 4,

@@ -1,13 +1,14 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "b118c9dc",
   "metadata": {},
   "source": [
    "# Text Splitter\n",
    "\n",
    "When you want to deal wit long pieces of text, it is necessary to split up that text into chunks.\n",
    "When you want to deal with long pieces of text, it is necessary to split up that text into chunks.\n",
    "This notebook showcases several ways to do that.\n",
    "\n",
    "At a high level, text splitters work as following:\n",
@@ -486,7 +487,7 @@
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
@@ -500,7 +501,12 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
   "version": "3.9.12 (main, Mar 26 2022, 15:51:15) \n[Clang 13.1.6 (clang-1316.0.21.2)]"
  },
  "vscode": {
   "interpreter": {
    "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
   }
  }
 },
 "nbformat": 4,

@@ -13,24 +13,25 @@
   "source": [
    "This notebook goes over how to use the bing search component.\n",
    "\n",
    "First, you need to set up the proper API keys and environment variables. To set it up, follow the instructions found here.\n",
    "First, you need to set up the proper API keys and environment variables. To set it up, follow the instructions found [here](https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e).\n",
    "\n",
    "Then we will need to set some environment variables."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"BING_SUBSCRIPTION_KEY\"] = \"\""
    "os.environ[\"BING_SUBSCRIPTION_KEY\"] = \"\"\n",
    "os.environ[\"BING_SEARCH_URL\"] = \"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -39,7 +40,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -48,16 +49,16 @@
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Thanks to the flexibility of <b>Python</b> and the powerful ecosystem of packages, the Azure CLI supports features such as autocompletion (in shells that support it), persistent credentials, JMESPath result parsing, lazy initialization, network-less unit tests, and more. Building an open-source and cross-platform Azure CLI with <b>Python</b> by Dan Taylor. <b>Python</b> Brochure. <b>Python</b> is a programming language that lets you work more quickly and integrate your systems more effectively. You can learn to use <b>Python</b> and see almost immediate gains in productivity and lower maintenance costs. Learn more about <b>Python</b> . Learning. Before getting started, you may want to find out which IDEs and text editors are tailored to make <b>Python</b> editing easy, browse the list of introductory books, or look at code samples that you might find helpful.. There is a list of tutorials suitable for experienced programmers on the BeginnersGuide/Tutorials page. There is also a list of resources in other languages which might be ... <b>Python</b> is a popular programming language. <b>Python</b> can be used on a server to create web applications. Start learning <b>Python</b> now ». With <b>Python</b>, you can use while loops to run the same task multiple times and for loops to loop once over list data. In this module, you'll learn about the two loop types and when to apply each. Manage data with <b>Python</b> dictionaries. <b>Python</b> dictionaries allow you to model complex data. This module explores common scenarios where you could use ... This module is part of these learning paths. Build real world applications with <b>Python</b>. Introduction 1 min. What is <b>Python</b>? 3 min. Use the REPL 2 min. Variables and basic data types in <b>Python</b> 4 min. Exercise - output 1 min. Reading keyboard input 3 min. Exercise - Build a calculator 1 min. <b>Python</b>'s source code is freely available to the public, and its usage and distribution are unrestricted, including for commercial purposes. It is widely used for web development, and using it, practically anything can be created, including mobile apps, online apps, tools, data analytics, machine learning, and so on. ... <b>Python</b> is a high-level, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation. <b>Python</b> is dynamically-typed and garbage-collected. It supports multiple programming paradigms, including structured (particularly procedural), object-oriented and functional programming.'"
       "'Thanks to the flexibility of <b>Python</b> and the powerful ecosystem of packages, the Azure CLI supports features such as autocompletion (in shells that support it), persistent credentials, JMESPath result parsing, lazy initialization, network-less unit tests, and more. Building an open-source and cross-platform Azure CLI with <b>Python</b> by Dan Taylor. <b>Python</b> releases by version number: Release version Release date Click for more. <b>Python</b> 3.11.1 Dec. 6, 2022 Download Release Notes. <b>Python</b> 3.10.9 Dec. 6, 2022 Download Release Notes. <b>Python</b> 3.9.16 Dec. 6, 2022 Download Release Notes. <b>Python</b> 3.8.16 Dec. 6, 2022 Download Release Notes. <b>Python</b> 3.7.16 Dec. 6, 2022 Download Release Notes. In this lesson, we will look at the += operator in <b>Python</b> and see how it works with several simple examples.. The operator ‘+=’ is a shorthand for the addition assignment operator.It adds two values and assigns the sum to a variable (left operand). W3Schools offers free online tutorials, references and exercises in all the major languages of the web. Covering popular subjects like HTML, CSS, JavaScript, <b>Python</b>, SQL, Java, and many, many more. This tutorial introduces the reader informally to the basic concepts and features of the <b>Python</b> language and system. It helps to have a <b>Python</b> interpreter handy for hands-on experience, but all examples are self-contained, so the tutorial can be read off-line as well. For a description of standard objects and modules, see The <b>Python</b> Standard ... <b>Python</b> is a general-purpose, versatile, and powerful programming language. It's a great first language because <b>Python</b> code is concise and easy to read. Whatever you want to do, <b>python</b> can do it. From web development to machine learning to data science, <b>Python</b> is the language for you. To install <b>Python</b> using the Microsoft Store: Go to your Start menu (lower left Windows icon), type \"Microsoft Store\", select the link to open the store. Once the store is open, select Search from the upper-right menu and enter \"<b>Python</b>\". Select which version of <b>Python</b> you would like to use from the results under Apps. Under the “<b>Python</b> Releases for Mac OS X” heading, click the link for the Latest <b>Python</b> 3 Release - <b>Python</b> 3.x.x. As of this writing, the latest version was <b>Python</b> 3.8.4. Scroll to the bottom and click macOS 64-bit installer to start the download. When the installer is finished downloading, move on to the next step. Step 2: Run the Installer'"
      ]
     },
     "execution_count": 4,
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -76,7 +77,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -85,7 +86,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
@@ -94,7 +95,7 @@
       "'Thanks to the flexibility of <b>Python</b> and the powerful ecosystem of packages, the Azure CLI supports features such as autocompletion (in shells that support it), persistent credentials, JMESPath result parsing, lazy initialization, network-less unit tests, and more. Building an open-source and cross-platform Azure CLI with <b>Python</b> by Dan Taylor.'"
      ]
     },
     "execution_count": 6,
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -103,12 +104,63 @@
    "search.run(\"python\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Metadata Results"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Run query through BingSearch and return snippet, title, and link metadata.\n",
    "\n",
    "- Snippet: The description of the result.\n",
    "- Title: The title of the result.\n",
    "- Link: The link to the result."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": []
   "source": [
    "search = BingSearchAPIWrapper()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'snippet': 'Lady Alice. Pink Lady <b>apples</b> aren’t the only lady in the apple family. Lady Alice <b>apples</b> were discovered growing, thanks to bees pollinating, in Washington. They are smaller and slightly more stout in appearance than other varieties. Their skin color appears to have red and yellow stripes running from stem to butt.',\n",
       " 'title': '25 Types of Apples - Jessica Gavin',\n",
       " 'link': 'https://www.jessicagavin.com/types-of-apples/'},\n",
       " {'snippet': '<b>Apples</b> can do a lot for you, thanks to plant chemicals called flavonoids. And they have pectin, a fiber that breaks down in your gut. If you take off the apple’s skin before eating it, you won ...',\n",
       " 'title': 'Apples: Nutrition & Health Benefits - WebMD',\n",
       " 'link': 'https://www.webmd.com/food-recipes/benefits-apples'},\n",
       " {'snippet': '<b>Apples</b> boast many vitamins and minerals, though not in high amounts. However, <b>apples</b> are usually a good source of vitamin C. Vitamin C. Also called ascorbic acid, this vitamin is a common ...',\n",
       " 'title': 'Apples 101: Nutrition Facts and Health Benefits',\n",
       " 'link': 'https://www.healthline.com/nutrition/foods/apples'},\n",
       " {'snippet': 'Weight management. The fibers in <b>apples</b> can slow digestion, helping one to feel greater satisfaction after eating. After following three large prospective cohorts of 133,468 men and women for 24 years, researchers found that higher intakes of fiber-rich fruits with a low glycemic load, particularly <b>apples</b> and pears, were associated with the least amount of weight gain over time.',\n",
       " 'title': 'Apples | The Nutrition Source | Harvard T.H. Chan School of Public Health',\n",
       " 'link': 'https://www.hsph.harvard.edu/nutritionsource/food-features/apples/'}]"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "search.results(\"apples\", 5)"
   ]
  }
 ],
 "metadata": {

@@ -16,19 +16,19 @@
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "execution_count": 1,
   "id": "34bb5968",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"GOOGLE_CSE_ID\"] = \n",
    "os.environ[\"GOOGLE_API_KEY\"] = "
    "os.environ[\"GOOGLE_CSE_ID\"] = \"\"\n",
    "os.environ[\"GOOGLE_API_KEY\"] = \"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "execution_count": 2,
   "id": "ac4910f8",
   "metadata": {},
   "outputs": [],
@@ -38,7 +38,7 @@
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "execution_count": 3,
   "id": "84b8f773",
   "metadata": {},
   "outputs": [],
@@ -48,17 +48,17 @@
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "execution_count": 4,
   "id": "068991a6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'STATE OF HAWAII. 1 Child\\'s First Name. (Type or print). 2. Sex. BARACK. 3. This Birth. CERTIFICATE OF LIVE BIRTH. FILE. NUMBER 151 le. lb. Middle Name. Barack Hussein Obama II is an American politician who served as the 44th president of the United States from 2009 to 2017. A member of the Democratic Party,\\xa0... First Lady Michelle LaVaughn Robinson Obama is a lawyer, writer, and the wife of the 44th President, Barack Obama. She is the first African-American First\\xa0... Barack Obama, in full Barack Hussein Obama II, (born August 4, 1961, Honolulu, Hawaii, U.S.), 44th president of the United States (2009–17) and the first\\xa0... Aug 18, 2017 ... It took him several seconds and multiple clues to remember former President Barack Obama\\'s first name. Miller knew that every answer had to\\xa0... Feb 9, 2015 ... Michael Jordan misspelled Barack Obama\\'s first name on 50th-birthday gift ... Knowing Obama is a Chicagoan and huge basketball fan,\\xa0... His full name is Barack Hussein Obama II. Since the “II” is simply because he was named for his father, his last name is Obama. Jan 16, 2007 ... 4, 1961, in Honolulu. His first name means \"one who is blessed\" in Swahili. While Obama\\'s father, Barack Hussein Obama Sr., was from Kenya, his\\xa0... Jan 19, 2017 ... Hopeful parents named their sons for the first Black president, whose name is a variation of the Hebrew name Baruch, which means “blessed”\\xa0... Feb 27, 2020 ... President Barack Obama was born Barack Hussein Obama, II, as shown here on his birth certificate here . As reported by Reuters here , his\\xa0...'"
       "'1 Child\\'s First Name. 2. 6. 7d. Street Address. 71. (Type or print). BARACK. Sex. 3. This Birth. 4. If Twin or Triplet,. Was Child Born. Barack Hussein Obama II is an American retired politician who served as the 44th president of the United States from 2009 to 2017. His full name is Barack Hussein Obama II. Since the “II” is simply because he was named for his father, his last name is Obama. Feb 9, 2015 ... Michael Jordan misspelled Barack Obama\\'s first name on 50th-birthday gift ... Knowing Obama is a Chicagoan and huge basketball fan,\\xa0... Aug 18, 2017 ... It took him several seconds and multiple clues to remember former President Barack Obama\\'s first name. Miller knew that every answer had to end\\xa0... First Lady Michelle LaVaughn Robinson Obama is a lawyer, writer, and the wife of the 44th President, Barack Obama. She is the first African-American First\\xa0... Barack Obama, in full Barack Hussein Obama II, (born August 4, 1961, Honolulu, Hawaii, U.S.), 44th president of the United States (2009–17) and the first\\xa0... When Barack Obama was elected president in 2008, he became the first African American to hold ... The Middle East remained a key foreign policy challenge. Feb 27, 2020 ... President Barack Obama was born Barack Hussein Obama, II, as shown here on his birth certificate here . As reported by Reuters here , his\\xa0... Jan 16, 2007 ... 4, 1961, in Honolulu. His first name means \"one who is blessed\" in Swahili. While Obama\\'s father, Barack Hussein Obama Sr., was from Kenya, his\\xa0...'"
      ]
     },
     "execution_count": 7,
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -67,13 +67,118 @@
    "search.run(\"Obama's first name?\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "074b7f07",
   "metadata": {},
   "source": [
    "## Number of Results\n",
    "You can use the `k` parameter to set the number of results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "execution_count": 5,
   "id": "5083fbdd",
   "metadata": {},
   "outputs": [],
   "source": [
    "search = GoogleSearchAPIWrapper(k=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "77aaa857",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'The official home of the Python Programming Language.'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "search.run(\"python\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "11c8d94f",
   "metadata": {},
   "source": [
    "'The official home of the Python Programming Language.'"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "73473110",
   "metadata": {},
   "source": [
    "## Metadata Results"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "109fe796",
   "metadata": {},
   "source": [
    "Run query through GoogleSearch and return snippet, title, and link metadata.\n",
    "\n",
    "- Snippet: The description of the result.\n",
    "- Title: The title of the result.\n",
    "- Link: The link to the result."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "028f4cba",
   "metadata": {},
   "outputs": [],
   "source": []
   "source": [
    "search = GoogleSearchAPIWrapper()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4d8f734f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'snippet': 'Discover the innovative world of Apple and shop everything iPhone, iPad, Apple Watch, Mac, and Apple TV, plus explore accessories, entertainment,\\xa0...',\n",
       " 'title': 'Apple',\n",
       " 'link': 'https://www.apple.com/'},\n",
       " {'snippet': \"Jul 10, 2022 ... Whether or not you're up on your apple trivia, no doubt you know how delicious this popular fruit is, and how nutritious. Apples are rich in\\xa0...\",\n",
       " 'title': '25 Types of Apples and What to Make With Them - Parade ...',\n",
       " 'link': 'https://parade.com/1330308/bethlipton/types-of-apples/'},\n",
       " {'snippet': 'An apple is an edible fruit produced by an apple tree (Malus domestica). Apple trees are cultivated worldwide and are the most widely grown species in the\\xa0...',\n",
       " 'title': 'Apple - Wikipedia',\n",
       " 'link': 'https://en.wikipedia.org/wiki/Apple'},\n",
       " {'snippet': 'Apples are a popular fruit. They contain antioxidants, vitamins, dietary fiber, and a range of other nutrients. Due to their varied nutrient content,\\xa0...',\n",
       " 'title': 'Apples: Benefits, nutrition, and tips',\n",
       " 'link': 'https://www.medicalnewstoday.com/articles/267290'},\n",
       " {'snippet': \"An apple is a crunchy, bright-colored fruit, one of the most popular in the United States. You've probably heard the age-old saying, “An apple a day keeps\\xa0...\",\n",
       " 'title': 'Apples: Nutrition & Health Benefits',\n",
       " 'link': 'https://www.webmd.com/food-recipes/benefits-apples'}]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "search.results(\"apples\", 5)"
   ]
  }
 ],
 "metadata": {
@@ -93,6 +198,11 @@
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  },
  "vscode": {
   "interpreter": {
    "hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
   }
  }
 },
 "nbformat": 4,

57 docs/tracing.md Normal file
@@ -0,0 +1,57 @@
# Tracing

By enabling tracing in your LangChain runs, you’ll be able to more effectively visualize, step through, and debug your chains and agents.

First, you should install tracing and set up your environment properly.
You can use either a locally hosted version of this (uses Docker) or a cloud hosted version (in closed alpha).
If you're interested in using the hosted platform, please fill out the form [here](https://forms.gle/tRCEMSeopZf6TE3b6).

- [Locally Hosted Setup](./tracing/local_installation.md)
- [Cloud Hosted Setup](./tracing/hosted_installation.md)

## Tracing Walkthrough

When you first access the UI, you should see a page with your tracing sessions.
An initial one "default" should already be created for you.
A session is just a way to group traces together.
If you click on a session, it will take you to a page with no recorded traces that says "No Runs."
You can create a new session with the new session form.

![](tracing/homepage.png)

If we click on the `default` session, we can see that to start we have no traces stored.

![](tracing/default_empty.png)

If we now start running chains and agents with tracing enabled, we will see data show up here.
To do so, we can run [this notebook](tracing/agent_with_tracing.ipynb) as an example.
After running it, we will see an initial trace show up.

![](tracing/first_trace.png)

From here we can explore the trace at a high level by clicking on the arrow to show nested runs.
We can keep on clicking further and further down to explore deeper and deeper.

![](tracing/explore_trace.png)

We can also click on the "Explore" button of the top level run to dive even deeper.
Here, we can see the inputs and outputs in full, as well as all the nested traces.

![](tracing/explore.png)

We can keep on exploring each of these nested traces in more detail.
For example, here is the lowest level trace with the exact inputs/outputs to the LLM.

![](tracing/explore_llm.png)

## Changing Sessions
1. To initially record traces to a session other than `"default"`, you can set the `LANGCHAIN_SESSION` environment variable to the name of the session you want to record to:

```python
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
os.environ["LANGCHAIN_SESSION"] = "my_session" # Make sure this session actually exists. You can create a new session in the UI.
```

2. To switch sessions mid-script or mid-notebook, do NOT set the `LANGCHAIN_SESSION` environment variable. Instead: `langchain.set_tracing_callback_manager(session_name="my_session")`
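
As a minimal sketch of option 2 (assuming a `my_session` session already exists in the UI and that `OPENAI_API_KEY` is set in your environment):

```python
import os

# Must be set before anything is imported from langchain.
os.environ["LANGCHAIN_HANDLER"] = "langchain"

import langchain
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
llm("This call is traced to the default session.")

# Switch sessions mid-script instead of using LANGCHAIN_SESSION.
langchain.set_tracing_callback_manager(session_name="my_session")
llm("This call is traced to my_session.")
```
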
116 docs/tracing/agent_with_tracing.ipynb Normal file
@@ -0,0 +1,116 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "5371a9bb",
   "metadata": {},
   "source": [
    "# Tracing Walkthrough"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "17c04cc6-c93d-4b6c-a033-e897577f4ed1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\"\n",
    "\n",
    "## Uncomment this if using hosted setup.\n",
    "\n",
    "# os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://langchain-api-gateway-57eoxz8z.uc.gateway.dev\" \n",
    "\n",
    "## Uncomment this if you want traces to be recorded to \"my_session\" instead of default.\n",
    "\n",
    "# os.environ[\"LANGCHAIN_SESSION\"] = \"my_session\" \n",
    "\n",
    "## Better to set this environment variable in the terminal\n",
    "## Uncomment this if using hosted version. Replace \"my_api_key\" with your actual API Key.\n",
    "\n",
    "# os.environ[\"LANGCHAIN_API_KEY\"] = \"my_api_key\" \n",
    "\n",
    "import langchain\n",
    "from langchain.agents import Tool, initialize_agent, load_tools\n",
    "from langchain.llms import OpenAI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "bfa16b79-aa4b-4d41-a067-70d1f593f667",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
      "\u001b[32;1m\u001b[1;3m I need to use a calculator to solve this.\n",
      "Action: Calculator\n",
      "Action Input: 2^.123243\u001b[0m\n",
      "Observation: \u001b[36;1m\u001b[1;3mAnswer: 1.0891804557407723\n",
      "\u001b[0m\n",
      "Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
      "Final Answer: 1.0891804557407723\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'1.0891804557407723'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Agent run with tracing. Ensure that OPENAI_API_KEY is set appropriately to run this example.\n",
    "\n",
    "llm = OpenAI(temperature=0)\n",
    "tools = load_tools([\"llm-math\"], llm=llm)\n",
    "agent = initialize_agent(\n",
    "    tools, llm, agent=\"zero-shot-react-description\", verbose=True\n",
    ")\n",
    "\n",
    "agent.run(\"What is 2 raised to .123243 power?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "25addd7f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

BIN docs/tracing/default_empty.png Normal file (binary file not shown; 73 KiB)
BIN docs/tracing/explore.png Normal file (binary file not shown; 348 KiB)
BIN docs/tracing/explore_llm.png Normal file (binary file not shown; 239 KiB)
BIN docs/tracing/explore_trace.png Normal file (binary file not shown; 253 KiB)
BIN docs/tracing/first_trace.png Normal file (binary file not shown; 117 KiB)
BIN docs/tracing/homepage.png Normal file (binary file not shown; 94 KiB)

36 docs/tracing/hosted_installation.md Normal file
@@ -0,0 +1,36 @@
# Cloud Hosted Setup

We offer a hosted version of tracing at [langchainplus.vercel.app](https://langchainplus.vercel.app/). You can use this to view traces from your run without having to run the server locally.

Note: we are currently only offering this to a limited number of users. The hosted platform is VERY alpha, in active development, and data might be dropped at any time. Don't depend on data being persisted in the system long term and don't log traces that may contain sensitive information. If you're interested in using the hosted platform, please fill out the form [here](https://forms.gle/tRCEMSeopZf6TE3b6).

## Installation

1. Login to the system and click "API Key" in the top right corner. Generate a new key and keep it safe. You will need it to authenticate with the system.

## Environment Setup

After installation, you must now set up your environment to use tracing.

This can be done by setting an environment variable in your terminal by running `export LANGCHAIN_HANDLER=langchain`.

You can also do this by adding the below snippet to the top of every script. **IMPORTANT:** this must go at the VERY TOP of your script, before you import anything from `langchain`.

```python
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
```

You will also need to set an environment variable to specify the endpoint and your API key. This can be done with the following environment variables:

1. `LANGCHAIN_ENDPOINT` = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
2. `LANGCHAIN_API_KEY` - set this to the API key you generated during installation.

An example of adding all relevant environment variables is below:

```python
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
os.environ["LANGCHAIN_ENDPOINT"] = "https://langchain-api-gateway-57eoxz8z.uc.gateway.dev"
os.environ["LANGCHAIN_API_KEY"] = "my_api_key"  # Don't commit this to your repo! Better to set it in your terminal.
```

35 docs/tracing/local_installation.md Normal file
@@ -0,0 +1,35 @@
# Locally Hosted Setup

This page contains instructions for installing and then setting up the environment to use the locally hosted version of tracing.

## Installation

1. Ensure you have Docker installed (see [Get Docker](https://docs.docker.com/get-docker/)) and that it’s running.
2. Install the latest version of `langchain`: `pip install langchain` or `pip install langchain -U` to upgrade your existing version.
3. Run `langchain-server`
    1. This will spin up the server in the terminal.
    2. Once you see the terminal output `langchain-langchain-frontend-1 | ➜ Local: [http://localhost:4173/](http://localhost:4173/)`, navigate to [http://localhost:4173/](http://localhost:4173/)
4. You should see a page with your tracing sessions. See the overview page for a walkthrough of the UI.
5. Currently, trace data is not guaranteed to be persisted between runs of `langchain-server`. If you want to persist your data, you can mount a volume to the Docker container. See the [Docker docs](https://docs.docker.com/storage/volumes/) for more info.
6. To stop the server, press `Ctrl+C` in the terminal where you ran `langchain-server`.

## Environment Setup

After installation, you must now set up your environment to use tracing.

This can be done by setting an environment variable in your terminal by running `export LANGCHAIN_HANDLER=langchain`.

You can also do this by adding the below snippet to the top of every script. **IMPORTANT:** this must go at the VERY TOP of your script, before you import anything from `langchain`.

```python
import os
os.environ["LANGCHAIN_HANDLER"] = "langchain"
```

@@ -4,7 +4,11 @@ from typing import Optional

from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
from langchain.cache import BaseCache
from langchain.callbacks import set_default_callback_manager, set_handler
from langchain.callbacks import (
    set_default_callback_manager,
    set_handler,
    set_tracing_callback_manager,
)
from langchain.chains import (
    ConversationChain,
    LLMBashChain,
@@ -68,4 +72,5 @@ __all__ = [
    "QAWithSourcesChain",
    "PALChain",
    "set_handler",
    "set_tracing_callback_manager",
]

@@ -1,12 +1,13 @@
"""Interface for agents."""
from langchain.agents.agent import Agent, AgentExecutor
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.initialize import initialize_agent
from langchain.agents.load_tools import get_all_tool_names, load_tools
from langchain.agents.loading import initialize_agent
from langchain.agents.loading import load_agent
from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent
from langchain.agents.react.base import ReActChain, ReActTextWorldAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
from langchain.agents.tools import Tool
from langchain.agents.tools import Tool, tool

__all__ = [
    "MRKLChain",
@@ -15,10 +16,12 @@ __all__ = [
    "AgentExecutor",
    "Agent",
    "Tool",
    "tool",
    "initialize_agent",
    "ZeroShotAgent",
    "ReActTextWorldAgent",
    "load_tools",
    "get_all_tool_names",
    "ConversationalAgent",
    "load_agent",
]

@@ -1,10 +1,13 @@
"""Chain that takes in an input and produces an action and action input."""
from __future__ import annotations

import json
import logging
from abc import abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

import yaml
from pydantic import BaseModel, root_validator

from langchain.agents.tools import Tool
@@ -30,6 +33,7 @@ class Agent(BaseModel):
    """

    llm_chain: LLMChain
    allowed_tools: Optional[List[str]] = None
    return_values: List[str] = ["output"]

    @abstractmethod
@@ -44,6 +48,29 @@ class Agent(BaseModel):
    def _stop(self) -> List[str]:
        return [f"\n{self.observation_prefix}"]

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
        return thoughts

    def _get_next_action(self, full_inputs: Dict[str, str]) -> AgentAction:
        full_output = self.llm_chain.predict(**full_inputs)
        parsed_output = self._extract_tool_and_input(full_output)
        while parsed_output is None:
            full_output = self._fix_text(full_output)
            full_inputs["agent_scratchpad"] += full_output
            output = self.llm_chain.predict(**full_inputs)
            full_output += output
            parsed_output = self._extract_tool_and_input(full_output)
        return AgentAction(
            tool=parsed_output[0], tool_input=parsed_output[1], log=full_output
        )

    def plan(
        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
    ) -> Union[AgentAction, AgentFinish]:
@@ -57,24 +84,14 @@ class Agent(BaseModel):
        Returns:
            Action specifying what tool to use.
        """
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
        thoughts = self._construct_scratchpad(intermediate_steps)
        new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
        full_inputs = {**kwargs, **new_inputs}
        full_output = self.llm_chain.predict(**full_inputs)
        parsed_output = self._extract_tool_and_input(full_output)
        while parsed_output is None:
            full_output = self._fix_text(full_output)
            full_inputs["agent_scratchpad"] += full_output
            output = self.llm_chain.predict(**full_inputs)
            full_output += output
            parsed_output = self._extract_tool_and_input(full_output)
        tool, tool_input = parsed_output
        if tool == self.finish_tool_name:
            return AgentFinish({"output": tool_input}, full_output)
        return AgentAction(tool, tool_input, full_output)

        action = self._get_next_action(full_inputs)
        if action.tool == self.finish_tool_name:
            return AgentFinish({"output": action.tool_input}, action.log)
        return action

    def prepare_for_new_call(self) -> None:
        """Prepare the agent for new call, if needed."""
@@ -146,7 +163,8 @@ class Agent(BaseModel):
            prompt=cls.create_prompt(tools),
            callback_manager=callback_manager,
        )
        return cls(llm_chain=llm_chain, **kwargs)
        tool_names = [tool.name for tool in tools]
        return cls(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)

    def return_stopped_response(
        self,
@@ -192,6 +210,50 @@ class Agent(BaseModel):
                f"got {early_stopping_method}"
            )

    @property
    @abstractmethod
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of agent."""
        _dict = super().dict()
        _dict["_type"] = self._agent_type
        return _dict

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the agent.

        Args:
            file_path: Path to file to save the agent to.

        Example:
        .. code-block:: python

            # If working with agent executor
            agent.agent.save(file_path="path/agent.yaml")
        """
        # Convert file to Path object.
        if isinstance(file_path, str):
            save_path = Path(file_path)
        else:
            save_path = file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        # Fetch dictionary to save
        agent_dict = self.dict()

        if save_path.suffix == ".json":
            with open(file_path, "w") as f:
                json.dump(agent_dict, f, indent=4)
        elif save_path.suffix == ".yaml":
            with open(file_path, "w") as f:
                yaml.dump(agent_dict, f, default_flow_style=False)
        else:
            raise ValueError(f"{save_path} must be json or yaml")


class AgentExecutor(Chain, BaseModel):
    """Consists of an agent using tools."""
@@ -199,7 +261,7 @@ class AgentExecutor(Chain, BaseModel):
    agent: Agent
    tools: List[Tool]
    return_intermediate_steps: bool = False
    max_iterations: Optional[int] = None
    max_iterations: Optional[int] = 15
    early_stopping_method: str = "force"

    @classmethod
@@ -215,6 +277,31 @@ class AgentExecutor(Chain, BaseModel):
            agent=agent, tools=tools, callback_manager=callback_manager, **kwargs
        )

    @root_validator()
    def validate_tools(cls, values: Dict) -> Dict:
        """Validate that tools are compatible with agent."""
        agent = values["agent"]
        tools = values["tools"]
        if agent.allowed_tools is not None:
            if set(agent.allowed_tools) != set([tool.name for tool in tools]):
                raise ValueError(
                    f"Allowed tools ({agent.allowed_tools}) different than "
                    f"provided tools ({[tool.name for tool in tools]})"
                )
        return values

    def save(self, file_path: Union[Path, str]) -> None:
        """Raise error - saving not supported for Agent Executors."""
        raise ValueError(
            "Saving not supported for agent executors. "
            "If you are trying to save the agent, please use the "
            "`.save_agent(...)`"
        )

    def save_agent(self, file_path: Union[Path, str]) -> None:
        """Save the underlying agent."""
        return self.agent.save(file_path)

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.
@@ -284,7 +371,7 @@ class AgentExecutor(Chain, BaseModel):
                observation = tool.func(output.tool_input)
                color = color_mapping[output.tool]
                return_direct = tool.return_direct
            except Exception as e:
            except (KeyboardInterrupt, Exception) as e:
                self.callback_manager.on_tool_error(e, verbose=self.verbose)
                raise e
            else:

@@ -18,6 +18,11 @@ class ConversationalAgent(Agent):

    ai_prefix: str = "AI"

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return "conversational-react-description"

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
@@ -70,8 +75,8 @@ class ConversationalAgent(Agent):
        return self.ai_prefix

    def _extract_tool_and_input(self, llm_output: str) -> Optional[Tuple[str, str]]:
        if f"{self.ai_prefix}: " in llm_output:
            return self.ai_prefix, llm_output.split(f"{self.ai_prefix}: ")[-1]
        if f"{self.ai_prefix}:" in llm_output:
            return self.ai_prefix, llm_output.split(f"{self.ai_prefix}:")[-1].strip()
        regex = r"Action: (.*?)\nAction Input: (.*)"
        match = re.search(regex, llm_output)
        if not match:
@@ -86,18 +91,29 @@
        llm: BaseLLM,
        tools: List[Tool],
        callback_manager: Optional[BaseCallbackManager] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        ai_prefix: str = "AI",
        human_prefix: str = "Human",
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools, ai_prefix=ai_prefix, human_prefix=human_prefix
            tools,
            ai_prefix=ai_prefix,
            human_prefix=human_prefix,
            prefix=prefix,
            suffix=suffix,
            input_variables=input_variables,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        return cls(llm_chain=llm_chain, ai_prefix=ai_prefix, **kwargs)
        tool_names = [tool.name for tool in tools]
        return cls(
            llm_chain=llm_chain, allowed_tools=tool_names, ai_prefix=ai_prefix, **kwargs
        )

72 langchain/agents/initialize.py Normal file
@@ -0,0 +1,72 @@
"""Load agent."""
|
||||
from typing import Any, List, Optional
|
||||
|
||||
from langchain.agents.agent import AgentExecutor
|
||||
from langchain.agents.loading import AGENT_TO_CLASS, load_agent
|
||||
from langchain.agents.tools import Tool
|
||||
from langchain.callbacks.base import BaseCallbackManager
|
||||
from langchain.llms.base import BaseLLM
|
||||
|
||||
|
||||
def initialize_agent(
|
||||
tools: List[Tool],
|
||||
llm: BaseLLM,
|
||||
agent: Optional[str] = None,
|
||||
callback_manager: Optional[BaseCallbackManager] = None,
|
||||
agent_path: Optional[str] = None,
|
||||
agent_kwargs: Optional[dict] = None,
|
||||
**kwargs: Any,
|
||||
) -> AgentExecutor:
|
||||
"""Load agent given tools and LLM.
|
||||
|
||||
Args:
|
||||
tools: List of tools this agent has access to.
|
||||
llm: Language model to use as the agent.
|
||||
agent: The agent to use. Valid options are:
|
||||
`zero-shot-react-description`
|
||||
`react-docstore`
|
||||
`self-ask-with-search`
|
||||
`conversational-react-description`
|
||||
If None and agent_path is also None, will default to
|
||||
`zero-shot-react-description`.
|
||||
callback_manager: CallbackManager to use. Global callback manager is used if
|
||||
not provided. Defaults to None.
|
||||
agent_path: Path to serialized agent to use.
|
||||
**kwargs: Additional key word arguments to pass to the agent.
|
||||
|
||||
Returns:
|
||||
An agent.
|
||||
"""
|
||||
if agent is None and agent_path is None:
|
||||
agent = "zero-shot-react-description"
|
||||
if agent is not None and agent_path is not None:
|
||||
raise ValueError(
|
||||
"Both `agent` and `agent_path` are specified, "
|
||||
"but at most only one should be."
|
||||
)
|
||||
if agent is not None:
|
||||
if agent not in AGENT_TO_CLASS:
|
||||
raise ValueError(
|
||||
f"Got unknown agent type: {agent}. "
|
||||
f"Valid types are: {AGENT_TO_CLASS.keys()}."
|
||||
)
|
||||
agent_cls = AGENT_TO_CLASS[agent]
|
||||
agent_kwargs = agent_kwargs or {}
|
||||
agent_obj = agent_cls.from_llm_and_tools(
|
||||
llm, tools, callback_manager=callback_manager, **agent_kwargs
|
||||
)
|
||||
elif agent_path is not None:
|
||||
agent_obj = load_agent(
|
||||
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"Somehow both `agent` and `agent_path` are None, "
|
||||
"this should never happen."
|
||||
)
|
||||
return AgentExecutor.from_agent_and_tools(
|
||||
agent=agent_obj,
|
||||
tools=tools,
|
||||
callback_manager=callback_manager,
|
||||
**kwargs,
|
||||
)
|
||||
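For reference, a minimal usage sketch of this new entry point (mirroring the tracing notebook above; it assumes `OPENAI_API_KEY` is set):

```python
from langchain.agents import initialize_agent, load_tools
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)

# Option 1: construct the agent by type name.
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)

# Option 2: load a serialized agent instead (mutually exclusive with `agent`).
# agent = initialize_agent(tools, llm, agent_path="path/agent.yaml")

agent.run("What is 2 raised to .123243 power?")
```
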
@@ -1,14 +1,19 @@
|
||||
"""Load agent."""
|
||||
from typing import Any, List, Optional
|
||||
"""Functionality for loading agents."""
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Any, List, Optional, Union
|
||||
|
||||
from langchain.agents.agent import AgentExecutor
|
||||
import yaml
|
||||
|
||||
from langchain.agents.agent import Agent
|
||||
from langchain.agents.conversational.base import ConversationalAgent
|
||||
from langchain.agents.mrkl.base import ZeroShotAgent
|
||||
from langchain.agents.react.base import ReActDocstoreAgent
|
||||
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
|
||||
from langchain.agents.tools import Tool
|
||||
from langchain.callbacks.base import BaseCallbackManager
|
||||
from langchain.chains.loading import load_chain, load_chain_from_config
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.utilities.loading import try_load_from_hub
|
||||
|
||||
AGENT_TO_CLASS = {
|
||||
"zero-shot-react-description": ZeroShotAgent,
|
||||
@@ -17,43 +22,86 @@ AGENT_TO_CLASS = {
|
||||
"conversational-react-description": ConversationalAgent,
|
||||
}
|
||||
|
||||
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
|
||||
|
||||
def initialize_agent(
|
||||
tools: List[Tool],
|
||||
llm: BaseLLM,
|
||||
agent: str = "zero-shot-react-description",
|
||||
callback_manager: Optional[BaseCallbackManager] = None,
|
||||
|
||||
def _load_agent_from_tools(
|
||||
config: dict, llm: BaseLLM, tools: List[Tool], **kwargs: Any
|
||||
) -> Agent:
|
||||
config_type = config.pop("_type")
|
||||
if config_type not in AGENT_TO_CLASS:
|
||||
raise ValueError(f"Loading {config_type} agent not supported")
|
||||
|
||||
if config_type not in AGENT_TO_CLASS:
|
||||
raise ValueError(f"Loading {config_type} agent not supported")
|
||||
agent_cls = AGENT_TO_CLASS[config_type]
|
||||
combined_config = {**config, **kwargs}
|
||||
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
|
||||
|
||||
|
||||
def load_agent_from_config(
|
||||
config: dict,
|
||||
llm: Optional[BaseLLM] = None,
|
||||
tools: Optional[List[Tool]] = None,
|
||||
**kwargs: Any,
|
||||
) -> AgentExecutor:
|
||||
"""Load agent given tools and LLM.
|
||||
) -> Agent:
|
||||
"""Load agent from Config Dict."""
|
||||
if "_type" not in config:
|
||||
raise ValueError("Must specify an agent Type in config")
|
||||
load_from_tools = config.pop("load_from_llm_and_tools", False)
|
||||
if load_from_tools:
|
||||
if llm is None:
|
||||
raise ValueError(
|
||||
"If `load_from_llm_and_tools` is set to True, "
|
||||
"then LLM must be provided"
|
||||
)
|
||||
if tools is None:
|
||||
raise ValueError(
|
||||
"If `load_from_llm_and_tools` is set to True, "
|
||||
"then tools must be provided"
|
||||
)
|
||||
return _load_agent_from_tools(config, llm, tools, **kwargs)
|
||||
config_type = config.pop("_type")
|
||||
|
||||
Args:
|
||||
tools: List of tools this agent has access to.
|
||||
llm: Language model to use as the agent.
|
||||
agent: The agent to use. Valid options are:
|
||||
`zero-shot-react-description`
|
||||
`react-docstore`
|
||||
`self-ask-with-search`
|
||||
`conversational-react-description`.
|
||||
callback_manager: CallbackManager to use. Global callback manager is used if
|
||||
not provided. Defaults to None.
|
||||
**kwargs: Additional key word arguments to pass to the agent.
|
||||
if config_type not in AGENT_TO_CLASS:
|
||||
raise ValueError(f"Loading {config_type} agent not supported")
|
||||
|
||||
Returns:
|
||||
An agent.
|
||||
"""
|
||||
if agent not in AGENT_TO_CLASS:
|
||||
raise ValueError(
|
||||
f"Got unknown agent type: {agent}. "
|
||||
f"Valid types are: {AGENT_TO_CLASS.keys()}."
|
||||
)
|
||||
agent_cls = AGENT_TO_CLASS[agent]
|
||||
agent_obj = agent_cls.from_llm_and_tools(
|
||||
llm, tools, callback_manager=callback_manager
|
||||
)
|
||||
return AgentExecutor.from_agent_and_tools(
|
||||
agent=agent_obj,
|
||||
tools=tools,
|
||||
callback_manager=callback_manager,
|
||||
**kwargs,
|
||||
)
|
||||
agent_cls = AGENT_TO_CLASS[config_type]
|
||||
if "llm_chain" in config:
|
||||
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
|
||||
elif "llm_chain_path" in config:
|
||||
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
|
||||
else:
|
||||
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
|
||||
combined_config = {**config, **kwargs}
|
||||
return agent_cls(**combined_config) # type: ignore
|
||||
|
||||
|
||||
def load_agent(path: Union[str, Path], **kwargs: Any) -> Agent:
|
||||
"""Unified method for loading a agent from LangChainHub or local fs."""
|
||||
if hub_result := try_load_from_hub(
|
||||
path, _load_agent_from_file, "agents", {"json", "yaml"}
|
||||
):
|
||||
return hub_result
|
||||
else:
|
||||
return _load_agent_from_file(path, **kwargs)
|
||||
|
||||
|
||||
def _load_agent_from_file(file: Union[str, Path], **kwargs: Any) -> Agent:
|
||||
"""Load agent from file."""
|
||||
# Convert file to Path object.
|
||||
if isinstance(file, str):
|
||||
file_path = Path(file)
|
||||
else:
|
||||
file_path = file
|
||||
# Load from either json or yaml.
|
||||
if file_path.suffix == ".json":
|
||||
with open(file_path) as f:
|
||||
config = json.load(f)
|
||||
elif file_path.suffix == ".yaml":
|
||||
with open(file_path, "r") as f:
|
||||
config = yaml.safe_load(f)
|
||||
else:
|
||||
raise ValueError("File type must be json or yaml")
|
||||
# Load the agent from the config now.
|
||||
return load_agent_from_config(config, **kwargs)
|
||||
|
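A minimal sketch of how the new config-based loader above might be driven, assuming an OpenAI key is configured; the tool definition and config values here are illustrative, not from the diff:

```python
from langchain.agents.loading import load_agent_from_config
from langchain.agents.tools import Tool
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = [Tool(name="echo", func=lambda q: q, description="Echoes the query.")]

# "load_from_llm_and_tools" makes the loader rebuild the agent from llm + tools
# via from_llm_and_tools instead of deserializing an llm_chain from the config.
config = {"_type": "zero-shot-react-description", "load_from_llm_and_tools": True}
agent = load_agent_from_config(config, llm=llm, tools=tools)
```

`load_agent("agent.yaml")` would take the same path after reading the config from disk, or from LangChainHub via `try_load_from_hub`.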
@@ -7,6 +7,8 @@ from typing import Any, Callable, List, NamedTuple, Optional, Tuple
from langchain.agents.agent import Agent, AgentExecutor
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.llms.base import BaseLLM
from langchain.prompts import PromptTemplate

@@ -49,6 +51,11 @@ def get_action_and_input(llm_output: str) -> Tuple[str, str]:
class ZeroShotAgent(Agent):
    """Agent for the MRKL chain."""

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return "zero-shot-react-description"

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
@@ -87,6 +94,30 @@ class ZeroShotAgent(Agent):
        input_variables = ["input", "agent_scratchpad"]
        return PromptTemplate(template=template, input_variables=input_variables)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLLM,
        tools: List[Tool],
        callback_manager: Optional[BaseCallbackManager] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools, prefix=prefix, suffix=suffix, input_variables=input_variables
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        return cls(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)

    @classmethod
    def _validate_tools(cls, tools: List[Tool]) -> None:
        for tool in tools:

@@ -17,6 +17,11 @@ from langchain.prompts.base import BasePromptTemplate
class ReActDocstoreAgent(Agent, BaseModel):
    """Agent for the ReAct chain."""

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return "react-docstore"

    @classmethod
    def create_prompt(cls, tools: List[Tool]) -> BasePromptTemplate:
        """Return default prompt."""

@@ -12,6 +12,11 @@ from langchain.serpapi import SerpAPIWrapper
class SelfAskWithSearchAgent(Agent):
    """Agent for the self-ask-with-search paper."""

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return "self-ask-with-search"

    @classmethod
    def create_prompt(cls, tools: List[Tool]) -> BasePromptTemplate:
        """Prompt does not depend on tools."""

@@ -1,6 +1,7 @@
"""Interface for tools."""
from dataclasses import dataclass
from typing import Callable, Optional
from inspect import signature
from typing import Any, Callable, Optional, Union


@dataclass
@@ -11,3 +12,65 @@ class Tool:
    func: Callable[[str], str]
    description: Optional[str] = None
    return_direct: bool = False

    def __call__(self, *args: Any, **kwargs: Any) -> str:
        """Make tools callable by piping through to `func`."""
        return self.func(*args, **kwargs)


def tool(
    *args: Union[str, Callable], return_direct: bool = False
) -> Union[Callable, Tool]:
    """Make tools out of functions, can be used with or without arguments.

    Requires:
        - Function must be of type (str) -> str
        - Function must have a docstring

    Examples:
        .. code-block:: python

            @tool
            def search_api(query: str) -> str:
                # Searches the API for the query.
                return

            @tool("search", return_direct=True)
            def search_api(query: str) -> str:
                # Searches the API for the query.
                return
    """

    def _make_with_name(tool_name: str) -> Callable:
        def _make_tool(func: Callable[[str], str]) -> Tool:
            assert func.__doc__, "Function must have a docstring"
            # Description example:
            # search_api(query: str) - Searches the API for the query.
            description = f"{tool_name}{signature(func)} - {func.__doc__.strip()}"
            tool = Tool(
                name=tool_name,
                func=func,
                description=description,
                return_direct=return_direct,
            )
            return tool

        return _make_tool

    if len(args) == 1 and isinstance(args[0], str):
        # if the argument is a string, then we use the string as the tool name
        # Example usage: @tool("search", return_direct=True)
        return _make_with_name(args[0])
    elif len(args) == 1 and callable(args[0]):
        # if the argument is a function, then we use the function name as the tool name
        # Example usage: @tool
        return _make_with_name(args[0].__name__)(args[0])
    elif len(args) == 0:
        # if there are no arguments, then we use the function name as the tool name
        # Example usage: @tool(return_direct=True)
        def _partial(func: Callable[[str], str]) -> Tool:
            return _make_with_name(func.__name__)(func)

        return _partial
    else:
        raise ValueError("Too many arguments for tool decorator")
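A short sketch of the `@tool` decorator added above; the function body is a placeholder, everything else follows directly from the code in the hunk:

```python
from langchain.agents.tools import tool

@tool("search", return_direct=True)
def search_api(query: str) -> str:
    """Searches the API for the query."""
    return f"results for {query}"  # placeholder body

# The decorator returns a Tool, so it is directly callable and self-describing.
assert search_api.name == "search"
assert search_api.return_direct
print(search_api("langchain"))
# Description is built from the signature plus the docstring:
# "search(query: str) -> str - Searches the API for the query."
print(search_api.description)
```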
@@ -4,8 +4,7 @@ from typing import Any, Dict, List, Optional, Tuple

from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy.orm import Session, declarative_base

from langchain.schema import Generation


@@ -1,11 +1,13 @@
"""Callback handlers that allow listening to events in LangChain."""
import os
from contextlib import contextmanager
from typing import Generator
from typing import Generator, Optional

from langchain.callbacks.base import BaseCallbackHandler, BaseCallbackManager
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.shared import SharedCallbackManager
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers import SharedLangChainTracer


def get_callback_manager() -> BaseCallbackManager:
@@ -21,7 +23,31 @@ def set_handler(handler: BaseCallbackHandler) -> None:

def set_default_callback_manager() -> None:
    """Set default callback manager."""
    set_handler(StdOutCallbackHandler())
    default_handler = os.environ.get("LANGCHAIN_HANDLER", "stdout")
    if default_handler == "stdout":
        set_handler(StdOutCallbackHandler())
    elif default_handler == "langchain":
        session = os.environ.get("LANGCHAIN_SESSION")
        set_tracing_callback_manager(session)
    else:
        raise ValueError(
            f"LANGCHAIN_HANDLER should be one of `stdout` "
            f"or `langchain`, got {default_handler}"
        )


def set_tracing_callback_manager(session_name: Optional[str] = None) -> None:
    """Set tracing callback manager."""
    handler = SharedLangChainTracer()
    callback = get_callback_manager()
    callback.set_handlers([handler, StdOutCallbackHandler()])
    if session_name is None:
        handler.load_default_session()
    else:
        try:
            handler.load_session(session_name)
        except Exception:
            raise ValueError(f"session {session_name} not found")


@contextmanager
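With this change the process-wide default handler is chosen from the environment. A sketch of the two supported configurations; the handler names come straight from the branch above, the session name is a placeholder:

```python
import os

# Default: print chain/LLM events to stdout.
os.environ["LANGCHAIN_HANDLER"] = "stdout"

# Or: trace runs to the LangChain endpoint, optionally naming a session.
os.environ["LANGCHAIN_HANDLER"] = "langchain"
os.environ["LANGCHAIN_SESSION"] = "my-session"  # placeholder session name

from langchain.callbacks import set_default_callback_manager

set_default_callback_manager()  # raises ValueError for any other handler value
```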
@@ -1,25 +1,34 @@
"""Base callback handler that can be used to handle callbacks from langchain."""

from abc import ABC, abstractmethod
from typing import Any, Dict, List

from pydantic import BaseModel
from typing import Any, Dict, List, Union

from langchain.schema import AgentAction, AgentFinish, LLMResult


class BaseCallbackHandler(BaseModel, ABC):
class BaseCallbackHandler(ABC):
    """Base callback handler that can be used to handle callbacks from langchain."""

    ignore_llm: bool = False
    ignore_chain: bool = False
    ignore_agent: bool = False

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return False

    @property
    def ignore_llm(self) -> bool:
        """Whether to ignore LLM callbacks."""
        return False

    @property
    def ignore_chain(self) -> bool:
        """Whether to ignore chain callbacks."""
        return False

    @property
    def ignore_agent(self) -> bool:
        """Whether to ignore agent callbacks."""
        return False

    @abstractmethod
    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
@@ -31,7 +40,9 @@ class BaseCallbackHandler(BaseModel, ABC):
        """Run when LLM ends running."""

    @abstractmethod
    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""

    @abstractmethod
@@ -45,7 +56,9 @@ class BaseCallbackHandler(BaseModel, ABC):
        """Run when chain ends running."""

    @abstractmethod
    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""

    @abstractmethod
@@ -59,7 +72,9 @@ class BaseCallbackHandler(BaseModel, ABC):
        """Run when tool ends running."""

    @abstractmethod
    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""

    @abstractmethod
@@ -82,15 +97,21 @@ class BaseCallbackManager(BaseCallbackHandler, ABC):
    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove a handler from the callback manager."""

    @abstractmethod
    def set_handler(self, handler: BaseCallbackHandler) -> None:
        """Set handler as the only handler on the callback manager."""
        self.set_handlers([handler])

    @abstractmethod
    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""


class CallbackManager(BaseCallbackManager):
    """Callback manager that can be used to handle callbacks from langchain."""

    handlers: List[BaseCallbackHandler]
    def __init__(self, handlers: List[BaseCallbackHandler]) -> None:
        """Initialize callback manager."""
        self.handlers: List[BaseCallbackHandler] = handlers

    def on_llm_start(
        self,
@@ -115,7 +136,10 @@ class CallbackManager(BaseCallbackManager):
            handler.on_llm_end(response)

    def on_llm_error(
        self, error: Exception, verbose: bool = False, **kwargs: Any
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        for handler in self.handlers:
@@ -146,7 +170,10 @@ class CallbackManager(BaseCallbackManager):
            handler.on_chain_end(outputs)

    def on_chain_error(
        self, error: Exception, verbose: bool = False, **kwargs: Any
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        for handler in self.handlers:
@@ -175,7 +202,10 @@ class CallbackManager(BaseCallbackManager):
            handler.on_tool_end(output, **kwargs)

    def on_tool_error(
        self, error: Exception, verbose: bool = False, **kwargs: Any
        self,
        error: Union[Exception, KeyboardInterrupt],
        verbose: bool = False,
        **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        for handler in self.handlers:
@@ -206,6 +236,6 @@ class CallbackManager(BaseCallbackManager):
        """Remove a handler from the callback manager."""
        self.handlers.remove(handler)

    def set_handler(self, handler: BaseCallbackHandler) -> None:
        """Set handler as the only handler on the callback manager."""
        self.handlers = [handler]
    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""
        self.handlers = handlers

@@ -1,5 +1,5 @@
"""Callback Handler that prints to std out."""
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
@@ -29,7 +29,9 @@ class OpenAICallbackHandler(BaseCallbackHandler):
        if "total_tokens" in token_usage:
            self.total_tokens += token_usage["total_tokens"]

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

@@ -43,7 +45,9 @@ class OpenAICallbackHandler(BaseCallbackHandler):
        """Print out that we finished a chain."""
        pass

    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

@@ -68,7 +72,9 @@ class OpenAICallbackHandler(BaseCallbackHandler):
        """If not the final action, print out observation."""
        pass

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass


@@ -1,7 +1,7 @@
"""A shared CallbackManager."""

import threading
from typing import Any, Dict, List
from typing import Any, Dict, List, Union

from langchain.callbacks.base import (
    BaseCallbackHandler,
@@ -46,7 +46,9 @@ class SharedCallbackManager(Singleton, BaseCallbackManager):
        with self._lock:
            self._callback_manager.on_llm_end(response, **kwargs)

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when LLM errors."""
        with self._lock:
            self._callback_manager.on_llm_error(error, **kwargs)
@@ -63,7 +65,9 @@ class SharedCallbackManager(Singleton, BaseCallbackManager):
        with self._lock:
            self._callback_manager.on_chain_end(outputs, **kwargs)

    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when chain errors."""
        with self._lock:
            self._callback_manager.on_chain_error(error, **kwargs)
@@ -80,7 +84,9 @@ class SharedCallbackManager(Singleton, BaseCallbackManager):
        with self._lock:
            self._callback_manager.on_tool_end(output, **kwargs)

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Run when tool errors."""
        with self._lock:
            self._callback_manager.on_tool_error(error, **kwargs)
@@ -105,7 +111,7 @@ class SharedCallbackManager(Singleton, BaseCallbackManager):
        with self._lock:
            self._callback_manager.remove_handler(callback)

    def set_handler(self, handler: BaseCallbackHandler) -> None:
        """Set handler as the only handler on the callback manager."""
    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""
        with self._lock:
            self._callback_manager.handlers = [handler]
            self._callback_manager.handlers = handlers

@@ -1,5 +1,5 @@
"""Callback Handler that prints to std out."""
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.input import print_text
@@ -19,7 +19,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
        """Do nothing."""
        pass

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

@@ -34,7 +36,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
        """Print out that we finished a chain."""
        print("\n\033[1m> Finished chain.\033[0m")

    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

@@ -61,7 +65,9 @@ class StdOutCallbackHandler(BaseCallbackHandler):
        print_text(output, color=color)
        print_text(f"\n{llm_prefix}")

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass


@@ -1,5 +1,5 @@
"""Callback Handler that logs to streamlit."""
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, Union

import streamlit as st

@@ -22,7 +22,9 @@ class StreamlitCallbackHandler(BaseCallbackHandler):
        """Do nothing."""
        pass

    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

@@ -37,7 +39,9 @@ class StreamlitCallbackHandler(BaseCallbackHandler):
        """Print out that we finished a chain."""
        st.write("Finished chain.")

    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

@@ -62,7 +66,9 @@ class StreamlitCallbackHandler(BaseCallbackHandler):
        st.write(f"{observation_prefix}{output}")
        st.write(llm_prefix)

    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass
langchain/callbacks/tracers/__init__.py (new file, 12 lines)
@@ -0,0 +1,12 @@
"""Tracers that record execution of LangChain runs."""

from langchain.callbacks.tracers.base import SharedTracer, Tracer
from langchain.callbacks.tracers.langchain import BaseLangChainTracer


class SharedLangChainTracer(SharedTracer, BaseLangChainTracer):
    """Shared tracer that records LangChain execution to LangChain endpoint."""


class LangChainTracer(Tracer, BaseLangChainTracer):
    """Tracer that records LangChain execution to LangChain endpoint."""
langchain/callbacks/tracers/base.py (new file, 334 lines)
@@ -0,0 +1,334 @@
"""Base interfaces for tracing runs."""
from __future__ import annotations

import threading
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.shared import Singleton
from langchain.callbacks.tracers.schemas import (
    ChainRun,
    LLMRun,
    ToolRun,
    TracerSession,
    TracerSessionCreate,
)
from langchain.schema import AgentAction, AgentFinish, LLMResult


class TracerException(Exception):
    """Base class for exceptions in tracers module."""


class BaseTracer(BaseCallbackHandler, ABC):
    """Base interface for tracers."""

    @abstractmethod
    def _add_child_run(
        self,
        parent_run: Union[ChainRun, ToolRun],
        child_run: Union[LLMRun, ChainRun, ToolRun],
    ) -> None:
        """Add child run to a chain run or tool run."""

    @abstractmethod
    def _persist_run(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
        """Persist a run."""

    @abstractmethod
    def _persist_session(self, session: TracerSessionCreate) -> TracerSession:
        """Persist a tracing session."""

    @abstractmethod
    def _generate_id(self) -> Optional[Union[int, str]]:
        """Generate an id for a run."""

    def new_session(self, name: Optional[str] = None, **kwargs: Any) -> TracerSession:
        """NOT thread safe, do not call this method from multiple threads."""
        session_create = TracerSessionCreate(name=name, extra=kwargs)
        session = self._persist_session(session_create)
        self._session = session
        return session

    @abstractmethod
    def load_session(self, session_name: str) -> TracerSession:
        """Load a tracing session and set it as the Tracer's session."""

    @abstractmethod
    def load_default_session(self) -> TracerSession:
        """Load the default tracing session and set it as the Tracer's session."""

    @property
    @abstractmethod
    def _stack(self) -> List[Union[LLMRun, ChainRun, ToolRun]]:
        """Get the tracer stack."""

    @property
    @abstractmethod
    def _execution_order(self) -> int:
        """Get the execution order for a run."""

    @_execution_order.setter
    @abstractmethod
    def _execution_order(self, value: int) -> None:
        """Set the execution order for a run."""

    @property
    @abstractmethod
    def _session(self) -> Optional[TracerSession]:
        """Get the tracing session."""

    @_session.setter
    @abstractmethod
    def _session(self, value: TracerSession) -> None:
        """Set the tracing session."""

    def _start_trace(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
        """Start a trace for a run."""
        self._execution_order += 1

        if self._stack:
            if not (
                isinstance(self._stack[-1], ChainRun)
                or isinstance(self._stack[-1], ToolRun)
            ):
                raise TracerException(
                    f"Nested {run.__class__.__name__} can only be"
                    f" logged inside a ChainRun or ToolRun"
                )
            self._add_child_run(self._stack[-1], run)
        self._stack.append(run)

    def _end_trace(self) -> None:
        """End a trace for a run."""
        run = self._stack.pop()
        if not self._stack:
            self._execution_order = 1
            self._persist_run(run)

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Start a trace for an LLM run."""
        if self._session is None:
            raise TracerException(
                "Initialize a session with `new_session()` before starting a trace."
            )

        llm_run = LLMRun(
            serialized=serialized,
            prompts=prompts,
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=self._execution_order,
            session_id=self._session.id,
            id=self._generate_id(),
        )
        self._start_trace(llm_run)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """End a trace for an LLM run."""
        if not self._stack or not isinstance(self._stack[-1], LLMRun):
            raise TracerException("No LLMRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].response = response

        self._end_trace()

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Handle an error for an LLM run."""
        if not self._stack or not isinstance(self._stack[-1], LLMRun):
            raise TracerException("No LLMRun found to be traced")

        self._stack[-1].error = repr(error)
        self._stack[-1].end_time = datetime.utcnow()

        self._end_trace()

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Start a trace for a chain run."""
        if self._session is None:
            raise TracerException(
                "Initialize a session with `new_session()` before starting a trace."
            )

        chain_run = ChainRun(
            serialized=serialized,
            inputs=inputs,
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=self._execution_order,
            child_runs=[],
            session_id=self._session.id,
            id=self._generate_id(),
        )
        self._start_trace(chain_run)

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """End a trace for a chain run."""
        if not self._stack or not isinstance(self._stack[-1], ChainRun):
            raise TracerException("No ChainRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].outputs = outputs

        self._end_trace()

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Handle an error for a chain run."""
        if not self._stack or not isinstance(self._stack[-1], ChainRun):
            raise TracerException("No ChainRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].error = repr(error)

        self._end_trace()

    def on_tool_start(
        self, serialized: Dict[str, Any], action: AgentAction, **kwargs: Any
    ) -> None:
        """Start a trace for a tool run."""
        if self._session is None:
            raise TracerException(
                "Initialize a session with `new_session()` before starting a trace."
            )

        tool_run = ToolRun(
            serialized=serialized,
            action=action.tool,
            tool_input=action.tool_input,
            extra=kwargs,
            start_time=datetime.utcnow(),
            execution_order=self._execution_order,
            child_runs=[],
            session_id=self._session.id,
            id=self._generate_id(),
        )
        self._start_trace(tool_run)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """End a trace for a tool run."""
        if not self._stack or not isinstance(self._stack[-1], ToolRun):
            raise TracerException("No ToolRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].output = output

        self._end_trace()

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Handle an error for a tool run."""
        if not self._stack or not isinstance(self._stack[-1], ToolRun):
            raise TracerException("No ToolRun found to be traced")

        self._stack[-1].end_time = datetime.utcnow()
        self._stack[-1].error = repr(error)

        self._end_trace()

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Handle a text message."""
        pass

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Handle an agent finish message."""
        pass


class Tracer(BaseTracer, ABC):
    """A non-thread safe implementation of the BaseTracer interface."""

    def __init__(self) -> None:
        """Initialize a tracer."""
        self._tracer_stack: List[Union[LLMRun, ChainRun, ToolRun]] = []
        self._tracer_execution_order = 1
        self._tracer_session: Optional[TracerSession] = None

    @property
    def _stack(self) -> List[Union[LLMRun, ChainRun, ToolRun]]:
        """Get the tracer stack."""
        return self._tracer_stack

    @property
    def _execution_order(self) -> int:
        """Get the execution order for a run."""
        return self._tracer_execution_order

    @_execution_order.setter
    def _execution_order(self, value: int) -> None:
        """Set the execution order for a run."""
        self._tracer_execution_order = value

    @property
    def _session(self) -> Optional[TracerSession]:
        """Get the tracing session."""
        return self._tracer_session

    @_session.setter
    def _session(self, value: TracerSession) -> None:
        """Set the tracing session."""
        if self._stack:
            raise TracerException(
                "Cannot set a session while a trace is being recorded"
            )
        self._tracer_session = value


@dataclass
class TracerStack(threading.local):
    """A stack of runs used for logging."""

    stack: List[Union[LLMRun, ChainRun, ToolRun]] = field(default_factory=list)
    execution_order: int = 1


class SharedTracer(Singleton, BaseTracer, ABC):
    """A thread-safe Singleton implementation of BaseTracer."""

    _tracer_stack = TracerStack()
    _tracer_session = None

    @property
    def _stack(self) -> List[Union[LLMRun, ChainRun, ToolRun]]:
        """Get the tracer stack."""
        return self._tracer_stack.stack

    @property
    def _execution_order(self) -> int:
        """Get the execution order for a run."""
        return self._tracer_stack.execution_order

    @_execution_order.setter
    def _execution_order(self, value: int) -> None:
        """Set the execution order for a run."""
        self._tracer_stack.execution_order = value

    @property
    def _session(self) -> Optional[TracerSession]:
        """Get the tracing session."""
        return self._tracer_session

    @_session.setter
    def _session(self, value: TracerSession) -> None:
        """Set the tracing session."""
        with self._lock:
            # TODO: currently, we are only checking current thread's stack.
            # Need to make sure that we are not in the middle of a trace
            # in any thread.
            if self._stack:
                raise TracerException(
                    "Cannot set a session while a trace is being recorded"
                )
            self._tracer_session = value
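The tracer above keeps a per-thread stack of runs: `_start_trace` pushes a run and attaches it to its parent, while `_end_trace` pops and only persists once the outermost run finishes. A minimal sketch of that invariant, driving the non-shared `LangChainTracer` by hand; the serialized payloads are placeholders, and `new_session()` falls back to a default session if no endpoint is running:

```python
from langchain.callbacks.tracers import LangChainTracer
from langchain.schema import LLMResult

tracer = LangChainTracer()
tracer.new_session()  # logs a warning and uses a default session when offline

# Nested runs must sit inside a ChainRun or ToolRun.
tracer.on_chain_start({"name": "demo_chain"}, inputs={"q": "hi"})
tracer.on_llm_start({"name": "demo_llm"}, prompts=["hi"])
tracer.on_llm_end(LLMResult(generations=[]))  # pops the LLMRun; ChainRun stays open
tracer.on_chain_end(outputs={"a": "ok"})      # outermost pop persists the whole tree
```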
langchain/callbacks/tracers/langchain.py (new file, 112 lines)
@@ -0,0 +1,112 @@
"""A Tracer implementation that records to LangChain endpoint."""
from __future__ import annotations

import logging
import os
from abc import ABC
from typing import Any, Dict, Optional, Union

import requests

from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import (
    ChainRun,
    LLMRun,
    ToolRun,
    TracerSession,
    TracerSessionCreate,
)


class BaseLangChainTracer(BaseTracer, ABC):
    """An implementation of the SharedTracer that POSTS to the langchain endpoint."""

    always_verbose: bool = True
    _endpoint: str = os.getenv("LANGCHAIN_ENDPOINT", "http://localhost:8000")
    _headers: Dict[str, Any] = {"Content-Type": "application/json"}
    if os.getenv("LANGCHAIN_API_KEY"):
        _headers["x-api-key"] = os.getenv("LANGCHAIN_API_KEY")

    def _persist_run(self, run: Union[LLMRun, ChainRun, ToolRun]) -> None:
        """Persist a run."""
        if isinstance(run, LLMRun):
            endpoint = f"{self._endpoint}/llm-runs"
        elif isinstance(run, ChainRun):
            endpoint = f"{self._endpoint}/chain-runs"
        else:
            endpoint = f"{self._endpoint}/tool-runs"

        try:
            requests.post(
                endpoint,
                data=run.json(),
                headers=self._headers,
            )
        except Exception as e:
            logging.warning(f"Failed to persist run: {e}")

    def _persist_session(self, session_create: TracerSessionCreate) -> TracerSession:
        """Persist a session."""
        try:
            r = requests.post(
                f"{self._endpoint}/sessions",
                data=session_create.json(),
                headers=self._headers,
            )
            session = TracerSession(id=r.json()["id"], **session_create.dict())
        except Exception as e:
            logging.warning(f"Failed to create session, using default session: {e}")
            session = TracerSession(id=1, **session_create.dict())
        return session

    def load_session(self, session_name: str) -> TracerSession:
        """Load a session from the tracer."""
        try:
            r = requests.get(
                f"{self._endpoint}/sessions?name={session_name}",
                headers=self._headers,
            )
            tracer_session = TracerSession(**r.json()[0])
            self._session = tracer_session
            return tracer_session
        except Exception as e:
            logging.warning(
                f"Failed to load session {session_name}, using empty session: {e}"
            )
            tracer_session = TracerSession(id=1)
            self._session = tracer_session
            return tracer_session

    def load_default_session(self) -> TracerSession:
        """Load the default tracing session and set it as the Tracer's session."""
        try:
            r = requests.get(
                f"{self._endpoint}/sessions",
                headers=self._headers,
            )
            # Use the first session result
            tracer_session = TracerSession(**r.json()[0])
            self._session = tracer_session
            return tracer_session
        except Exception as e:
            logging.warning(f"Failed to default session, using empty session: {e}")
            tracer_session = TracerSession(id=1)
            self._session = tracer_session
            return tracer_session

    def _add_child_run(
        self,
        parent_run: Union[ChainRun, ToolRun],
        child_run: Union[LLMRun, ChainRun, ToolRun],
    ) -> None:
        """Add child run to a chain run or tool run."""
        if isinstance(child_run, LLMRun):
            parent_run.child_llm_runs.append(child_run)
        elif isinstance(child_run, ChainRun):
            parent_run.child_chain_runs.append(child_run)
        else:
            parent_run.child_tool_runs.append(child_run)

    def _generate_id(self) -> Optional[Union[int, str]]:
        """Generate an id for a run."""
        return None
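`BaseLangChainTracer` reads its destination from the environment at class-definition time, so the variables must be set before the module is imported. A sketch of the knobs it consults; the API key value is a placeholder:

```python
import os

# Where runs and sessions are POSTed; defaults to a local server.
os.environ["LANGCHAIN_ENDPOINT"] = "http://localhost:8000"
# Optional; when present it is sent as the `x-api-key` header.
os.environ["LANGCHAIN_API_KEY"] = "my-key"  # placeholder

# Import only after the environment is configured, since the class body
# calls os.getenv when it is first executed.
from langchain.callbacks.tracers import SharedLangChainTracer

tracer = SharedLangChainTracer()
```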
langchain/callbacks/tracers/schemas.py (new file, 76 lines)
@@ -0,0 +1,76 @@
"""Schemas for tracers."""
from __future__ import annotations

import datetime
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, Field

from langchain.schema import LLMResult


class TracerSessionBase(BaseModel):
    """Base class for TracerSession."""

    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    name: Optional[str] = None
    extra: Optional[Dict[str, Any]] = None


class TracerSessionCreate(TracerSessionBase):
    """Create class for TracerSession."""

    pass


class TracerSession(TracerSessionBase):
    """TracerSession schema."""

    id: int


class BaseRun(BaseModel):
    """Base class for Run."""

    id: Optional[Union[int, str]] = None
    start_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    end_time: datetime.datetime = Field(default_factory=datetime.datetime.utcnow)
    extra: Optional[Dict[str, Any]] = None
    execution_order: int
    serialized: Dict[str, Any]
    session_id: int
    error: Optional[str] = None


class LLMRun(BaseRun):
    """Class for LLMRun."""

    prompts: List[str]
    response: Optional[LLMResult] = None


class ChainRun(BaseRun):
    """Class for ChainRun."""

    inputs: Dict[str, Any]
    outputs: Optional[Dict[str, Any]] = None
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)
    child_runs: List[Union[LLMRun, ChainRun, ToolRun]] = Field(default_factory=list)


class ToolRun(BaseRun):
    """Class for ToolRun."""

    tool_input: str
    output: Optional[str] = None
    action: str
    child_llm_runs: List[LLMRun] = Field(default_factory=list)
    child_chain_runs: List[ChainRun] = Field(default_factory=list)
    child_tool_runs: List[ToolRun] = Field(default_factory=list)
    child_runs: List[Union[LLMRun, ChainRun, ToolRun]] = Field(default_factory=list)


ChainRun.update_forward_refs()
ToolRun.update_forward_refs()
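The run schemas nest: a `ChainRun` or `ToolRun` carries lists of child LLM, chain, and tool runs, which is exactly what `_add_child_run` appends to. A small sketch constructing such a tree by hand with placeholder payloads:

```python
from langchain.callbacks.tracers.schemas import ChainRun, LLMRun

child = LLMRun(
    serialized={"name": "llm"}, prompts=["hi"], execution_order=2, session_id=1
)
parent = ChainRun(
    serialized={"name": "chain"}, inputs={"q": "hi"}, execution_order=1, session_id=1
)
parent.child_llm_runs.append(child)

# The whole tree serializes as one JSON payload for the tracing endpoint.
print(parent.json())
```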
@@ -1,11 +1,13 @@
"""Chains are easily reusable components which can be linked together."""
from langchain.chains.api.base import APIChain
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.loading import load_chain
from langchain.chains.mapreduce import MapReduceChain
from langchain.chains.moderation import OpenAIModerationChain
from langchain.chains.pal.base import PALChain
@@ -39,4 +41,6 @@ __all__ = [
    "MapReduceChain",
    "OpenAIModerationChain",
    "SQLDatabaseSequentialChain",
    "load_chain",
    "HypotheticalDocumentEmbedder",
]

@@ -3,7 +3,7 @@ from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator
from pydantic import BaseModel, Field, root_validator

from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
@@ -18,7 +18,7 @@ class APIChain(Chain, BaseModel):

    api_request_chain: LLMChain
    api_answer_chain: LLMChain
    requests_wrapper: RequestsWrapper
    requests_wrapper: RequestsWrapper = Field(exclude=True)
    api_docs: str
    question_key: str = "question"  #: :meta private:
    output_key: str = "output"  #: :meta private:
@@ -102,3 +102,7 @@ class APIChain(Chain, BaseModel):
            api_docs=api_docs,
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "api_chain"

@@ -1,7 +1,10 @@
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import yaml
from pydantic import BaseModel, Extra, Field, validator

import langchain
@@ -44,7 +47,9 @@ class Chain(BaseModel, ABC):
    """Base interface that all chains should implement."""

    memory: Optional[Memory] = None
    callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)
    callback_manager: BaseCallbackManager = Field(
        default_factory=get_callback_manager, exclude=True
    )
    verbose: bool = Field(
        default_factory=_get_verbosity
    )  # Whether to print the response text
@@ -54,6 +59,10 @@ class Chain(BaseModel, ABC):

        arbitrary_types_allowed = True

    @property
    def _chain_type(self) -> str:
        raise NotImplementedError("Saving not supported for this chain type.")

    @validator("callback_manager", pre=True, always=True)
    def set_callback_manager(
        cls, callback_manager: Optional[BaseCallbackManager]
@@ -141,7 +150,7 @@ class Chain(BaseModel, ABC):
        )
        try:
            outputs = self._call(inputs)
        except Exception as e:
        except (KeyboardInterrupt, Exception) as e:
            self.callback_manager.on_chain_error(e, verbose=self.verbose)
            raise e
        self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
@@ -177,3 +186,43 @@ class Chain(BaseModel, ABC):
            f"`run` supported with either positional arguments or keyword arguments"
            f" but not both. Got args: {args} and kwargs: {kwargs}."
        )

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of chain."""
        if self.memory is not None:
            raise ValueError("Saving of memory is not yet supported.")
        _dict = super().dict()
        _dict["_type"] = self._chain_type
        return _dict

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the chain.

        Args:
            file_path: Path to file to save the chain to.

        Example:
        .. code-block:: python

            chain.save(file_path="path/chain.yaml")
        """
        # Convert file to Path object.
        if isinstance(file_path, str):
            save_path = Path(file_path)
        else:
            save_path = file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        # Fetch dictionary to save
        chain_dict = self.dict()

        if save_path.suffix == ".json":
            with open(file_path, "w") as f:
                json.dump(chain_dict, f, indent=4)
        elif save_path.suffix == ".yaml":
            with open(file_path, "w") as f:
                yaml.dump(chain_dict, f, default_flow_style=False)
        else:
            raise ValueError(f"{save_path} must be json or yaml")
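`Chain.dict` stamps the `_chain_type` identifier that the new `langchain/chains/loading.py` (further below) dispatches on, so chains can round-trip through disk. A sketch of that round trip, assuming an OpenAI API key is configured; the file name is a placeholder:

```python
from langchain.chains import LLMChain, load_chain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(input_variables=["topic"], template="Tell me about {topic}.")
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)

chain.save("chain.yaml")           # writes _type: llm_chain plus llm/prompt configs
reloaded = load_chain("chain.yaml")  # dispatches on _type to rebuild the LLMChain
```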
@@ -168,3 +168,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel):
        extra_return_dict = {}
        output, _ = self.combine_document_chain.combine_docs(result_docs, **kwargs)
        return output, extra_return_dict

    @property
    def _chain_type(self) -> str:
        return "map_reduce_documents_chain"

@@ -111,3 +111,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain, BaseModel):
        if self.return_intermediate_steps:
            extra_info["intermediate_steps"] = results
        return output[self.answer_key], extra_info

    @property
    def _chain_type(self) -> str:
        return "map_rerank_documents_chain"

@@ -113,3 +113,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain, BaseModel):
        else:
            extra_return_dict = {}
        return res, extra_return_dict

    @property
    def _chain_type(self) -> str:
        return "refine_documents_chain"

@@ -83,3 +83,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel):
        inputs = self._get_inputs(docs, **kwargs)
        # Call predict on the LLM.
        return self.llm_chain.predict(**inputs), {}

    @property
    def _chain_type(self) -> str:
        return "stuff_documents_chain"

@@ -230,12 +230,12 @@ class ConversationEntityMemory(Memory, BaseModel):
    llm: BaseLLM
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
    memory_keys: List[str] = ["entities", "history"]  #: :meta private:
    output_key: Optional[str] = None
    input_key: Optional[str] = None
    store: Dict[str, Optional[str]] = {}
    entity_cache: List[str] = []
    k: int = 3
    chat_history_key: str = "history"

    @property
    def memory_variables(self) -> List[str]:
@@ -243,7 +243,7 @@ class ConversationEntityMemory(Memory, BaseModel):

        :meta private:
        """
        return ["entities", "history"]
        return ["entities", self.chat_history_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
@@ -265,7 +265,7 @@ class ConversationEntityMemory(Memory, BaseModel):
            entity_summaries[entity] = self.store.get(entity, "")
        self.entity_cache = entities
        return {
            "history": "\n".join(self.buffer[-self.k :]),
            self.chat_history_key: "\n".join(self.buffer[-self.k :]),
            "entities": entity_summaries,
        }
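The new `chat_history_key` field lets the entity memory expose its buffer to prompts that do not use the literal `history` variable. A sketch, assuming the class still lives at `langchain.chains.conversation.memory` and an OpenAI key is configured:

```python
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.llms import OpenAI

# Expose the buffer under "chat_history" instead of the default "history".
memory = ConversationEntityMemory(
    llm=OpenAI(temperature=0), chat_history_key="chat_history"
)
print(memory.memory_variables)  # ["entities", "chat_history"]
```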
@@ -4,18 +4,19 @@ https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations

from typing import List
from typing import Dict, List

import numpy as np
from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
from langchain.embeddings.base import Embeddings
from langchain.embeddings.hyde.prompts import PROMPT_MAP
from langchain.llms.base import BaseLLM


class HypotheticalDocumentEmbedder(Embeddings, BaseModel):
class HypotheticalDocumentEmbedder(Chain, Embeddings, BaseModel):
    """Generate hypothetical document for query, and then embed that.

    Based on https://arxiv.org/abs/2212.10496
@@ -30,10 +31,24 @@ class HypotheticalDocumentEmbedder(Embeddings, BaseModel):
        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Input keys for Hyde's LLM chain."""
        return self.llm_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Output keys for Hyde's LLM chain."""
        return self.llm_chain.output_keys

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call the base embeddings."""
        return self.base_embeddings.embed_documents(texts)

    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
        """Combine embeddings into final embeddings."""
        return list(np.array(embeddings).mean(axis=0))

    def embed_query(self, text: str) -> List[float]:
        """Generate a hypothetical document and embedded it."""
        var_name = self.llm_chain.input_keys[0]
@@ -42,9 +57,9 @@ class HypotheticalDocumentEmbedder(Embeddings, BaseModel):
        embeddings = self.embed_documents(documents)
        return self.combine_embeddings(embeddings)

    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
        """Combine embeddings into final embeddings."""
        return list(np.array(embeddings).mean(axis=0))
    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        """Call the internal llm chain."""
        return self.llm_chain._call(inputs)

    @classmethod
    def from_llm(
@@ -54,3 +69,7 @@ class HypotheticalDocumentEmbedder(Embeddings, BaseModel):
        prompt = PROMPT_MAP[prompt_key]
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(base_embeddings=base_embeddings, llm_chain=llm_chain)

    @property
    def _chain_type(self) -> str:
        return "hyde_chain"
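Making `HypotheticalDocumentEmbedder` a `Chain` means it now participates in callbacks and serialization like any other chain, while still acting as an `Embeddings`. A sketch of its use, assuming an OpenAI key is configured and that `"web_search"` is one of the keys in `PROMPT_MAP`:

```python
from langchain.chains import HypotheticalDocumentEmbedder
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI

hyde = HypotheticalDocumentEmbedder.from_llm(
    OpenAI(temperature=0), OpenAIEmbeddings(), "web_search"
)
# Generates a hypothetical document for the query, embeds it, and averages.
vector = hyde.embed_query("What did the president say about inflation?")
```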
@@ -122,3 +122,7 @@ class LLMChain(Chain, BaseModel):
|
||||
return new_result
|
||||
else:
|
||||
return result
|
||||
|
||||
@property
|
||||
def _chain_type(self) -> str:
|
||||
return "llm_chain"
|
||||
|
||||
@@ -73,3 +73,7 @@ class LLMBashChain(Chain, BaseModel):
|
||||
else:
|
||||
raise ValueError(f"unknown format from LLM: {t}")
|
||||
return {self.output_key: output}
|
||||
|
||||
@property
|
||||
def _chain_type(self) -> str:
|
||||
return "llm_bash_chain"
|
||||
|
||||
@@ -97,3 +97,7 @@ class LLMCheckerChain(Chain, BaseModel):
|
||||
)
|
||||
output = question_to_checked_assertions_chain({"question": question})
|
||||
return {self.output_key: output["revised_statement"]}
|
||||
|
||||
@property
|
||||
def _chain_type(self) -> str:
|
||||
return "llm_checker_chain"
|
||||
|
||||
@@ -68,3 +68,7 @@ class LLMMathChain(Chain, BaseModel):
|
||||
else:
|
||||
raise ValueError(f"unknown format from LLM: {t}")
|
||||
return {self.output_key: answer}
|
||||
|
||||
@property
|
||||
def _chain_type(self) -> str:
|
||||
return "llm_math_chain"
|
||||
|
||||
@@ -18,7 +18,9 @@ class LLMRequestsChain(Chain, BaseModel):
|
||||
"""Chain that hits a URL and then uses an LLM to parse results."""
|
||||
|
||||
llm_chain: LLMChain
|
||||
requests_wrapper: RequestsWrapper = Field(default_factory=RequestsWrapper)
|
||||
requests_wrapper: RequestsWrapper = Field(
|
||||
default_factory=RequestsWrapper, exclude=True
|
||||
)
|
||||
text_length: int = 8000
|
||||
requests_key: str = "requests_result" #: :meta private:
|
||||
input_key: str = "url" #: :meta private:
|
||||
@@ -71,3 +73,7 @@ class LLMRequestsChain(Chain, BaseModel):
|
||||
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
|
||||
result = self.llm_chain.predict(**other_keys)
|
||||
return {self.output_key: result}
|
||||
|
||||
@property
|
||||
def _chain_type(self) -> str:
|
||||
return "llm_requests_chain"
|
||||
|
||||
467
langchain/chains/loading.py
Normal file
467
langchain/chains/loading.py
Normal file
@@ -0,0 +1,467 @@
|
||||
"""Functionality for loading chains."""
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Any, Union
|
||||
|
||||
import yaml
|
||||
|
||||
from langchain.chains.api.base import APIChain
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
|
||||
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
|
||||
from langchain.chains.combine_documents.refine import RefineDocumentsChain
|
||||
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
|
||||
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.llm_bash.base import LLMBashChain
|
||||
from langchain.chains.llm_checker.base import LLMCheckerChain
|
||||
from langchain.chains.llm_math.base import LLMMathChain
|
||||
from langchain.chains.llm_requests import LLMRequestsChain
|
||||
from langchain.chains.pal.base import PALChain
|
||||
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
|
||||
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
|
||||
from langchain.chains.sql_database.base import SQLDatabaseChain
|
||||
from langchain.chains.vector_db_qa.base import VectorDBQA
|
||||
from langchain.llms.loading import load_llm, load_llm_from_config
|
||||
from langchain.prompts.loading import load_prompt, load_prompt_from_config
|
||||
from langchain.utilities.loading import try_load_from_hub
|
||||
|
||||
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
|
||||
|
||||
|
||||
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
|
||||
"""Load LLM chain from config dict."""
|
||||
if "llm" in config:
|
||||
llm_config = config.pop("llm")
|
||||
llm = load_llm_from_config(llm_config)
|
||||
elif "llm_path" in config:
|
||||
llm = load_llm(config.pop("llm_path"))
|
||||
else:
|
||||
raise ValueError("One of `llm` or `llm_path` must be present.")
|
||||
|
||||
if "prompt" in config:
|
||||
prompt_config = config.pop("prompt")
|
||||
prompt = load_prompt_from_config(prompt_config)
|
||||
elif "prompt_path" in config:
|
||||
prompt = load_prompt(config.pop("prompt_path"))
|
||||
else:
|
||||
raise ValueError("One of `prompt` or `prompt_path` must be present.")
|
||||
|
||||
return LLMChain(llm=llm, prompt=prompt, **config)
|
||||
|
||||
|
||||
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
|
||||
"""Load hypothetical document embedder chain from config dict."""
|
||||
if "llm_chain" in config:
|
||||
llm_chain_config = config.pop("llm_chain")
|
||||
llm_chain = load_chain_from_config(llm_chain_config)
|
||||
elif "llm_chain_path" in config:
|
||||
llm_chain = load_chain(config.pop("llm_chain_path"))
|
||||
else:
|
||||
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
|
||||
if "embeddings" in kwargs:
|
||||
embeddings = kwargs.pop("embeddings")
|
||||
else:
|
||||
raise ValueError("`embeddings` must be present.")
|
||||
return HypotheticalDocumentEmbedder(
|
||||
llm_chain=llm_chain, base_embeddings=embeddings, **config
|
||||
)
|
||||
|
||||
|
||||
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")

    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    else:
        raise ValueError(
            "One of `document_prompt` or `document_prompt_path` must be present."
        )

    return StuffDocumentsChain(
        llm_chain=llm_chain, document_prompt=document_prompt, **config
    )


def _load_map_reduce_documents_chain(
    config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")

    if "combine_document_chain" in config:
        combine_document_chain_config = config.pop("combine_document_chain")
        combine_document_chain = load_chain_from_config(combine_document_chain_config)
    elif "combine_document_chain_path" in config:
        combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
    else:
        raise ValueError(
            "One of `combine_document_chain` or "
            "`combine_document_chain_path` must be present."
        )
    # Default to None so the variable is always bound, even when neither
    # collapse key appears in the config.
    collapse_document_chain = None
    if "collapse_document_chain" in config:
        collapse_document_chain_config = config.pop("collapse_document_chain")
        if collapse_document_chain_config is not None:
            collapse_document_chain = load_chain_from_config(
                collapse_document_chain_config
            )
    elif "collapse_document_chain_path" in config:
        collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
    return MapReduceDocumentsChain(
        llm_chain=llm_chain,
        combine_document_chain=combine_document_chain,
        collapse_document_chain=collapse_document_chain,
        **config,
    )


def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    # Load the prompt back into the config so the chain's default prompt
    # applies when neither key is present (avoids an unbound local).
    if "prompt" in config:
        config["prompt"] = load_prompt_from_config(config.pop("prompt"))
    elif "prompt_path" in config:
        config["prompt"] = load_prompt(config.pop("prompt_path"))
    return LLMBashChain(llm=llm, **config)


def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    # Load each prompt back into the config so the chain's defaults apply
    # when a prompt is absent, instead of referencing an unbound local.
    for key in (
        "create_draft_answer_prompt",
        "list_assertions_prompt",
        "check_assertions_prompt",
        "revised_answer_prompt",
    ):
        if key in config:
            config[key] = load_prompt_from_config(config.pop(key))
        elif f"{key}_path" in config:
            config[key] = load_prompt(config.pop(f"{key}_path"))
    return LLMCheckerChain(llm=llm, **config)


def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    # Load the prompt back into the config so the chain's default prompt
    # applies when neither key is present (avoids an unbound local).
    if "prompt" in config:
        config["prompt"] = load_prompt_from_config(config.pop("prompt"))
    elif "prompt_path" in config:
        config["prompt"] = load_prompt(config.pop("prompt_path"))
    return LLMMathChain(llm=llm, **config)


def _load_map_rerank_documents_chain(
    config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    return MapRerankDocumentsChain(llm_chain=llm_chain, **config)


def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        raise ValueError("One of `prompt` or `prompt_path` must be present.")
    return PALChain(llm=llm, prompt=prompt, **config)


def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
    if "initial_llm_chain" in config:
        initial_llm_chain_config = config.pop("initial_llm_chain")
        initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
    elif "initial_llm_chain_path" in config:
        initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
    else:
        raise ValueError(
            "One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
        )
    if "refine_llm_chain" in config:
        refine_llm_chain_config = config.pop("refine_llm_chain")
        refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
    elif "refine_llm_chain_path" in config:
        refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
    else:
        raise ValueError(
            "One of `refine_llm_chain` or `refine_llm_chain_path` must be present."
        )
    # Load the document prompt back into the config so the chain's default
    # is used when neither key is present.
    if "document_prompt" in config:
        config["document_prompt"] = load_prompt_from_config(
            config.pop("document_prompt")
        )
    elif "document_prompt_path" in config:
        config["document_prompt"] = load_prompt(config.pop("document_prompt_path"))
    return RefineDocumentsChain(
        initial_llm_chain=initial_llm_chain,
        refine_llm_chain=refine_llm_chain,
        **config,
    )


def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)


def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
    if "database" in kwargs:
        database = kwargs.pop("database")
    else:
        raise ValueError("`database` must be present.")
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    # Load the prompt back into the config so the chain's default prompt
    # applies when the key is absent (avoids an unbound local).
    if "prompt" in config:
        config["prompt"] = load_prompt_from_config(config.pop("prompt"))
    return SQLDatabaseChain(database=database, llm=llm, **config)


def _load_vector_db_qa_with_sources_chain(
    config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return VectorDBQAWithSourcesChain(
        combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore,
        **config,
    )


def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
    if "vectorstore" in kwargs:
        vectorstore = kwargs.pop("vectorstore")
    else:
        raise ValueError("`vectorstore` must be present.")
    if "combine_documents_chain" in config:
        combine_documents_chain_config = config.pop("combine_documents_chain")
        combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
    elif "combine_documents_chain_path" in config:
        combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
    else:
        raise ValueError(
            "One of `combine_documents_chain` or "
            "`combine_documents_chain_path` must be present."
        )
    return VectorDBQA(
        combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore,
        **config,
    )


def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
    if "api_request_chain" in config:
        api_request_chain_config = config.pop("api_request_chain")
        api_request_chain = load_chain_from_config(api_request_chain_config)
    elif "api_request_chain_path" in config:
        api_request_chain = load_chain(config.pop("api_request_chain_path"))
    else:
        raise ValueError(
            "One of `api_request_chain` or `api_request_chain_path` must be present."
        )
    if "api_answer_chain" in config:
        api_answer_chain_config = config.pop("api_answer_chain")
        api_answer_chain = load_chain_from_config(api_answer_chain_config)
    elif "api_answer_chain_path" in config:
        api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
    else:
        raise ValueError(
            "One of `api_answer_chain` or `api_answer_chain_path` must be present."
        )
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
    else:
        raise ValueError("`requests_wrapper` must be present.")
    return APIChain(
        api_request_chain=api_request_chain,
        api_answer_chain=api_answer_chain,
        requests_wrapper=requests_wrapper,
        **config,
    )


def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
        return LLMRequestsChain(
            llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
        )
    else:
        return LLMRequestsChain(llm_chain=llm_chain, **config)


type_to_loader_dict = {
    "api_chain": _load_api_chain,
    "hyde_chain": _load_hyde_chain,
    "llm_chain": _load_llm_chain,
    "llm_bash_chain": _load_llm_bash_chain,
    "llm_checker_chain": _load_llm_checker_chain,
    "llm_math_chain": _load_llm_math_chain,
    "llm_requests_chain": _load_llm_requests_chain,
    "pal_chain": _load_pal_chain,
    "qa_with_sources_chain": _load_qa_with_sources_chain,
    "stuff_documents_chain": _load_stuff_documents_chain,
    "map_reduce_documents_chain": _load_map_reduce_documents_chain,
    "map_rerank_documents_chain": _load_map_rerank_documents_chain,
    "refine_documents_chain": _load_refine_documents_chain,
    "sql_database_chain": _load_sql_database_chain,
    "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
    "vector_db_qa": _load_vector_db_qa,
}


def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
    """Load chain from config dict."""
    if "_type" not in config:
        raise ValueError("Must specify a chain `_type` in config.")
    config_type = config.pop("_type")

    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} chain not supported")

    chain_loader = type_to_loader_dict[config_type]
    return chain_loader(config, **kwargs)


def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
    """Unified method for loading a chain from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_chain_from_file, "chains", {"json", "yaml"}
    ):
        return hub_result
    else:
        return _load_chain_from_file(path, **kwargs)


def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
    """Load chain from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError("File type must be json or yaml")
    # Load the chain from the config now.
    return load_chain_from_config(config, **kwargs)
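
For orientation, a minimal sketch of how these loaders are exercised end to end. The file name is arbitrary, an OpenAI API key is assumed to be configured, and `Chain.save` is assumed to exist as the serialization counterpart of this module.

# Hypothetical round trip: serialize an LLMChain, then restore it through
# the `_type`-based dispatch above.
from langchain.chains import LLMChain
from langchain.chains.loading import load_chain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(input_variables=["topic"], template="Tell me about {topic}.")
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
chain.save("llm_chain.json")           # writes the llm, prompt and "_type" fields
loaded = load_chain("llm_chain.json")  # dispatches through type_to_loader_dict
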
@@ -94,3 +94,7 @@ class NatBotChain(Chain, BaseModel):
             self.input_browser_content_key: browser_content,
         }
         return self(_inputs)[self.output_key]
+
+    @property
+    def _chain_type(self) -> str:
+        return "nat_bot_chain"
@@ -1,9 +1,23 @@
 # flake8: noqa
-# type: ignore
 import time
 from sys import platform
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    TypedDict,
+    Union,
+)
 
-black_listed_elements = {
+if TYPE_CHECKING:
+    from playwright.sync_api import Browser, CDPSession, Page, sync_playwright
+
+black_listed_elements: Set[str] = {
     "html",
     "head",
     "title",
@@ -19,8 +33,21 @@ black_listed_elements = {
 }
 
 
+class ElementInViewPort(TypedDict):
+    node_index: str
+    backend_node_id: int
+    node_name: Optional[str]
+    node_value: Optional[str]
+    node_meta: List[str]
+    is_clickable: bool
+    origin_x: int
+    origin_y: int
+    center_x: int
+    center_y: int
+
+
 class Crawler:
-    def __init__(self):
+    def __init__(self) -> None:
         try:
             from playwright.sync_api import sync_playwright
         except ImportError:
@@ -28,16 +55,20 @@ class Crawler:
                 "Could not import playwright python package. "
                 "Please install it with `pip install playwright`."
             )
-        self.browser = sync_playwright().start().chromium.launch(headless=False)
-        self.page = self.browser.new_page()
+        self.browser: Browser = (
+            sync_playwright().start().chromium.launch(headless=False)
+        )
+        self.page: Page = self.browser.new_page()
         self.page.set_viewport_size({"width": 1280, "height": 1080})
+        self.page_element_buffer: Dict[int, ElementInViewPort]
+        self.client: CDPSession
 
-    def go_to_page(self, url):
+    def go_to_page(self, url: str) -> None:
         self.page.goto(url=url if "://" in url else "http://" + url)
         self.client = self.page.context.new_cdp_session(self.page)
         self.page_element_buffer = {}
 
-    def scroll(self, direction):
+    def scroll(self, direction: str) -> None:
         if direction == "up":
             self.page.evaluate(
                 "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;"
@@ -47,7 +78,7 @@ class Crawler:
                 "(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;"
             )
 
-    def click(self, id):
+    def click(self, id: Union[str, int]) -> None:
         # Inject javascript into the page which removes the target= attribute from all links
         js = """
         links = document.getElementsByTagName("a");
@@ -59,41 +90,37 @@ class Crawler:
 
         element = self.page_element_buffer.get(int(id))
         if element:
-            x = element.get("center_x")
-            y = element.get("center_y")
+            x: float = element["center_x"]
+            y: float = element["center_y"]
 
             self.page.mouse.click(x, y)
         else:
             print("Could not find element")
 
-    def type(self, id, text):
+    def type(self, id: Union[str, int], text: str) -> None:
         self.click(id)
         self.page.keyboard.type(text)
 
-    def enter(self):
+    def enter(self) -> None:
         self.page.keyboard.press("Enter")
 
-    def crawl(self):
+    def crawl(self) -> List[str]:
         page = self.page
         page_element_buffer = self.page_element_buffer
         start = time.time()
 
         page_state_as_text = []
 
-        device_pixel_ratio = page.evaluate("window.devicePixelRatio")
+        device_pixel_ratio: float = page.evaluate("window.devicePixelRatio")
         if platform == "darwin" and device_pixel_ratio == 1:  # lies
             device_pixel_ratio = 2
 
-        win_scroll_x = page.evaluate("window.scrollX")
-        win_scroll_y = page.evaluate("window.scrollY")
-        win_upper_bound = page.evaluate("window.pageYOffset")
-        win_left_bound = page.evaluate("window.pageXOffset")
-        win_width = page.evaluate("window.screen.width")
-        win_height = page.evaluate("window.screen.height")
-        win_right_bound = win_left_bound + win_width
-        win_lower_bound = win_upper_bound + win_height
-        document_offset_height = page.evaluate("document.body.offsetHeight")
-        document_scroll_height = page.evaluate("document.body.scrollHeight")
+        win_upper_bound: float = page.evaluate("window.pageYOffset")
+        win_left_bound: float = page.evaluate("window.pageXOffset")
+        win_width: float = page.evaluate("window.screen.width")
+        win_height: float = page.evaluate("window.screen.height")
+        win_right_bound: float = win_left_bound + win_width
+        win_lower_bound: float = win_upper_bound + win_height
 
         # percentage_progress_start = (win_upper_bound / document_scroll_height) * 100
         # percentage_progress_end = (
@@ -116,40 +143,35 @@ class Crawler:
             "DOMSnapshot.captureSnapshot",
             {"computedStyles": [], "includeDOMRects": True, "includePaintOrder": True},
         )
-        strings = tree["strings"]
-        document = tree["documents"][0]
-        nodes = document["nodes"]
-        backend_node_id = nodes["backendNodeId"]
-        attributes = nodes["attributes"]
-        node_value = nodes["nodeValue"]
-        parent = nodes["parentIndex"]
-        node_types = nodes["nodeType"]
-        node_names = nodes["nodeName"]
-        is_clickable = set(nodes["isClickable"]["index"])
+        strings: Dict[int, str] = tree["strings"]
+        document: Dict[str, Any] = tree["documents"][0]
+        nodes: Dict[str, Any] = document["nodes"]
+        backend_node_id: Dict[int, int] = nodes["backendNodeId"]
+        attributes: Dict[int, Dict[int, Any]] = nodes["attributes"]
+        node_value: Dict[int, int] = nodes["nodeValue"]
+        parent: Dict[int, int] = nodes["parentIndex"]
+        node_names: Dict[int, int] = nodes["nodeName"]
+        is_clickable: Set[int] = set(nodes["isClickable"]["index"])
 
-        text_value = nodes["textValue"]
-        text_value_index = text_value["index"]
-        text_value_values = text_value["value"]
+        input_value: Dict[str, Any] = nodes["inputValue"]
+        input_value_index: List[int] = input_value["index"]
+        input_value_values: List[int] = input_value["value"]
 
-        input_value = nodes["inputValue"]
-        input_value_index = input_value["index"]
-        input_value_values = input_value["value"]
+        layout: Dict[str, Any] = document["layout"]
+        layout_node_index: List[int] = layout["nodeIndex"]
+        bounds: Dict[int, List[float]] = layout["bounds"]
 
-        input_checked = nodes["inputChecked"]
-        layout = document["layout"]
-        layout_node_index = layout["nodeIndex"]
-        bounds = layout["bounds"]
+        cursor: int = 0
 
-        cursor = 0
-        html_elements_text = []
+        child_nodes: Dict[str, List[Dict[str, Any]]] = {}
+        elements_in_view_port: List[ElementInViewPort] = []
 
-        child_nodes = {}
-        elements_in_view_port = []
+        anchor_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {"-1": (False, None)}
+        button_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {"-1": (False, None)}
 
-        anchor_ancestry = {"-1": (False, None)}
-        button_ancestry = {"-1": (False, None)}
-
-        def convert_name(node_name, has_click_handler):
+        def convert_name(
+            node_name: Optional[str], has_click_handler: Optional[bool]
+        ) -> str:
             if node_name == "a":
                 return "link"
             if node_name == "input":
@@ -163,7 +185,9 @@ class Crawler:
             else:
                 return "text"
 
-        def find_attributes(attributes, keys):
+        def find_attributes(
+            attributes: Dict[int, Any], keys: List[str]
+        ) -> Dict[str, str]:
             values = {}
 
             for [key_index, value_index] in zip(*(iter(attributes),) * 2):
@@ -181,7 +205,13 @@ class Crawler:
 
             return values
 
-        def add_to_hash_tree(hash_tree, tag, node_id, node_name, parent_id):
+        def add_to_hash_tree(
+            hash_tree: Dict[str, Tuple[bool, Optional[int]]],
+            tag: str,
+            node_id: int,
+            node_name: Optional[str],
+            parent_id: int,
+        ) -> Tuple[bool, Optional[int]]:
             parent_id_str = str(parent_id)
             if not parent_id_str in hash_tree:
                 parent_name = strings[node_names[parent_id]].lower()
@@ -195,7 +225,7 @@ class Crawler:
 
             # even if the anchor is nested in another anchor, we set the "root" for all descendants to be ::Self
             if node_name == tag:
-                value = (True, node_id)
+                value: Tuple[bool, Optional[int]] = (True, node_id)
             elif (
                 is_parent_desc_anchor
             ):  # reuse the parent's anchor_id (which could be much higher in the tree)
@@ -212,7 +242,7 @@ class Crawler:
 
         for index, node_name_index in enumerate(node_names):
            node_parent = parent[index]
-            node_name = strings[node_name_index].lower()
+            node_name: Optional[str] = strings[node_name_index].lower()
 
             is_ancestor_of_anchor, anchor_id = add_to_hash_tree(
                 anchor_ancestry, "a", index, node_name, node_parent
@@ -253,7 +283,7 @@ class Crawler:
             if not partially_is_in_viewport:
                 continue
 
-            meta_data = []
+            meta_data: List[str] = []
 
             # inefficient to grab the same set of keys for kinds of objects, but it's fine for now
             element_attributes = find_attributes(
@@ -274,7 +304,7 @@ class Crawler:
                 else child_nodes.setdefault(str(ancestor_node_key), [])
             )
 
-            if node_name == "#text" and ancestor_exception:
+            if node_name == "#text" and ancestor_exception and ancestor_node:
                 text = strings[node_value[index]]
                 if text == "|" or text == "•":
                     continue
@@ -289,7 +319,7 @@ class Crawler:
             )  # prevent [button ... (button)..]
 
             for key in element_attributes:
-                if ancestor_exception:
+                if ancestor_exception and ancestor_node:
                     ancestor_node.append(
                         {
                             "type": "attribute",
@@ -344,36 +374,32 @@ class Crawler:
         for element in elements_in_view_port:
             node_index = element.get("node_index")
             node_name = element.get("node_name")
-            node_value = element.get("node_value")
-            is_clickable = element.get("is_clickable")
-            origin_x = element.get("origin_x")
-            origin_y = element.get("origin_y")
-            center_x = element.get("center_x")
-            center_y = element.get("center_y")
-            meta_data = element.get("node_meta")
+            element_node_value = element.get("node_value")
+            node_is_clickable = element.get("is_clickable")
+            node_meta_data: Optional[List[str]] = element.get("node_meta")
 
-            inner_text = f"{node_value} " if node_value else ""
+            inner_text = f"{element_node_value} " if element_node_value else ""
             meta = ""
 
             if node_index in child_nodes:
-                for child in child_nodes.get(node_index):
+                for child in child_nodes[node_index]:
                     entry_type = child.get("type")
                     entry_value = child.get("value")
 
-                    if entry_type == "attribute":
+                    if entry_type == "attribute" and node_meta_data:
                         entry_key = child.get("key")
-                        meta_data.append(f'{entry_key}="{entry_value}"')
+                        node_meta_data.append(f'{entry_key}="{entry_value}"')
                     else:
                         inner_text += f"{entry_value} "
 
-            if meta_data:
-                meta_string = " ".join(meta_data)
+            if node_meta_data:
+                meta_string = " ".join(node_meta_data)
                 meta = f" {meta_string}"
 
             if inner_text != "":
                 inner_text = f"{inner_text.strip()}"
 
-            converted_node_name = convert_name(node_name, is_clickable)
+            converted_node_name = convert_name(node_name, node_is_clickable)
 
             # not very elegant, more like a placeholder
             if (
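
A quick smoke test of the typed crawler, sketched under the assumption that Playwright's Chromium build is installed (`playwright install chromium`); it opens a headed browser window.

# Hypothetical usage of the Crawler defined above.
from langchain.chains.natbot.crawler import Crawler

crawler = Crawler()
crawler.go_to_page("example.com")  # "http://" is prepended when the scheme is missing
for line in crawler.crawl():       # simplified text rendering of the viewport
    print(line)
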
@@ -79,3 +79,7 @@ class PALChain(Chain, BaseModel):
             get_answer_expr="print(answer)",
             **kwargs,
         )
+
+    @property
+    def _chain_type(self) -> str:
+        return "pal_chain"
@@ -126,3 +126,7 @@ class QAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
 
     def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
         return inputs.pop(self.input_docs_key)
+
+    @property
+    def _chain_type(self) -> str:
+        return "qa_with_sources_chain"
@@ -1,8 +1,10 @@
 """Question-answering with sources over a vector database."""
 
 from typing import Any, Dict, List
 
 from pydantic import BaseModel, Field
 
 from langchain.chains.combine_documents.stuff import StuffDocumentsChain
 from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
 from langchain.docstore.document import Document
 from langchain.vectorstores.base import VectorStore
@@ -11,15 +13,44 @@ from langchain.vectorstores.base import VectorStore
 class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
     """Question-answering with sources over a vector database."""
 
-    vectorstore: VectorStore
+    vectorstore: VectorStore = Field(exclude=True)
     """Vector Database to connect to."""
     k: int = 4
     """Number of results to return from store"""
+    reduce_k_below_max_tokens: bool = False
+    """Reduce the number of results to return from store based on tokens limit"""
+    max_tokens_limit: int = 3375
+    """Restrict the docs to return from store based on tokens,
+    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true"""
     search_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Extra search args."""
 
+    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
+        num_docs = len(docs)
+
+        if self.reduce_k_below_max_tokens and isinstance(
+            self.combine_documents_chain, StuffDocumentsChain
+        ):
+            tokens = [
+                self.combine_documents_chain.llm_chain.llm.get_num_tokens(
+                    doc.page_content
+                )
+                for doc in docs
+            ]
+            token_count = sum(tokens[:num_docs])
+            while token_count > self.max_tokens_limit:
+                num_docs -= 1
+                token_count -= tokens[num_docs]
+
+        return docs[:num_docs]
+
     def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
         question = inputs[self.question_key]
-        return self.vectorstore.similarity_search(
+        docs = self.vectorstore.similarity_search(
             question, k=self.k, **self.search_kwargs
         )
+        return self._reduce_tokens_below_limit(docs)
 
     @property
     def _chain_type(self) -> str:
         return "vector_db_qa_with_sources_chain"
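
The `_reduce_tokens_below_limit` loop is easiest to see on toy numbers; this self-contained restatement assumes three retrieved documents of 1,500 tokens each against the default `max_tokens_limit` of 3375.

tokens = [1500, 1500, 1500]
max_tokens_limit = 3375
num_docs = len(tokens)
token_count = sum(tokens[:num_docs])   # 4500, over the limit
while token_count > max_tokens_limit:
    num_docs -= 1
    token_count -= tokens[num_docs]    # 3000 after dropping the last doc
print(num_docs)  # 2 -> only the first two documents are kept
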
@@ -3,7 +3,7 @@ from __future__ import annotations
 
 from typing import Any, Dict, List
 
-from pydantic import BaseModel, Extra
+from pydantic import BaseModel, Extra, Field
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -26,7 +26,7 @@ class SQLDatabaseChain(Chain, BaseModel):
 
     llm: BaseLLM
     """LLM wrapper to use."""
-    database: SQLDatabase
+    database: SQLDatabase = Field(exclude=True)
     """SQL Database to connect to."""
     prompt: BasePromptTemplate = PROMPT
     """Prompt to use to translate natural language to SQL."""
@@ -34,6 +34,7 @@ class SQLDatabaseChain(Chain, BaseModel):
     """Number of results to return from the query"""
     input_key: str = "query"  #: :meta private:
     output_key: str = "result"  #: :meta private:
+    return_intermediate_steps: bool = False
 
     class Config:
         """Configuration for this pydantic object."""
@@ -55,9 +56,12 @@ class SQLDatabaseChain(Chain, BaseModel):
 
         :meta private:
         """
-        return [self.output_key]
+        if not self.return_intermediate_steps:
+            return [self.output_key]
+        else:
+            return [self.output_key, "intermediate_steps"]
 
-    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
         llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
         input_text = f"{inputs[self.input_key]} \nSQLQuery:"
         self.callback_manager.on_text(input_text, verbose=self.verbose)
@@ -71,10 +75,12 @@ class SQLDatabaseChain(Chain, BaseModel):
             "table_info": table_info,
             "stop": ["\nSQLResult:"],
         }
+        intermediate_steps = []
         sql_cmd = llm_chain.predict(**llm_inputs)
+        intermediate_steps.append(sql_cmd)
         self.callback_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
         result = self.database.run(sql_cmd)
+        intermediate_steps.append(result)
         self.callback_manager.on_text("\nSQLResult: ", verbose=self.verbose)
         self.callback_manager.on_text(result, color="yellow", verbose=self.verbose)
         self.callback_manager.on_text("\nAnswer:", verbose=self.verbose)
@@ -82,7 +88,14 @@ class SQLDatabaseChain(Chain, BaseModel):
         llm_inputs["input"] = input_text
         final_result = llm_chain.predict(**llm_inputs)
         self.callback_manager.on_text(final_result, color="green", verbose=self.verbose)
-        return {self.output_key: final_result}
+        chain_result: Dict[str, Any] = {self.output_key: final_result}
+        if self.return_intermediate_steps:
+            chain_result["intermediate_steps"] = intermediate_steps
+        return chain_result
 
     @property
     def _chain_type(self) -> str:
         return "sql_database_chain"
 
 
 class SQLDatabaseSequentialChain(Chain, BaseModel):
@@ -153,3 +166,7 @@ class SQLDatabaseSequentialChain(Chain, BaseModel):
             "table_names_to_use": table_names_to_use,
         }
         return self.sql_chain(new_inputs, return_only_outputs=True)
+
+    @property
+    def _chain_type(self) -> str:
+        return "sql_database_sequential_chain"
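
A hedged sketch of the new `return_intermediate_steps` flag; the SQLite URI and the question are placeholders, and an OpenAI API key is assumed.

from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
chain = SQLDatabaseChain(
    llm=OpenAI(temperature=0), database=db, return_intermediate_steps=True
)
result = chain("How many employees are there?")
print(result["result"])              # natural-language answer
print(result["intermediate_steps"])  # [generated SQL, raw SQL result]
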
@@ -3,6 +3,11 @@ from langchain.prompts.base import CommaSeparatedListOutputParser
 from langchain.prompts.prompt import PromptTemplate
 
 _DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results using the LIMIT clause. You can order the results by a relevant column to return the most interesting examples in the database.
+
+Never query for all the columns from a specific table, only ask for the few relevant columns given the question.
+
+Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
 
 Use the following format:
 
 Question: "Question here"
@@ -29,7 +29,7 @@ class VectorDBQA(Chain, BaseModel):
 
     """
 
-    vectorstore: VectorStore
+    vectorstore: VectorStore = Field(exclude=True)
     """Vector Database to connect to."""
     k: int = 4
     """Number of documents to query for."""
@@ -41,6 +41,8 @@ class VectorDBQA(Chain, BaseModel):
     """Return the source documents."""
     search_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Extra search args."""
+    search_type: str = "similarity"
+    """Search type to use over vectorstore. `similarity` or `mmr`."""
 
     class Config:
         """Configuration for this pydantic object."""
@@ -90,6 +92,15 @@ class VectorDBQA(Chain, BaseModel):
         values["combine_documents_chain"] = combine_documents_chain
         return values
 
+    @root_validator()
+    def validate_search_type(cls, values: Dict) -> Dict:
+        """Validate search type."""
+        if "search_type" in values:
+            search_type = values["search_type"]
+            if search_type not in ("similarity", "mmr"):
+                raise ValueError(f"search_type of {search_type} not allowed.")
+        return values
+
     @classmethod
     def from_llm(
         cls, llm: BaseLLM, prompt: PromptTemplate = PROMPT, **kwargs: Any
@@ -129,12 +140,24 @@ class VectorDBQA(Chain, BaseModel):
         """
         question = inputs[self.input_key]
 
-        docs = self.vectorstore.similarity_search(
-            question, k=self.k, **self.search_kwargs
-        )
+        if self.search_type == "similarity":
+            docs = self.vectorstore.similarity_search(
+                question, k=self.k, **self.search_kwargs
+            )
+        elif self.search_type == "mmr":
+            docs = self.vectorstore.max_marginal_relevance_search(
+                question, k=self.k, **self.search_kwargs
+            )
+        else:
+            raise ValueError(f"search_type of {self.search_type} not allowed.")
         answer, _ = self.combine_documents_chain.combine_docs(docs, question=question)
 
         if self.return_source_documents:
             return {self.output_key: answer, "source_documents": docs}
         else:
             return {self.output_key: answer}
 
     @property
     def _chain_type(self) -> str:
         """Return the chain type."""
         return "vector_db_qa"
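
A sketch of the new `search_type` switch, assuming FAISS is installed and an OpenAI API key is configured; any vectorstore with `max_marginal_relevance_search` would do.

from langchain import OpenAI, VectorDBQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

store = FAISS.from_texts(["foo", "bar", "baz"], OpenAIEmbeddings())
qa = VectorDBQA.from_llm(llm=OpenAI(), vectorstore=store, search_type="mmr")
# search_type="cosine" would be rejected by validate_search_type above.
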
langchain/docker-compose.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
+version: '3'
+services:
+  langchain-frontend:
+    image: notlangchain/langchainplus-frontend:latest
+    ports:
+      - 4173:4173
+    environment:
+      - BACKEND_URL=http://langchain-backend:8000
+      - PUBLIC_BASE_URL=http://localhost:8000
+      - PUBLIC_DEV_MODE=true
+    depends_on:
+      - langchain-backend
+  langchain-backend:
+    image: notlangchain/langchainplus:latest
+    environment:
+      - PORT=8000
+      - LANGCHAIN_ENV=local
+    ports:
+      - 8000:8000
+    depends_on:
+      - langchain-db
+  langchain-db:
+    image: postgres:14.1
+    environment:
+      - POSTGRES_PASSWORD=postgres
+      - POSTGRES_USER=postgres
+      - POSTGRES_DB=postgres
+    ports:
+      - 5432:5432
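
With this file in place, `docker compose up` from the `langchain/` directory should bring up the frontend on port 4173, the backend on 8000, and Postgres on 5432, assuming those ports are free.
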
@@ -1,14 +1,41 @@
 """Wrappers around embedding modules."""
+import logging
+from typing import Any
 
 from langchain.embeddings.cohere import CohereEmbeddings
 from langchain.embeddings.huggingface import HuggingFaceEmbeddings
 from langchain.embeddings.huggingface_hub import HuggingFaceHubEmbeddings
-from langchain.embeddings.hyde.base import HypotheticalDocumentEmbedder
 from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain.embeddings.tensorflow_hub import TensorflowHubEmbeddings
+
+logger = logging.getLogger(__name__)
 
 __all__ = [
     "OpenAIEmbeddings",
     "HuggingFaceEmbeddings",
     "CohereEmbeddings",
     "HuggingFaceHubEmbeddings",
     "HypotheticalDocumentEmbedder",
+    "TensorflowHubEmbeddings",
 ]
+
+
+# TODO: this is in here to maintain backwards compatibility
+class HypotheticalDocumentEmbedder:
+    def __init__(self, *args: Any, **kwargs: Any):
+        logger.warning(
+            "Using a deprecated class. Please use "
+            "`from langchain.chains import HypotheticalDocumentEmbedder` instead"
+        )
+        from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
+
+        return H(*args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
+        logger.warning(
+            "Using a deprecated class. Please use "
+            "`from langchain.chains import HypotheticalDocumentEmbedder` instead"
+        )
+        from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
+
+        return H.from_llm(*args, **kwargs)
@@ -18,7 +18,7 @@ class CohereEmbeddings(BaseModel, Embeddings):
         .. code-block:: python
 
             from langchain.embeddings import CohereEmbeddings
-            cohere = CohereEmbeddings(model_name="medium", cohere_api_key="my-api-key")
+            cohere = CohereEmbeddings(model="medium", cohere_api_key="my-api-key")
     """
 
     client: Any  #: :meta private:
@@ -54,7 +54,7 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
         """
         texts = list(map(lambda x: x.replace("\n", " "), texts))
         embeddings = self.client.encode(texts)
-        return embeddings
+        return embeddings.tolist()
 
     def embed_query(self, text: str) -> List[float]:
         """Compute query embeddings using a HuggingFace transformer model.
@@ -67,4 +67,4 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings):
         """
         text = text.replace("\n", " ")
         embedding = self.client.encode(text)
-        return embedding
+        return embedding.tolist()
langchain/embeddings/tensorflow_hub.py (new file, 70 lines)
@@ -0,0 +1,70 @@
"""Wrapper around TensorflowHub embedding models."""
|
||||
from typing import Any, List
|
||||
|
||||
from pydantic import BaseModel, Extra
|
||||
|
||||
from langchain.embeddings.base import Embeddings
|
||||
|
||||
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
|
||||
|
||||
|
||||
class TensorflowHubEmbeddings(BaseModel, Embeddings):
|
||||
"""Wrapper around tensorflow_hub embedding models.
|
||||
|
||||
To use, you should have the ``tensorflow_text`` python package installed.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain.embeddings import TensorflowHubEmbeddings
|
||||
url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
|
||||
tf = TensorflowHubEmbeddings(model_url=url)
|
||||
"""
|
||||
|
||||
embed: Any #: :meta private:
|
||||
model_url: str = DEFAULT_MODEL_URL
|
||||
"""Model name to use."""
|
||||
|
||||
def __init__(self, **kwargs: Any):
|
||||
"""Initialize the tensorflow_hub and tensorflow_text."""
|
||||
super().__init__(**kwargs)
|
||||
try:
|
||||
import tensorflow_hub
|
||||
import tensorflow_text # noqa
|
||||
|
||||
self.embed = tensorflow_hub.load(self.model_url)
|
||||
except ImportError as e:
|
||||
raise ValueError(
|
||||
"Could not import some python packages." "Please install them."
|
||||
) from e
|
||||
|
||||
class Config:
|
||||
"""Configuration for this pydantic object."""
|
||||
|
||||
extra = Extra.forbid
|
||||
|
||||
def embed_documents(self, texts: List[str]) -> List[List[float]]:
|
||||
"""Compute doc embeddings using a TensorflowHub embedding model.
|
||||
|
||||
Args:
|
||||
texts: The list of texts to embed.
|
||||
|
||||
Returns:
|
||||
List of embeddings, one for each text.
|
||||
"""
|
||||
texts = list(map(lambda x: x.replace("\n", " "), texts))
|
||||
embeddings = self.embed(texts).numpy()
|
||||
return embeddings.tolist()
|
||||
|
||||
def embed_query(self, text: str) -> List[float]:
|
||||
"""Compute query embeddings using a TensorflowHub embedding model.
|
||||
|
||||
Args:
|
||||
text: The text to embed.
|
||||
|
||||
Returns:
|
||||
Embeddings for the text.
|
||||
"""
|
||||
text = text.replace("\n", " ")
|
||||
embedding = self.embed(text).numpy()[0]
|
||||
return embedding.tolist()
|
||||
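
A minimal sketch of the new wrapper, assuming `tensorflow-hub` and `tensorflow-text` are installed; the first call downloads the universal-sentence-encoder model from TF Hub.

from langchain.embeddings import TensorflowHubEmbeddings

embeddings = TensorflowHubEmbeddings()
vectors = embeddings.embed_documents(["hello world", "hola mundo"])
print(len(vectors), len(vectors[0]))  # 2 vectors of a fixed, model-defined width
query_vector = embeddings.embed_query("greeting")
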
@@ -74,12 +74,12 @@ class BaseLLM(BaseModel, ABC):
             )
             try:
                 output = self._generate(prompts, stop=stop)
-            except Exception as e:
+            except (KeyboardInterrupt, Exception) as e:
                 self.callback_manager.on_llm_error(e, verbose=self.verbose)
                 raise e
             self.callback_manager.on_llm_end(output, verbose=self.verbose)
             return output
-        params = self._llm_dict()
+        params = self.dict()
         params["stop"] = stop
         llm_string = str(sorted([(k, v) for k, v in params.items()]))
         missing_prompts = []
@@ -92,21 +92,25 @@ class BaseLLM(BaseModel, ABC):
             else:
                 missing_prompts.append(prompt)
                 missing_prompt_idxs.append(i)
-        self.callback_manager.on_llm_start(
-            {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
-        )
-        try:
-            new_results = self._generate(missing_prompts, stop=stop)
-        except Exception as e:
-            self.callback_manager.on_llm_error(e, verbose=self.verbose)
-            raise e
-        self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
-        for i, result in enumerate(new_results.generations):
-            existing_prompts[missing_prompt_idxs[i]] = result
-            prompt = prompts[missing_prompt_idxs[i]]
-            langchain.llm_cache.update(prompt, llm_string, result)
+        if len(missing_prompts) > 0:
+            self.callback_manager.on_llm_start(
+                {"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
+            )
+            try:
+                new_results = self._generate(missing_prompts, stop=stop)
+            except (KeyboardInterrupt, Exception) as e:
+                self.callback_manager.on_llm_error(e, verbose=self.verbose)
+                raise e
+            self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
+            for i, result in enumerate(new_results.generations):
+                existing_prompts[missing_prompt_idxs[i]] = result
+                prompt = prompts[missing_prompt_idxs[i]]
+                langchain.llm_cache.update(prompt, llm_string, result)
+            llm_output = new_results.llm_output
+        else:
+            llm_output = {}
         generations = [existing_prompts[i] for i in range(len(prompts))]
-        return LLMResult(generations=generations, llm_output=new_results.llm_output)
+        return LLMResult(generations=generations, llm_output=llm_output)
 
     def get_num_tokens(self, text: str) -> int:
         """Get the number of tokens present in the text."""
@@ -148,8 +152,8 @@ class BaseLLM(BaseModel, ABC):
     def _llm_type(self) -> str:
         """Return type of llm."""
 
-    def _llm_dict(self) -> Dict:
-        """Return a dictionary of the prompt."""
+    def dict(self, **kwargs: Any) -> Dict:
+        """Return a dictionary of the LLM."""
         starter_dict = dict(self._identifying_params)
         starter_dict["_type"] = self._llm_type
         return starter_dict
@@ -175,7 +179,7 @@ class BaseLLM(BaseModel, ABC):
         directory_path.mkdir(parents=True, exist_ok=True)
 
         # Fetch dictionary to save
-        prompt_dict = self._llm_dict()
+        prompt_dict = self.dict()
 
         if save_path.suffix == ".json":
             with open(file_path, "w") as f:
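
The `_llm_dict` -> `dict` rename means serialization now goes through the standard entry point; a short sketch, assuming an OpenAI API key is configured and that OpenAI's type key is "openai".

from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
params = llm.dict()       # identifying params plus a "_type" key
print(params["_type"])    # "openai" (assumed value of _llm_type)
llm.save("llm.json")      # persists the same dict as JSON
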
@@ -1,5 +1,6 @@
 """Wrapper around Cohere APIs."""
-from typing import Any, Dict, List, Mapping, Optional
+import logging
+from typing import Any, Dict, List, Optional
 
 from pydantic import BaseModel, Extra, root_validator
 
@@ -7,6 +8,8 @@ from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from langchain.utils import get_from_dict_or_env
 
+logger = logging.getLogger(__name__)
+
 
 class Cohere(LLM, BaseModel):
     """Wrapper around Cohere large language models.
@@ -46,6 +49,8 @@ class Cohere(LLM, BaseModel):
 
     cohere_api_key: Optional[str] = None
 
+    stop: Optional[List[str]] = None
+
     class Config:
         """Configuration for this pydantic object."""
 
@@ -69,7 +74,7 @@ class Cohere(LLM, BaseModel):
         return values
 
     @property
-    def _default_params(self) -> Mapping[str, Any]:
+    def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling Cohere API."""
         return {
             "max_tokens": self.max_tokens,
@@ -81,7 +86,7 @@ class Cohere(LLM, BaseModel):
         }
 
     @property
-    def _identifying_params(self) -> Mapping[str, Any]:
+    def _identifying_params(self) -> Dict[str, Any]:
         """Get the identifying parameters."""
         return {**{"model": self.model}, **self._default_params}
 
@@ -105,12 +110,18 @@ class Cohere(LLM, BaseModel):
 
                 response = cohere("Tell me a joke.")
         """
-        response = self.client.generate(
-            model=self.model, prompt=prompt, stop_sequences=stop, **self._default_params
-        )
+        params = self._default_params
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            params["stop_sequences"] = self.stop
+        else:
+            params["stop_sequences"] = stop
+
+        response = self.client.generate(model=self.model, prompt=prompt, **params)
         text = response.generations[0].text
         # If stop tokens are provided, Cohere's endpoint returns them.
         # In order to make this consistent with other endpoints, we strip them.
-        if stop is not None:
-            text = enforce_stop_tokens(text, stop)
+        if stop is not None or self.stop is not None:
+            text = enforce_stop_tokens(text, params["stop_sequences"])
         return text
 
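
How the new `stop` field interacts with per-call stop sequences, sketched under the assumption of a configured Cohere API key.

from langchain.llms import Cohere

cohere = Cohere(stop=["\n"])
text = cohere("Say foo:")           # uses the constructor's stop sequences
# cohere("Say foo:", stop=["bar"])  # would raise: `stop` found in both places
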
@@ -1,4 +1,6 @@
 """Wrapper around HuggingFace Pipeline APIs."""
+import importlib.util
+import logging
 from typing import Any, List, Mapping, Optional
 
 from pydantic import BaseModel, Extra
@@ -10,6 +12,8 @@ DEFAULT_MODEL_ID = "gpt2"
 DEFAULT_TASK = "text-generation"
 VALID_TASKS = ("text2text-generation", "text-generation")
 
+logger = logging.getLogger()
+
 
 class HuggingFacePipeline(LLM, BaseModel):
     """Wrapper around HuggingFace Pipeline API.
@@ -56,6 +60,7 @@ class HuggingFacePipeline(LLM, BaseModel):
         cls,
         model_id: str,
         task: str,
+        device: int = -1,
         model_kwargs: Optional[dict] = None,
         **kwargs: Any,
     ) -> LLM:
@@ -68,8 +73,16 @@ class HuggingFacePipeline(LLM, BaseModel):
             )
             from transformers import pipeline as hf_pipeline
 
-            _model_kwargs = model_kwargs or {}
-            tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
+        except ImportError:
+            raise ValueError(
+                "Could not import transformers python package. "
+                "Please install it with `pip install transformers`."
+            )
+
+        _model_kwargs = model_kwargs or {}
+        tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
+
+        try:
             if task == "text-generation":
                 model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
             elif task == "text2text-generation":
@@ -79,25 +92,47 @@ class HuggingFacePipeline(LLM, BaseModel):
                     f"Got invalid task {task}, "
                     f"currently only {VALID_TASKS} are supported"
                 )
-            pipeline = hf_pipeline(
-                task=task, model=model, tokenizer=tokenizer, model_kwargs=_model_kwargs
-            )
-            if pipeline.task not in VALID_TASKS:
-                raise ValueError(
-                    f"Got invalid task {pipeline.task}, "
-                    f"currently only {VALID_TASKS} are supported"
-                )
-            return cls(
-                pipeline=pipeline,
-                model_id=model_id,
-                model_kwargs=_model_kwargs,
-                **kwargs,
-            )
-        except ImportError:
+        except ImportError as e:
             raise ValueError(
-                "Could not import transformers python package. "
-                "Please install it with `pip install transformers`."
-            )
+                f"Could not load the {task} model due to missing dependencies."
+            ) from e
 
+        if importlib.util.find_spec("torch") is not None:
+            import torch
+
+            cuda_device_count = torch.cuda.device_count()
+            if device < -1 or (device >= cuda_device_count):
+                raise ValueError(
+                    f"Got device=={device}, "
+                    f"device is required to be within [-1, {cuda_device_count})"
+                )
+            if device < 0 and cuda_device_count > 0:
+                logger.warning(
+                    "Device has %d GPUs available. "
+                    "Provide device={deviceId} to `from_model_id` to use available "
+                    "GPUs for execution. deviceId is -1 (default) for CPU and "
+                    "can be a positive integer associated with CUDA device id.",
+                    cuda_device_count,
+                )
+
+        pipeline = hf_pipeline(
+            task=task,
+            model=model,
+            tokenizer=tokenizer,
+            device=device,
+            model_kwargs=_model_kwargs,
+        )
+        if pipeline.task not in VALID_TASKS:
+            raise ValueError(
+                f"Got invalid task {pipeline.task}, "
+                f"currently only {VALID_TASKS} are supported"
+            )
+        return cls(
+            pipeline=pipeline,
+            model_id=model_id,
+            model_kwargs=_model_kwargs,
+            **kwargs,
+        )
 
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
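
A sketch of the new `device` parameter on `from_model_id`: -1 (the default) pins the pipeline to CPU, while a non-negative id selects a CUDA device when torch reports one.

from langchain.llms import HuggingFacePipeline

llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    device=-1,  # CPU; pass 0 for the first GPU
)
print(llm("Once upon a time"))
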
@@ -182,7 +182,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
             generations=generations, llm_output={"token_usage": token_usage}
         )
 
-    def stream(self, prompt: str) -> Generator:
+    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
         """Call OpenAI with streaming flag and return the resulting generator.
 
         BETA: this is a beta feature while we figure out the right abstraction.
@@ -190,6 +190,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
 
         Args:
             prompt: The prompts to pass into the model.
+            stop: Optional list of stop words to use when generating.
 
         Returns:
             A generator representing the stream of tokens from OpenAI.
@@ -204,6 +205,10 @@ class BaseOpenAI(BaseLLM, BaseModel):
         params = self._invocation_params
         if params["best_of"] != 1:
             raise ValueError("OpenAI only supports best_of == 1 for streaming")
+        if stop is not None:
+            if "stop" in params:
+                raise ValueError("`stop` found in both the input and default params.")
+            params["stop"] = stop
         params["stream"] = True
         generator = self.client.create(prompt=prompt, **params)
 
@@ -249,7 +254,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
     def modelname_to_contextsize(self, modelname: str) -> int:
         """Calculate the maximum number of tokens possible to generate for a model.
 
-        text-davinci-003: 4,000 tokens
+        text-davinci-003: 4,097 tokens
         text-curie-001: 2,048 tokens
         text-babbage-001: 2,048 tokens
         text-ada-001: 2,048 tokens
@@ -268,7 +273,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
             max_tokens = openai.modelname_to_contextsize("text-davinci-003")
         """
         if modelname == "text-davinci-003":
-            return 4000
+            return 4097
         elif modelname == "text-curie-001":
             return 2048
         elif modelname == "text-babbage-001":
@@ -280,7 +285,7 @@ class BaseOpenAI(BaseLLM, BaseModel):
         elif modelname == "code-cushman-001":
             return 2048
         else:
-            return 4000
+            return 4097
 
     def max_tokens_for_prompt(self, prompt: str) -> int:
         """Calculate the maximum number of tokens possible to generate for a prompt.
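
The `stream` signature change in use; a beta-feature sketch assuming an OpenAI API key, where each yielded item is assumed to be a raw completion chunk from the OpenAI client.

from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
for chunk in llm.stream("Write a haiku about rivers.", stop=["\n\n"]):
    print(chunk["choices"][0]["text"], end="")
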
@@ -48,13 +48,24 @@ def check_valid_template(
|
||||
raise ValueError("Invalid prompt schema.")
|
||||
|
||||
|
||||
class BaseOutputParser(ABC):
|
||||
class BaseOutputParser(BaseModel, ABC):
|
||||
"""Class to parse the output of an LLM call."""
|
||||
|
||||
@abstractmethod
|
||||
def parse(self, text: str) -> Union[str, List[str], Dict[str, str]]:
|
||||
"""Parse the output of an LLM call."""
|
||||
|
||||
@property
|
||||
def _type(self) -> str:
|
||||
"""Return the type key."""
|
||||
raise NotImplementedError
|
||||
|
||||
def dict(self, **kwargs: Any) -> Dict:
|
||||
"""Return dictionary representation of output parser."""
|
||||
output_parser_dict = super().dict()
|
||||
output_parser_dict["_type"] = self._type
|
||||
return output_parser_dict
|
||||
|
||||
|
||||
class ListOutputParser(BaseOutputParser):
|
||||
"""Class to parse the output of an LLM call to a list."""
|
||||
@@ -79,6 +90,11 @@ class RegexParser(BaseOutputParser, BaseModel):
     output_keys: List[str]
     default_output_key: Optional[str] = None
 
+    @property
+    def _type(self) -> str:
+        """Return the type key."""
+        return "regex_parser"
+
     def parse(self, text: str) -> Dict[str, str]:
         """Parse the output of an LLM call."""
         match = re.search(self.regex, text)
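
Usage sketch (not part of the diff) for the now-serializable RegexParser:

    from langchain.prompts.base import RegexParser

    parser = RegexParser(regex=r"Score: (\d+)", output_keys=["score"])
    parser.parse("Score: 7")  # {'score': '7'}
    parser.dict()["_type"]    # 'regex_parser' -- the key the prompt loader dispatches on
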
@@ -110,7 +126,7 @@ class BasePromptTemplate(BaseModel, ABC):
 
     @root_validator()
     def validate_variable_names(cls, values: Dict) -> Dict:
-        """Validate variable names do not restricted names."""
+        """Validate variable names do not include restricted names."""
         if "stop" in values["input_variables"]:
             raise ValueError(
                 "Cannot have an input variable named 'stop', as it is used internally,"
@@ -135,9 +151,16 @@ class BasePromptTemplate(BaseModel, ABC):
             prompt.format(variable1="foo")
         """
 
-    def _prompt_dict(self) -> Dict:
-        """Return a dictionary of the prompt."""
-        return self.dict()
+    @property
+    @abstractmethod
+    def _prompt_type(self) -> str:
+        """Return the prompt type key."""
+
+    def dict(self, **kwargs: Any) -> Dict:
+        """Return dictionary representation of prompt."""
+        prompt_dict = super().dict(**kwargs)
+        prompt_dict["_type"] = self._prompt_type
+        return prompt_dict
 
     def save(self, file_path: Union[Path, str]) -> None:
         """Save the prompt.
@@ -160,7 +183,7 @@ class BasePromptTemplate(BaseModel, ABC):
         directory_path.mkdir(parents=True, exist_ok=True)
 
         # Fetch dictionary to save
-        prompt_dict = self._prompt_dict()
+        prompt_dict = self.dict()
 
         if save_path.suffix == ".json":
             with open(file_path, "w") as f:
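
With `_prompt_type` folded into `dict()`, saving stamps the type key automatically. A quick sketch (not part of the diff):

    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(input_variables=["product"], template="Name a {product} company.")
    prompt.save("prompt.json")  # the saved JSON now carries "_type": "prompt"
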
@@ -109,11 +109,13 @@ class FewShotPromptTemplate(BasePromptTemplate, BaseModel):
         # Format the template with the input variables.
         return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
 
-    def _prompt_dict(self) -> Dict:
+    @property
+    def _prompt_type(self) -> str:
+        """Return the prompt type key."""
+        return "few_shot"
+
+    def dict(self, **kwargs: Any) -> Dict:
         """Return a dictionary of the prompt."""
         if self.example_selector:
             raise ValueError("Saving an example selector is not currently supported")
-
-        prompt_dict = self.dict()
-        prompt_dict["_type"] = "few_shot"
-        return prompt_dict
+        return super().dict(**kwargs)
@@ -1,17 +1,15 @@
 """Load prompts from disk."""
 import importlib
 import json
-import os
-import tempfile
 from pathlib import Path
 from typing import Union
 
-import requests
 import yaml
 
-from langchain.prompts.base import BasePromptTemplate
+from langchain.prompts.base import BasePromptTemplate, RegexParser
 from langchain.prompts.few_shot import FewShotPromptTemplate
 from langchain.prompts.prompt import PromptTemplate
+from langchain.utilities.loading import try_load_from_hub
 
 URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
@@ -69,6 +67,20 @@ def _load_examples(config: dict) -> dict:
     return config
 
 
+def _load_output_parser(config: dict) -> dict:
+    """Load output parser."""
+    if "output_parser" in config:
+        if config["output_parser"] is not None:
+            _config = config["output_parser"]
+            output_parser_type = _config["_type"]
+            if output_parser_type == "regex_parser":
+                output_parser = RegexParser(**_config)
+            else:
+                raise ValueError(f"Unsupported output parser {output_parser_type}")
+            config["output_parser"] = output_parser
+    return config
+
+
 def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
     """Load the few shot prompt from the config."""
     # Load the suffix and prefix templates.
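
A hedged sketch (not part of the diff) of a config this helper accepts, fed through `load_prompt_from_config`; the field values are illustrative, and it assumes the prompt classes accept an `output_parser` field as this loader implies:

    from langchain.prompts.loading import load_prompt_from_config

    config = {
        "_type": "prompt",
        "input_variables": ["question"],
        "template": "Q: {question}\nScore:",
        "output_parser": {
            "_type": "regex_parser",
            "regex": r"Score: (\d+)",
            "output_keys": ["score"],
        },
    }
    prompt = load_prompt_from_config(config)
    prompt.output_parser.parse("Score: 7")  # {'score': '7'}
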
@@ -83,9 +95,10 @@ def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
         )
         config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
     else:
-        config["example_prompt"] = _load_prompt(config["example_prompt"])
+        config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
     # Load the examples.
     config = _load_examples(config)
+    config = _load_output_parser(config)
     return FewShotPromptTemplate(**config)
@@ -93,14 +106,16 @@ def _load_prompt(config: dict) -> PromptTemplate:
     """Load the prompt template from config."""
     # Load the template from disk if necessary.
     config = _load_template("template", config)
+    config = _load_output_parser(config)
     return PromptTemplate(**config)
 
 
 def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
     """Unified method for loading a prompt from LangChainHub or local fs."""
-    if isinstance(path, str) and path.startswith("lc://prompts"):
-        path = os.path.relpath("lc://prompts/conversation/prompt.json", "lc://prompts/")
-        return _load_from_hub(path)
+    if hub_result := try_load_from_hub(
+        path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
+    ):
+        return hub_result
+    else:
+        return _load_prompt_from_file(path)
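
Loading stays a single call either way; a sketch (not part of the diff):

    from langchain.prompts import load_prompt

    local_prompt = load_prompt("prompt.json")  # local filesystem
    hub_prompt = load_prompt("lc://prompts/conversation/prompt.json")  # via try_load_from_hub
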
@@ -135,19 +150,3 @@ def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
         raise ValueError(f"Got unsupported file type {file_path.suffix}")
     # Load the prompt from the config now.
     return load_prompt_from_config(config)
-
-
-def _load_from_hub(path: str) -> BasePromptTemplate:
-    """Load prompt from hub."""
-    suffix = path.split(".")[-1]
-    if suffix not in {"py", "json", "yaml"}:
-        raise ValueError("Unsupported file type.")
-    full_url = URL_BASE + path
-    r = requests.get(full_url)
-    if r.status_code != 200:
-        raise ValueError(f"Could not find file at {full_url}")
-    with tempfile.TemporaryDirectory() as tmpdirname:
-        file = tmpdirname + "/prompt." + suffix
-        with open(file, "wb") as f:
-            f.write(r.content)
-        return _load_prompt_from_file(file)
@@ -31,6 +31,11 @@ class PromptTemplate(BasePromptTemplate, BaseModel):
     template_format: str = "f-string"
     """The format of the prompt template. Options are: 'f-string', 'jinja2'."""
 
+    @property
+    def _prompt_type(self) -> str:
+        """Return the prompt type key."""
+        return "prompt"
+
     class Config:
         """Configuration for this pydantic object."""
@@ -1,7 +1,10 @@
 """Common schema objects."""
 
+from dataclasses import dataclass
 from typing import Any, Dict, List, NamedTuple, Optional
 
+from dataclasses_json import dataclass_json
+
 
 class AgentAction(NamedTuple):
     """Agent's action to take."""
@@ -18,7 +21,9 @@ class AgentFinish(NamedTuple):
     log: str
 
 
-class Generation(NamedTuple):
+@dataclass_json
+@dataclass
+class Generation:
     """Output of a single generation."""
 
     text: str
@@ -30,7 +35,9 @@ class Generation(NamedTuple):
     # TODO: add log probs
 
 
-class LLMResult(NamedTuple):
+@dataclass_json
+@dataclass
+class LLMResult:
     """Class that contains all relevant information for an LLM Result."""
 
     generations: List[List[Generation]]
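
What the `dataclasses_json` mixin buys, sketched below (not part of the diff): the decorated classes gain `to_json`/`from_json`; those method names come from the dataclasses_json library, and any remaining fields are assumed to keep their defaults:

    from langchain.schema import Generation, LLMResult

    result = LLMResult(generations=[[Generation(text="hello")]])
    payload = result.to_json()               # serialize, e.g. to send over the wire
    restored = LLMResult.from_json(payload)  # round-trips, nested Generation included
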
langchain/server.py (new file, 14 lines)
@@ -0,0 +1,14 @@
+"""Script to run langchain-server locally using docker-compose."""
+import subprocess
+from pathlib import Path
+
+
+def main() -> None:
+    """Run the langchain server locally."""
+    p = Path(__file__).absolute().parent / "docker-compose.yaml"
+    subprocess.run(["docker-compose", "-f", str(p), "pull"])
+    subprocess.run(["docker-compose", "-f", str(p), "up"])
+
+
+if __name__ == "__main__":
+    main()
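
Usage note (not part of the diff): because the module guards on `__name__`, it can be launched with `python -m langchain.server`; Docker and docker-compose must be available on the host, and the referenced docker-compose.yaml is expected to sit alongside the module.
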
@@ -16,6 +16,7 @@ class SQLDatabase:
         schema: Optional[str] = None,
         ignore_tables: Optional[List[str]] = None,
         include_tables: Optional[List[str]] = None,
+        sample_row_in_table_info: bool = False,
     ):
         """Create engine from database URI."""
         self._engine = engine
@@ -39,6 +40,7 @@ class SQLDatabase:
             raise ValueError(
                 f"ignore_tables {missing_tables} not found in database"
             )
+        self._sample_row_in_table_info = sample_row_in_table_info
 
     @classmethod
     def from_uri(cls, database_uri: str, **kwargs: Any) -> SQLDatabase:
@@ -69,14 +71,28 @@ class SQLDatabase:
         if missing_tables:
             raise ValueError(f"table_names {missing_tables} not found in database")
         all_table_names = table_names
 
         template = "Table '{table_name}' has columns: {columns}."
 
         tables = []
         for table_name in all_table_names:
             columns = []
             for column in self._inspector.get_columns(table_name, schema=self._schema):
                 columns.append(f"{column['name']} ({str(column['type'])})")
             column_str = ", ".join(columns)
             table_str = template.format(table_name=table_name, columns=column_str)
+
+            if self._sample_row_in_table_info:
+                row_template = (
+                    " Here is an example row for this table"
+                    " (long strings are truncated): {sample_row}."
+                )
+                sample_row = self.run(f"SELECT * FROM '{table_name}' LIMIT 1")
+                if len(eval(sample_row)) > 0:
+                    sample_row = " ".join([str(i)[:100] for i in eval(sample_row)[0]])
+                table_str += row_template.format(sample_row=sample_row)
+
             tables.append(table_str)
         return "\n".join(tables)
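
A hedged sketch (not part of the diff) of the new flag against an in-memory SQLite database; the `table_info` property name is assumed from this revision, and the table is illustrative:

    from sqlalchemy import create_engine

    from langchain.sql_database import SQLDatabase

    engine = create_engine("sqlite:///:memory:")
    with engine.begin() as conn:
        conn.exec_driver_sql("CREATE TABLE users (id INTEGER, name TEXT)")
        conn.exec_driver_sql("INSERT INTO users VALUES (1, 'Ada')")

    db = SQLDatabase(engine, sample_row_in_table_info=True)
    print(db.table_info)  # column list for 'users' plus one truncated example row
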
@@ -86,7 +102,7 @@ class SQLDatabase:
         If the statement returns rows, a string of the results is returned.
         If the statement returns no rows, an empty string is returned.
         """
-        with self._engine.connect() as connection:
+        with self._engine.begin() as connection:
             if self._schema is not None:
                 connection.exec_driver_sql(f"SET search_path TO {self._schema}")
             cursor = connection.exec_driver_sql(command)
@@ -12,15 +12,13 @@ class BashProcess:
 
     def run(self, commands: Union[str, List[str]]) -> str:
         """Run commands and return final output."""
-        outputs = []
         if isinstance(commands, str):
             commands = [commands]
-        for command in commands:
-            try:
-                output = subprocess.check_output(command, shell=True).decode()
-                if self.strip_newlines:
-                    output = output.strip()
-                outputs.append(output)
-            except subprocess.CalledProcessError as error:
-                return str(error)
-        return outputs[-1]
+        commands = ";".join(commands)
+        try:
+            output = subprocess.check_output(commands, shell=True).decode()
+        except subprocess.CalledProcessError as error:
+            return str(error)
+        if self.strip_newlines:
+            output = output.strip()
+        return output
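
The behavioral point of the rewrite, sketched below (not part of the diff; import path assumed for this revision): joining with ";" runs everything in a single shell, so state such as the working directory now carries across commands:

    from langchain.utilities.bash import BashProcess

    bash = BashProcess()
    print(bash.run(["cd /tmp", "pwd"]))  # '/tmp' -- both commands share one shell
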
@@ -54,7 +54,7 @@ class BingSearchAPIWrapper(BaseModel):
             values,
             "bing_search_url",
             "BING_SEARCH_URL",
-            default="https://api.bing.microsoft.com/v7.0/search",
+            # default="https://api.bing.microsoft.com/v7.0/search",
         )
 
         values["bing_search_url"] = bing_search_url
@@ -71,3 +71,30 @@ class BingSearchAPIWrapper(BaseModel):
             snippets.append(result["snippet"])
 
         return " ".join(snippets)
+
+    def results(self, query: str, num_results: int) -> List[Dict]:
+        """Run query through BingSearch and return metadata.
+
+        Args:
+            query: The query to search for.
+            num_results: The number of results to return.
+
+        Returns:
+            A list of dictionaries with the following keys:
+                snippet - The description of the result.
+                title - The title of the result.
+                link - The link to the result.
+        """
+        metadata_results = []
+        results = self._bing_search_results(query, count=num_results)
+        if len(results) == 0:
+            return [{"Result": "No good Bing Search Result was found"}]
+        for result in results:
+            metadata_result = {
+                "snippet": result["snippet"],
+                "title": result["name"],
+                "link": result["url"],
+            }
+            metadata_results.append(metadata_result)
+
+        return metadata_results
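
A hedged usage sketch (not part of the diff); it assumes BING_SUBSCRIPTION_KEY and BING_SEARCH_URL are set in the environment, the latter now mandatory since this hunk drops the default URL:

    from langchain.utilities.bing_search import BingSearchAPIWrapper

    search = BingSearchAPIWrapper()
    search.run("langchain")  # concatenated snippets, as before
    for item in search.results("langchain", 3):
        print(item["title"], "->", item["link"])
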
@@ -48,6 +48,7 @@ class GoogleSearchAPIWrapper(BaseModel):
     search_engine: Any  #: :meta private:
     google_api_key: Optional[str] = None
     google_cse_id: Optional[str] = None
+    k: int = 10
 
     class Config:
         """Configuration for this pydantic object."""
@@ -91,10 +92,37 @@ class GoogleSearchAPIWrapper(BaseModel):
     def run(self, query: str) -> str:
         """Run query through GoogleSearch and parse result."""
         snippets = []
-        results = self._google_search_results(query, num=10)
+        results = self._google_search_results(query, num=self.k)
         if len(results) == 0:
             return "No good Google Search Result was found"
         for result in results:
             snippets.append(result["snippet"])
 
         return " ".join(snippets)
+
+    def results(self, query: str, num_results: int) -> List[Dict]:
+        """Run query through GoogleSearch and return metadata.
+
+        Args:
+            query: The query to search for.
+            num_results: The number of results to return.
+
+        Returns:
+            A list of dictionaries with the following keys:
+                snippet - The description of the result.
+                title - The title of the result.
+                link - The link to the result.
+        """
+        metadata_results = []
+        results = self._google_search_results(query, num=num_results)
+        if len(results) == 0:
+            return [{"Result": "No good Google Search Result was found"}]
+        for result in results:
+            metadata_result = {
+                "snippet": result["snippet"],
+                "title": result["title"],
+                "link": result["link"],
+            }
+            metadata_results.append(metadata_result)
+
+        return metadata_results
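
The matching hedged sketch for the Google wrapper (not part of the diff; assumes GOOGLE_API_KEY and GOOGLE_CSE_ID are set):

    from langchain.utilities.google_search import GoogleSearchAPIWrapper

    search = GoogleSearchAPIWrapper(k=5)  # run() now honors k instead of a hard-coded 10
    search.run("langchain")               # concatenated snippets for 5 results
    search.results("langchain", 3)        # [{'snippet': ..., 'title': ..., 'link': ...}, ...]
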
Some files were not shown because too many files have changed in this diff.