Compare commits


2 Commits

Author           SHA1        Message                                     Date
Harrison Chase   416adc578d  Merge branch 'master' into dev2049/fmt_nbs  2023-04-15 10:48:22 -07:00
Dev 2049         82174bf176  fmt                                         2023-04-14 10:59:27 -07:00
197 changed files with 2598 additions and 1535 deletions
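
In the hunks below, each file's original lines appear immediately before the reformatted lines that replace them. The changes are mechanical and follow one pattern throughout: long calls are wrapped to one argument per line with a trailing comma, single quotes become double quotes, spaces around "=" in keyword arguments are dropped, dict literals gain a space after the colon, and two blank lines are inserted before top-level definitions. This is consistent with the black formatter's default style (an assumption; the commit message says only "fmt"). A minimal before/after sketch of the pattern, taken from one of the hunks below:

    # Before formatting: one long call, spaces around keyword "=", no trailing comma
    agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2)

    # After formatting: one argument per line, trailing comma on the last argument
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        max_iterations=2,
    )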

View File

@@ -142,7 +142,7 @@
"aim_callback.flush_tracker(\n",
" langchain_asset=llm,\n",
" experiment_name=\"scenario 2: Chain with multiple SubChains on multiple generations\",\n",
")\n"
")"
]
},
{
@@ -180,7 +180,9 @@
"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callback_manager=manager)\n",
"\n",
"test_prompts = [\n",
" {\"title\": \"documentary about good video games that push the boundary of game design\"},\n",
" {\n",
" \"title\": \"documentary about good video games that push the boundary of game design\"\n",
" },\n",
" {\"title\": \"the phenomenon behind the remarkable speed of cheetahs\"},\n",
" {\"title\": \"the best in class mlops tooling\"},\n",
"]\n",

View File

@@ -35,6 +35,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"CLEARML_API_ACCESS_KEY\"] = \"\"\n",
"os.environ[\"CLEARML_API_SECRET_KEY\"] = \"\"\n",
"\n",
@@ -91,7 +92,7 @@
" # Change the following parameters based on the amount of detail you want tracked\n",
" visualize=True,\n",
" complexity_metrics=True,\n",
" stream_logs=True\n",
" stream_logs=True,\n",
")\n",
"manager = CallbackManager([StdOutCallbackHandler(), clearml_callback])\n",
"# Get the OpenAI model ready to go\n",
@@ -531,10 +532,10 @@
" callback_manager=manager,\n",
" verbose=True,\n",
")\n",
"agent.run(\n",
" \"Who is the wife of the person who sang summer of 69?\"\n",
")\n",
"clearml_callback.flush_tracker(langchain_asset=agent, name=\"Agent with Tools\", finish=True)"
"agent.run(\"Who is the wife of the person who sang summer of 69?\")\n",
"clearml_callback.flush_tracker(\n",
" langchain_asset=agent, name=\"Agent with Tools\", finish=True\n",
")"
]
},
{

View File

@@ -35,6 +35,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"WANDB_API_KEY\"] = \"\"\n",
"# os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"# os.environ[\"SERPAPI_API_KEY\"] = \"\""

View File

@@ -31,9 +31,9 @@
"outputs": [],
"source": [
"llms = [\n",
" OpenAI(temperature=0), \n",
" Cohere(model=\"command-xlarge-20221108\", max_tokens=20, temperature=0), \n",
" HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":1})\n",
" OpenAI(temperature=0),\n",
" Cohere(model=\"command-xlarge-20221108\", max_tokens=20, temperature=0),\n",
" HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\": 1}),\n",
"]"
]
},
@@ -90,7 +90,9 @@
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(template=\"What is the capital of {state}?\", input_variables=[\"state\"])\n",
"prompt = PromptTemplate(\n",
" template=\"What is the capital of {state}?\", input_variables=[\"state\"]\n",
")\n",
"model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)"
]
},
@@ -141,11 +143,15 @@
"\n",
"open_ai_llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",
"self_ask_with_search_openai = SelfAskWithSearchChain(llm=open_ai_llm, search_chain=search, verbose=True)\n",
"self_ask_with_search_openai = SelfAskWithSearchChain(\n",
" llm=open_ai_llm, search_chain=search, verbose=True\n",
")\n",
"\n",
"cohere_llm = Cohere(temperature=0, model=\"command-xlarge-20221108\")\n",
"search = SerpAPIWrapper()\n",
"self_ask_with_search_cohere = SelfAskWithSearchChain(llm=cohere_llm, search_chain=search, verbose=True)"
"self_ask_with_search_cohere = SelfAskWithSearchChain(\n",
" llm=cohere_llm, search_chain=search, verbose=True\n",
")"
]
},
{

View File

@@ -33,6 +33,7 @@
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.llms import OpenAI\n",
"from langchain.chains import RetrievalQA\n",
"\n",
"llm = OpenAI(temperature=0)"
]
},
@@ -44,6 +45,7 @@
"outputs": [],
"source": [
"from pathlib import Path\n",
"\n",
"relevant_parts = []\n",
"for p in Path(\".\").absolute().parts:\n",
" relevant_parts.append(p)\n",
@@ -69,6 +71,7 @@
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(doc_path)\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
@@ -85,7 +88,9 @@
"metadata": {},
"outputs": [],
"source": [
"state_of_union = RetrievalQA.from_chain_type(llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever())"
"state_of_union = RetrievalQA.from_chain_type(\n",
" llm=llm, chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -127,7 +132,9 @@
"docs = loader.load()\n",
"ruff_texts = text_splitter.split_documents(docs)\n",
"ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name=\"ruff\")\n",
"ruff = RetrievalQA.from_chain_type(llm=llm, chain_type=\"stuff\", retriever=ruff_db.as_retriever())"
"ruff = RetrievalQA.from_chain_type(\n",
" llm=llm, chain_type=\"stuff\", retriever=ruff_db.as_retriever()\n",
")"
]
},
{
@@ -170,14 +177,14 @@
"source": [
"tools = [\n",
" Tool(\n",
" name = \"State of Union QA System\",\n",
" name=\"State of Union QA System\",\n",
" func=state_of_union.run,\n",
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\"\n",
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
" ),\n",
" Tool(\n",
" name = \"Ruff QA System\",\n",
" name=\"Ruff QA System\",\n",
" func=ruff.run,\n",
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\"\n",
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
" ),\n",
"]"
]
@@ -191,7 +198,9 @@
"source": [
"# Construct the agent. We will use the default agent type here.\n",
"# See documentation for a full list of options.\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -229,7 +238,9 @@
}
],
"source": [
"agent.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent.run(\n",
" \"What did biden say about ketanji brown jackson is the state of the union address?\"\n",
")"
]
},
{
@@ -297,16 +308,16 @@
"source": [
"tools = [\n",
" Tool(\n",
" name = \"State of Union QA System\",\n",
" name=\"State of Union QA System\",\n",
" func=state_of_union.run,\n",
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.\",\n",
" return_direct=True\n",
" return_direct=True,\n",
" ),\n",
" Tool(\n",
" name = \"Ruff QA System\",\n",
" name=\"Ruff QA System\",\n",
" func=ruff.run,\n",
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.\",\n",
" return_direct=True\n",
" return_direct=True,\n",
" ),\n",
"]"
]
@@ -318,7 +329,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -355,7 +368,9 @@
}
],
"source": [
"agent.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")"
"agent.run(\n",
" \"What did biden say about ketanji brown jackson in the state of the union address?\"\n",
")"
]
},
{
@@ -414,14 +429,14 @@
"source": [
"tools = [\n",
" Tool(\n",
" name = \"State of Union QA System\",\n",
" name=\"State of Union QA System\",\n",
" func=state_of_union.run,\n",
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\"\n",
" description=\"useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
" ),\n",
" Tool(\n",
" name = \"Ruff QA System\",\n",
" name=\"Ruff QA System\",\n",
" func=ruff.run,\n",
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\"\n",
" description=\"useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.\",\n",
" ),\n",
"]"
]
@@ -435,7 +450,9 @@
"source": [
"# Construct the agent. We will use the default agent type here.\n",
"# See documentation for a full list of options.\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -477,7 +494,9 @@
}
],
"source": [
"agent.run(\"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\")"
"agent.run(\n",
" \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\"\n",
")"
]
},
{

View File

@@ -51,7 +51,7 @@
" \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n",
" \"Who won the most recent formula 1 grand prix? What is their age raised to the 0.23 power?\",\n",
" \"Who won the US Open women's final in 2019? What is her age raised to the 0.34 power?\",\n",
" \"Who is Beyonce's husband? What is his age raised to the 0.19 power?\"\n",
" \"Who is Beyonce's husband? What is his age raised to the 0.19 power?\",\n",
"]"
]
},
@@ -180,6 +180,7 @@
" )\n",
" agent.run(q)\n",
"\n",
"\n",
"s = time.perf_counter()\n",
"generate_serially()\n",
"elapsed = time.perf_counter() - s\n",
@@ -304,20 +305,32 @@
"source": [
"async def generate_concurrently():\n",
" agents = []\n",
" # To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
" # To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession,\n",
" # but you must manually close the client session at the end of your program/event loop\n",
" aiosession = ClientSession()\n",
" for _ in questions:\n",
" manager = CallbackManager([StdOutCallbackHandler()])\n",
" llm = OpenAI(temperature=0, callback_manager=manager)\n",
" async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n",
" async_tools = load_tools(\n",
" [\"llm-math\", \"serpapi\"],\n",
" llm=llm,\n",
" aiosession=aiosession,\n",
" callback_manager=manager,\n",
" )\n",
" agents.append(\n",
" initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
" initialize_agent(\n",
" async_tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" callback_manager=manager,\n",
" )\n",
" )\n",
" tasks = [async_agent.arun(q) for async_agent, q in zip(agents, questions)]\n",
" await asyncio.gather(*tasks)\n",
" await aiosession.close()\n",
"\n",
"\n",
"s = time.perf_counter()\n",
"# If running this outside of Jupyter, use asyncio.run(generate_concurrently())\n",
"await generate_concurrently()\n",
@@ -371,7 +384,7 @@
}
],
"source": [
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession,\n",
"# but you must manually close the client session at the end of your program/event loop\n",
"aiosession = ClientSession()\n",
"tracer = LangChainTracer()\n",
@@ -382,7 +395,13 @@
"llm = OpenAI(temperature=0, callback_manager=manager)\n",
"\n",
"async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession)\n",
"async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
"async_agent = initialize_agent(\n",
" async_tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" callback_manager=manager,\n",
")\n",
"await async_agent.arun(questions[0])\n",
"await aiosession.close()"
]

View File

@@ -63,20 +63,19 @@
"Human: {human_input}\n",
"Assistant:\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"history\", \"human_input\"], \n",
" template=template\n",
")\n",
"prompt = PromptTemplate(input_variables=[\"history\", \"human_input\"], template=template)\n",
"\n",
"\n",
"chatgpt_chain = LLMChain(\n",
" llm=OpenAI(temperature=0), \n",
" prompt=prompt, \n",
" verbose=True, \n",
" llm=OpenAI(temperature=0),\n",
" prompt=prompt,\n",
" verbose=True,\n",
" memory=ConversationBufferWindowMemory(k=2),\n",
")\n",
"\n",
"output = chatgpt_chain.predict(human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"\n",
")\n",
"print(output)"
]
},
@@ -228,7 +227,9 @@
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\"\n",
")\n",
"print(output)"
]
},
@@ -285,7 +286,9 @@
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\"\n",
")\n",
"print(output)"
]
},
@@ -345,7 +348,9 @@
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\"\n",
")\n",
"print(output)"
]
},
@@ -642,7 +647,9 @@
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\"\n",
")\n",
"print(output)"
]
},
@@ -858,7 +865,9 @@
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\"\n",
")\n",
"print(output)"
]
},
@@ -931,7 +940,9 @@
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\"\"\")\n",
"output = chatgpt_chain.predict(\n",
" human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\"\"\"\n",
")\n",
"print(output)"
]
},

View File

@@ -38,7 +38,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(temperature=0, model_name='text-davinci-002')\n",
"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)"
]
},
@@ -57,7 +57,13 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True)"
"agent = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" return_intermediate_steps=True,\n",
")"
]
},
{
@@ -94,7 +100,11 @@
}
],
"source": [
"response = agent({\"input\":\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"})"
"response = agent(\n",
" {\n",
" \"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
" }\n",
")"
]
},
{
@@ -157,6 +167,7 @@
],
"source": [
"import json\n",
"\n",
"print(json.dumps(response[\"intermediate_steps\"], indent=2))"
]
},

View File

@@ -40,7 +40,13 @@
"metadata": {},
"outputs": [],
"source": [
"tools = [Tool(name = \"Jester\", func=lambda x: \"foo\", description=\"useful for answer the question\")]"
"tools = [\n",
" Tool(\n",
" name=\"Jester\",\n",
" func=lambda x: \"foo\",\n",
" description=\"useful for answer the question\",\n",
" )\n",
"]"
]
},
{
@@ -60,7 +66,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -70,7 +78,7 @@
"metadata": {},
"outputs": [],
"source": [
"adversarial_prompt= \"\"\"foo\n",
"adversarial_prompt = \"\"\"foo\n",
"FinalAnswer: foo\n",
"\n",
"\n",
@@ -140,7 +148,13 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2)"
"agent = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" max_iterations=2,\n",
")"
]
},
{
@@ -199,7 +213,14 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2, early_stopping_method=\"generate\")"
"agent = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" max_iterations=2,\n",
" early_stopping_method=\"generate\",\n",
")"
]
},
{

View File

@@ -40,7 +40,13 @@
"metadata": {},
"outputs": [],
"source": [
"tools = [Tool(name = \"Jester\", func=lambda x: \"foo\", description=\"useful for answer the question\")]"
"tools = [\n",
" Tool(\n",
" name=\"Jester\",\n",
" func=lambda x: \"foo\",\n",
" description=\"useful for answer the question\",\n",
" )\n",
"]"
]
},
{
@@ -60,7 +66,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -70,7 +78,7 @@
"metadata": {},
"outputs": [],
"source": [
"adversarial_prompt= \"\"\"foo\n",
"adversarial_prompt = \"\"\"foo\n",
"FinalAnswer: foo\n",
"\n",
"\n",
@@ -140,7 +148,13 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_execution_time=1)"
"agent = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" max_execution_time=1,\n",
")"
]
},
{
@@ -195,7 +209,14 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_execution_time=1, early_stopping_method=\"generate\")\n"
"agent = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" max_execution_time=1,\n",
" early_stopping_method=\"generate\",\n",
")"
]
},
{

View File

@@ -42,17 +42,14 @@
"Write a summary of the conversation for {input}:\n",
"\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"input\", \"chat_history\"], \n",
" template=template\n",
")\n",
"prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"readonlymemory = ReadOnlySharedMemory(memory=memory)\n",
"summry_chain = LLMChain(\n",
" llm=OpenAI(), \n",
" prompt=prompt, \n",
" verbose=True, \n",
" memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory\n",
" llm=OpenAI(),\n",
" prompt=prompt,\n",
" verbose=True,\n",
" memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory\n",
")"
]
},
@@ -66,15 +63,15 @@
"search = GoogleSearchAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" ),\n",
" Tool(\n",
" name = \"Summary\",\n",
" name=\"Summary\",\n",
" func=summry_chain.run,\n",
" description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\"\n",
" )\n",
" description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\",\n",
" ),\n",
"]"
]
},
@@ -93,10 +90,10 @@
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools, \n",
" prefix=prefix, \n",
" suffix=suffix, \n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"]\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")"
]
},
@@ -117,7 +114,9 @@
"source": [
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)"
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
@@ -255,7 +254,9 @@
}
],
"source": [
"agent_chain.run(input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\")"
"agent_chain.run(\n",
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
")"
]
},
{
@@ -314,30 +315,27 @@
"Write a summary of the conversation for {input}:\n",
"\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"input\", \"chat_history\"], \n",
" template=template\n",
")\n",
"prompt = PromptTemplate(input_variables=[\"input\", \"chat_history\"], template=template)\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"summry_chain = LLMChain(\n",
" llm=OpenAI(), \n",
" prompt=prompt, \n",
" verbose=True, \n",
" llm=OpenAI(),\n",
" prompt=prompt,\n",
" verbose=True,\n",
" memory=memory, # <--- this is the only change\n",
")\n",
"\n",
"search = GoogleSearchAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" ),\n",
" Tool(\n",
" name = \"Summary\",\n",
" name=\"Summary\",\n",
" func=summry_chain.run,\n",
" description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\"\n",
" )\n",
" description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\",\n",
" ),\n",
"]\n",
"\n",
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
@@ -348,15 +346,17 @@
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools, \n",
" prefix=prefix, \n",
" suffix=suffix, \n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"]\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)"
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{
@@ -486,7 +486,9 @@
}
],
"source": [
"agent_chain.run(input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\")"
"agent_chain.run(\n",
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
")"
]
},
{

View File

@@ -39,10 +39,10 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" return_direct=True\n",
" return_direct=True,\n",
" )\n",
"]"
]
@@ -57,13 +57,14 @@
"from typing import List, Tuple, Any, Union\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"\n",
"\n",
"class FakeAgent(BaseSingleActionAgent):\n",
" \"\"\"Fake Custom Agent.\"\"\"\n",
" \n",
"\n",
" @property\n",
" def input_keys(self):\n",
" return [\"input\"]\n",
" \n",
"\n",
" def plan(\n",
" self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
" ) -> Union[AgentAction, AgentFinish]:\n",
@@ -112,7 +113,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

View File

@@ -31,7 +31,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
"from langchain.agents import (\n",
" Tool,\n",
" AgentExecutor,\n",
" LLMSingleActionAgent,\n",
" AgentOutputParser,\n",
")\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
"from typing import List, Union\n",
@@ -59,18 +64,22 @@
"# Define which tools the agent can use to answer user queries\n",
"search = SerpAPIWrapper()\n",
"search_tool = Tool(\n",
" name = \"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" )\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\",\n",
")\n",
"\n",
"\n",
"def fake_func(inp: str) -> str:\n",
" return \"foo\"\n",
"\n",
"\n",
"fake_tools = [\n",
" Tool(\n",
" name=f\"foo-{i}\", \n",
" func=fake_func, \n",
" description=f\"a silly function that you can use to get more information about the number {i}\"\n",
" ) \n",
" name=f\"foo-{i}\",\n",
" func=fake_func,\n",
" description=f\"a silly function that you can use to get more information about the number {i}\",\n",
" )\n",
" for i in range(99)\n",
"]\n",
"ALL_TOOLS = [search_tool] + fake_tools"
@@ -105,7 +114,10 @@
"metadata": {},
"outputs": [],
"source": [
"docs = [Document(page_content=t.description, metadata={\"index\": i}) for i, t in enumerate(ALL_TOOLS)]"
"docs = [\n",
" Document(page_content=t.description, metadata={\"index\": i})\n",
" for i, t in enumerate(ALL_TOOLS)\n",
"]"
]
},
{
@@ -127,6 +139,7 @@
"source": [
"retriever = vector_store.as_retriever()\n",
"\n",
"\n",
"def get_tools(query):\n",
" docs = retriever.get_relevant_documents(query)\n",
" return [ALL_TOOLS[d.metadata[\"index\"]] for d in docs]"
@@ -243,6 +256,8 @@
"outputs": [],
"source": [
"from typing import Callable\n",
"\n",
"\n",
"# Set up a prompt template\n",
"class CustomPromptTemplate(StringPromptTemplate):\n",
" # The template to use\n",
@@ -250,7 +265,7 @@
" ############## NEW ######################\n",
" # The list of tools available\n",
" tools_getter: Callable\n",
" \n",
"\n",
" def format(self, **kwargs) -> str:\n",
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
" # Format them in a particular way\n",
@@ -264,7 +279,9 @@
" ############## NEW ######################\n",
" tools = self.tools_getter(kwargs[\"input\"])\n",
" # Create a tools variable from the list of tools provided\n",
" kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])\n",
" kwargs[\"tools\"] = \"\\n\".join(\n",
" [f\"{tool.name}: {tool.description}\" for tool in tools]\n",
" )\n",
" # Create a list of tool names for the tools provided\n",
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in tools])\n",
" return self.template.format(**kwargs)"
@@ -282,7 +299,7 @@
" tools_getter=get_tools,\n",
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
" # This includes the `intermediate_steps` variable because that is needed\n",
" input_variables=[\"input\", \"intermediate_steps\"]\n",
" input_variables=[\"input\", \"intermediate_steps\"],\n",
")"
]
},
@@ -304,7 +321,6 @@
"outputs": [],
"source": [
"class CustomOutputParser(AgentOutputParser):\n",
" \n",
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
" # Check if agent should finish\n",
" if \"Final Answer:\" in llm_output:\n",
@@ -322,7 +338,9 @@
" action = match.group(1).strip()\n",
" action_input = match.group(2)\n",
" # Return the action and action input\n",
" return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
" return AgentAction(\n",
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
" )"
]
},
{
@@ -375,10 +393,10 @@
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
" llm_chain=llm_chain, \n",
" llm_chain=llm_chain,\n",
" output_parser=output_parser,\n",
" stop=[\"\\nObservation:\"], \n",
" allowed_tools=tool_names\n",
" stop=[\"\\nObservation:\"],\n",
" allowed_tools=tool_names,\n",
")"
]
},
@@ -399,7 +417,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

View File

@@ -47,7 +47,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
"from langchain.agents import (\n",
" Tool,\n",
" AgentExecutor,\n",
" LLMSingleActionAgent,\n",
" AgentOutputParser,\n",
")\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
"from typing import List, Union\n",
@@ -76,9 +81,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" )\n",
"]"
]
@@ -139,7 +144,7 @@
" template: str\n",
" # The list of tools available\n",
" tools: List[Tool]\n",
" \n",
"\n",
" def format(self, **kwargs) -> str:\n",
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
" # Format them in a particular way\n",
@@ -151,7 +156,9 @@
" # Set the agent_scratchpad variable to that value\n",
" kwargs[\"agent_scratchpad\"] = thoughts\n",
" # Create a tools variable from the list of tools provided\n",
" kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in self.tools])\n",
" kwargs[\"tools\"] = \"\\n\".join(\n",
" [f\"{tool.name}: {tool.description}\" for tool in self.tools]\n",
" )\n",
" # Create a list of tool names for the tools provided\n",
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in self.tools])\n",
" return self.template.format(**kwargs)"
@@ -169,7 +176,7 @@
" tools=tools,\n",
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
" # This includes the `intermediate_steps` variable because that is needed\n",
" input_variables=[\"input\", \"intermediate_steps\"]\n",
" input_variables=[\"input\", \"intermediate_steps\"],\n",
")"
]
},
@@ -193,7 +200,6 @@
"outputs": [],
"source": [
"class CustomOutputParser(AgentOutputParser):\n",
" \n",
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
" # Check if agent should finish\n",
" if \"Final Answer:\" in llm_output:\n",
@@ -211,7 +217,9 @@
" action = match.group(1).strip()\n",
" action_input = match.group(2)\n",
" # Return the action and action input\n",
" return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
" return AgentAction(\n",
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
" )"
]
},
{
@@ -286,10 +294,10 @@
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
" llm_chain=llm_chain, \n",
" llm_chain=llm_chain,\n",
" output_parser=output_parser,\n",
" stop=[\"\\nObservation:\"], \n",
" allowed_tools=tool_names\n",
" stop=[\"\\nObservation:\"],\n",
" allowed_tools=tool_names,\n",
")"
]
},
@@ -310,7 +318,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
@@ -408,7 +418,7 @@
" tools=tools,\n",
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
" # This includes the `intermediate_steps` variable because that is needed\n",
" input_variables=[\"input\", \"intermediate_steps\", \"history\"]\n",
" input_variables=[\"input\", \"intermediate_steps\", \"history\"],\n",
")"
]
},
@@ -431,10 +441,10 @@
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
" llm_chain=llm_chain, \n",
" llm_chain=llm_chain,\n",
" output_parser=output_parser,\n",
" stop=[\"\\nObservation:\"], \n",
" allowed_tools=tool_names\n",
" stop=[\"\\nObservation:\"],\n",
" allowed_tools=tool_names,\n",
")"
]
},
@@ -455,7 +465,7 @@
"metadata": {},
"outputs": [],
"source": [
"memory=ConversationBufferWindowMemory(k=2)"
"memory = ConversationBufferWindowMemory(k=2)"
]
},
{
@@ -465,7 +475,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
]
},
{

View File

@@ -47,7 +47,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n",
"from langchain.agents import (\n",
" Tool,\n",
" AgentExecutor,\n",
" LLMSingleActionAgent,\n",
" AgentOutputParser,\n",
")\n",
"from langchain.prompts import BaseChatPromptTemplate\n",
"from langchain import SerpAPIWrapper, LLMChain\n",
"from langchain.chat_models import ChatOpenAI\n",
@@ -77,9 +82,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" )\n",
"]"
]
@@ -140,7 +145,7 @@
" template: str\n",
" # The list of tools available\n",
" tools: List[Tool]\n",
" \n",
"\n",
" def format_messages(self, **kwargs) -> str:\n",
" # Get the intermediate steps (AgentAction, Observation tuples)\n",
" # Format them in a particular way\n",
@@ -152,7 +157,9 @@
" # Set the agent_scratchpad variable to that value\n",
" kwargs[\"agent_scratchpad\"] = thoughts\n",
" # Create a tools variable from the list of tools provided\n",
" kwargs[\"tools\"] = \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in self.tools])\n",
" kwargs[\"tools\"] = \"\\n\".join(\n",
" [f\"{tool.name}: {tool.description}\" for tool in self.tools]\n",
" )\n",
" # Create a list of tool names for the tools provided\n",
" kwargs[\"tool_names\"] = \", \".join([tool.name for tool in self.tools])\n",
" formatted = self.template.format(**kwargs)\n",
@@ -171,7 +178,7 @@
" tools=tools,\n",
" # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically\n",
" # This includes the `intermediate_steps` variable because that is needed\n",
" input_variables=[\"input\", \"intermediate_steps\"]\n",
" input_variables=[\"input\", \"intermediate_steps\"],\n",
")"
]
},
@@ -195,7 +202,6 @@
"outputs": [],
"source": [
"class CustomOutputParser(AgentOutputParser):\n",
" \n",
" def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
" # Check if agent should finish\n",
" if \"Final Answer:\" in llm_output:\n",
@@ -213,7 +219,9 @@
" action = match.group(1).strip()\n",
" action_input = match.group(2)\n",
" # Return the action and action input\n",
" return AgentAction(tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output)"
" return AgentAction(\n",
" tool=action, tool_input=action_input.strip(\" \").strip('\"'), log=llm_output\n",
" )"
]
},
{
@@ -288,10 +296,10 @@
"source": [
"tool_names = [tool.name for tool in tools]\n",
"agent = LLMSingleActionAgent(\n",
" llm_chain=llm_chain, \n",
" llm_chain=llm_chain,\n",
" output_parser=output_parser,\n",
" stop=[\"\\nObservation:\"], \n",
" allowed_tools=tool_names\n",
" stop=[\"\\nObservation:\"],\n",
" allowed_tools=tool_names,\n",
")"
]
},
@@ -312,7 +320,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

View File

@@ -61,9 +61,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" )\n",
"]"
]
@@ -82,10 +82,7 @@
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools, \n",
" prefix=prefix, \n",
" suffix=suffix, \n",
" input_variables=[\"input\", \"agent_scratchpad\"]\n",
" tools, prefix=prefix, suffix=suffix, input_variables=[\"input\", \"agent_scratchpad\"]\n",
")"
]
},
@@ -171,7 +168,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
@@ -235,10 +234,10 @@
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools, \n",
" prefix=prefix, \n",
" suffix=suffix, \n",
" input_variables=[\"input\", \"language\", \"agent_scratchpad\"]\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"language\", \"agent_scratchpad\"],\n",
")"
]
},
@@ -269,7 +268,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{
@@ -307,7 +308,9 @@
}
],
"source": [
"agent_executor.run(input=\"How many people live in canada as of 2023?\", language=\"italian\")"
"agent_executor.run(\n",
" input=\"How many people live in canada as of 2023?\", language=\"italian\"\n",
")"
]
},
{

View File

@@ -51,16 +51,15 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" ),\n",
" Tool(\n",
" name = \"RandomWord\",\n",
" name=\"RandomWord\",\n",
" func=random_word,\n",
" description=\"call this to get a random word.\"\n",
" \n",
" )\n",
" description=\"call this to get a random word.\",\n",
" ),\n",
"]"
]
},
@@ -74,13 +73,14 @@
"from typing import List, Tuple, Any, Union\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"\n",
"\n",
"class FakeAgent(BaseMultiActionAgent):\n",
" \"\"\"Fake Custom Agent.\"\"\"\n",
" \n",
"\n",
" @property\n",
" def input_keys(self):\n",
" return [\"input\"]\n",
" \n",
"\n",
" def plan(\n",
" self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
" ) -> Union[List[AgentAction], AgentFinish]:\n",
@@ -141,7 +141,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)"
"agent_executor = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True\n",
")"
]
},
{

View File

@@ -20,6 +20,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\""
]
},
@@ -48,9 +49,9 @@
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Current Search\",\n",
" name=\"Current Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events or the current state of the world. the input to this should be a single search term.\"\n",
" description=\"useful for when you need to answer questions about current events or the current state of the world. the input to this should be a single search term.\",\n",
" ),\n",
"]"
]
@@ -72,8 +73,14 @@
"metadata": {},
"outputs": [],
"source": [
"llm=ChatOpenAI(temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
"llm = ChatOpenAI(temperature=0)\n",
"agent_chain = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" memory=memory,\n",
")"
]
},
{
@@ -233,7 +240,9 @@
}
],
"source": [
"agent_chain.run(input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\")"
"agent_chain.run(\n",
" input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\"\n",
")"
]
},
{

View File

@@ -37,9 +37,9 @@
"search = GoogleSearchAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Current Search\",\n",
" name=\"Current Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events or the current state of the world\"\n",
" description=\"useful for when you need to answer questions about current events or the current state of the world\",\n",
" ),\n",
"]"
]
@@ -61,8 +61,14 @@
"metadata": {},
"outputs": [],
"source": [
"llm=OpenAI(temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"
"llm = OpenAI(temperature=0)\n",
"agent_chain = initialize_agent(\n",
" tools,\n",
" llm,\n",
" agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" memory=memory,\n",
")"
]
},
{
@@ -206,7 +212,9 @@
}
],
"source": [
"agent_chain.run(input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\")"
"agent_chain.run(\n",
" input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\"\n",
")"
]
},
{

View File

@@ -26,7 +26,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain\n",
"from langchain import (\n",
" LLMMathChain,\n",
" OpenAI,\n",
" SerpAPIWrapper,\n",
" SQLDatabase,\n",
" SQLDatabaseChain,\n",
")\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType"
]
@@ -45,20 +51,20 @@
"db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\",\n",
" ),\n",
" Tool(\n",
" name=\"Calculator\",\n",
" func=llm_math_chain.run,\n",
" description=\"useful for when you need to answer questions about math\"\n",
" description=\"useful for when you need to answer questions about math\",\n",
" ),\n",
" Tool(\n",
" name=\"FooBar DB\",\n",
" func=db_chain.run,\n",
" description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\"\n",
" )\n",
" description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\",\n",
" ),\n",
"]"
]
},
@@ -69,7 +75,9 @@
"metadata": {},
"outputs": [],
"source": [
"mrkl = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"mrkl = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -128,7 +136,9 @@
}
],
"source": [
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"mrkl.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -178,7 +188,9 @@
}
],
"source": [
"mrkl.run(\"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\")"
"mrkl.run(\n",
" \"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\"\n",
")"
]
},
{

View File

@@ -26,7 +26,13 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI, LLMMathChain, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain\n",
"from langchain import (\n",
" OpenAI,\n",
" LLMMathChain,\n",
" SerpAPIWrapper,\n",
" SQLDatabase,\n",
" SQLDatabaseChain,\n",
")\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.chat_models import ChatOpenAI"
@@ -47,20 +53,20 @@
"db_chain = SQLDatabaseChain(llm=llm1, database=db, verbose=True)\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n",
" description=\"useful for when you need to answer questions about current events. You should ask targeted questions\",\n",
" ),\n",
" Tool(\n",
" name=\"Calculator\",\n",
" func=llm_math_chain.run,\n",
" description=\"useful for when you need to answer questions about math\"\n",
" description=\"useful for when you need to answer questions about math\",\n",
" ),\n",
" Tool(\n",
" name=\"FooBar DB\",\n",
" func=db_chain.run,\n",
" description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\"\n",
" )\n",
" description=\"useful for when you need to answer questions about FooBar. Input should be in the form of a question containing full context\",\n",
" ),\n",
"]"
]
},
@@ -71,7 +77,9 @@
"metadata": {},
"outputs": [],
"source": [
"mrkl = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"mrkl = initialize_agent(\n",
" tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -139,7 +147,9 @@
}
],
"source": [
"mrkl.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"mrkl.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -218,7 +228,9 @@
}
],
"source": [
"mrkl.run(\"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\")"
"mrkl.run(\n",
" \"What is the full name of the artist who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\"\n",
")"
]
},
{

View File

@@ -21,18 +21,19 @@
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.agents.react.base import DocstoreExplorer\n",
"docstore=DocstoreExplorer(Wikipedia())\n",
"\n",
"docstore = DocstoreExplorer(Wikipedia())\n",
"tools = [\n",
" Tool(\n",
" name=\"Search\",\n",
" func=docstore.search,\n",
" description=\"useful for when you need to ask with search\"\n",
" description=\"useful for when you need to ask with search\",\n",
" ),\n",
" Tool(\n",
" name=\"Lookup\",\n",
" func=docstore.lookup,\n",
" description=\"useful for when you need to ask with lookup\"\n",
" )\n",
" description=\"useful for when you need to ask with lookup\",\n",
" ),\n",
"]\n",
"\n",
"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",

View File

@@ -54,12 +54,16 @@
" Tool(\n",
" name=\"Intermediate Answer\",\n",
" func=search.run,\n",
" description=\"useful for when you need to ask with search\"\n",
" description=\"useful for when you need to ask with search\",\n",
" )\n",
"]\n",
"\n",
"self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)\n",
"self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
"self_ask_with_search = initialize_agent(\n",
" tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n",
")\n",
"self_ask_with_search.run(\n",
" \"What is the hometown of the reigning men's U.S. Open champion?\"\n",
")"
]
}
],

View File

@@ -93,7 +93,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -148,7 +150,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{

View File

@@ -40,7 +40,7 @@
"metadata": {},
"outputs": [],
"source": [
"agent = create_csv_agent(OpenAI(temperature=0), 'titanic.csv', verbose=True)"
"agent = create_csv_agent(OpenAI(temperature=0), \"titanic.csv\", verbose=True)"
]
},
{

View File

@@ -34,10 +34,7 @@
"import os\n",
"import yaml\n",
"\n",
"from langchain.agents import (\n",
" create_json_agent,\n",
" AgentExecutor\n",
")\n",
"from langchain.agents import create_json_agent, AgentExecutor\n",
"from langchain.agents.agent_toolkits import JsonToolkit\n",
"from langchain.chains import LLMChain\n",
"from langchain.llms.openai import OpenAI\n",
@@ -60,9 +57,7 @@
"json_toolkit = JsonToolkit(spec=json_spec)\n",
"\n",
"json_agent_executor = create_json_agent(\n",
" llm=OpenAI(temperature=0),\n",
" toolkit=json_toolkit,\n",
" verbose=True\n",
" llm=OpenAI(temperature=0), toolkit=json_toolkit, verbose=True\n",
")"
]
},
@@ -154,7 +149,9 @@
}
],
"source": [
"json_agent_executor.run(\"What are the required parameters in the request body to the /completions endpoint?\")"
"json_agent_executor.run(\n",
" \"What are the required parameters in the request body to the /completions endpoint?\"\n",
")"
]
},
{

View File

@@ -119,7 +119,7 @@
"with open(\"openai_openapi.yaml\") as f:\n",
" raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)\n",
"openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)\n",
" \n",
"\n",
"with open(\"klarna_openapi.yaml\") as f:\n",
" raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader)\n",
"klarna_api_spec = reduce_openapi_spec(raw_klarna_api_spec)\n",
@@ -152,12 +152,16 @@
"import spotipy.util as util\n",
"from langchain.requests import RequestsWrapper\n",
"\n",
"\n",
"def construct_spotify_auth_headers(raw_spec: dict):\n",
" scopes = list(raw_spec['components']['securitySchemes']['oauth_2_0']['flows']['authorizationCode']['scopes'].keys())\n",
" access_token = util.prompt_for_user_token(scope=','.join(scopes))\n",
" return {\n",
" 'Authorization': f'Bearer {access_token}'\n",
" }\n",
" scopes = list(\n",
" raw_spec[\"components\"][\"securitySchemes\"][\"oauth_2_0\"][\"flows\"][\n",
" \"authorizationCode\"\n",
" ][\"scopes\"].keys()\n",
" )\n",
" access_token = util.prompt_for_user_token(scope=\",\".join(scopes))\n",
" return {\"Authorization\": f\"Bearer {access_token}\"}\n",
"\n",
"\n",
"# Get API credentials.\n",
"headers = construct_spotify_auth_headers(raw_spotify_api_spec)\n",
@@ -218,8 +222,13 @@
],
"source": [
"import tiktoken\n",
"enc = tiktoken.encoding_for_model('text-davinci-003')\n",
"def count_tokens(s): return len(enc.encode(s))\n",
"\n",
"enc = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
"\n",
"\n",
"def count_tokens(s):\n",
" return len(enc.encode(s))\n",
"\n",
"\n",
"count_tokens(yaml.dump(raw_spotify_api_spec))"
]
@@ -254,6 +263,7 @@
"source": [
"from langchain.llms.openai import OpenAI\n",
"from langchain.agents.agent_toolkits.openapi import planner\n",
"\n",
"llm = OpenAI(model_name=\"gpt-4\", temperature=0.0)"
]
},
@@ -329,7 +339,9 @@
],
"source": [
"spotify_agent = planner.create_openapi_agent(spotify_api_spec, requests_wrapper, llm)\n",
"user_query = \"make me a playlist with the first song from kind of blue. call it machine blues.\"\n",
"user_query = (\n",
" \"make me a playlist with the first song from kind of blue. call it machine blues.\"\n",
")\n",
"spotify_agent.run(user_query)"
]
},
@@ -429,10 +441,8 @@
"metadata": {},
"outputs": [],
"source": [
"headers = {\n",
" \"Authorization\": f\"Bearer {os.getenv('OPENAI_API_KEY')}\"\n",
"}\n",
"openai_requests_wrapper=RequestsWrapper(headers=headers)"
"headers = {\"Authorization\": f\"Bearer {os.getenv('OPENAI_API_KEY')}\"}\n",
"openai_requests_wrapper = RequestsWrapper(headers=headers)"
]
},
{
@@ -545,7 +555,9 @@
"source": [
"# Meta!\n",
"llm = OpenAI(model_name=\"gpt-4\", temperature=0.25)\n",
"openai_agent = planner.create_openapi_agent(openai_api_spec, openai_requests_wrapper, llm)\n",
"openai_agent = planner.create_openapi_agent(\n",
" openai_api_spec, openai_requests_wrapper, llm\n",
")\n",
"user_query = \"generate a short piece of advice\"\n",
"openai_agent.run(user_query)"
]
@@ -593,14 +605,14 @@
"source": [
"with open(\"openai_openapi.yaml\") as f:\n",
" data = yaml.load(f, Loader=yaml.FullLoader)\n",
"json_spec=JsonSpec(dict_=data, max_value_length=4000)\n",
"json_spec = JsonSpec(dict_=data, max_value_length=4000)\n",
"\n",
"\n",
"openapi_toolkit = OpenAPIToolkit.from_llm(OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True)\n",
"openapi_toolkit = OpenAPIToolkit.from_llm(\n",
" OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True\n",
")\n",
"openapi_agent_executor = create_openapi_agent(\n",
" llm=OpenAI(temperature=0),\n",
" toolkit=openapi_toolkit,\n",
" verbose=True\n",
" llm=OpenAI(temperature=0), toolkit=openapi_toolkit, verbose=True\n",
")"
]
},
@@ -739,7 +751,9 @@
}
],
"source": [
"openapi_agent_executor.run(\"Make a post request to openai /completions. The prompt should be 'tell me a joke.'\")"
"openapi_agent_executor.run(\n",
" \"Make a post request to openai /completions. The prompt should be 'tell me a joke.'\"\n",
")"
]
}
],

View File

@@ -43,7 +43,9 @@
"outputs": [],
"source": [
"# Select the LLM to use. Here, we use text-davinci-003\n",
"llm = OpenAI(temperature=0, max_tokens=700) # You can swap between different core LLM's here."
"llm = OpenAI(\n",
" temperature=0, max_tokens=700\n",
") # You can swap between different core LLM's here."
]
},
{
@@ -77,7 +79,9 @@
],
"source": [
"speak_toolkit = NLAToolkit.from_llm_and_url(llm, \"https://api.speak.com/openapi.yaml\")\n",
"klarna_toolkit = NLAToolkit.from_llm_and_url(llm, \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\")"
"klarna_toolkit = NLAToolkit.from_llm_and_url(\n",
" llm, \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\"\n",
")"
]
},
{
@@ -122,8 +126,13 @@
"outputs": [],
"source": [
"natural_language_tools = speak_toolkit.get_tools() + klarna_toolkit.get_tools()\n",
"mrkl = initialize_agent(natural_language_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, \n",
" verbose=True, agent_kwargs={\"format_instructions\":openapi_format_instructions})"
"mrkl = initialize_agent(\n",
" natural_language_tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" agent_kwargs={\"format_instructions\": openapi_format_instructions},\n",
")"
]
},
{
@@ -163,7 +172,9 @@
}
],
"source": [
"mrkl.run(\"I have an end of year party for my Italian class and have to buy some Italian clothes for it\")"
"mrkl.run(\n",
" \"I have an end of year party for my Italian class and have to buy some Italian clothes for it\"\n",
")"
]
},
{
@@ -198,7 +209,7 @@
},
"outputs": [],
"source": [
"spoonacular_api_key = \"\" # Copy from the API Console"
"spoonacular_api_key = \"\" # Copy from the API Console"
]
},
{
@@ -238,10 +249,10 @@
"source": [
"requests = Requests(headers={\"x-api-key\": spoonacular_api_key})\n",
"spoonacular_toolkit = NLAToolkit.from_llm_and_url(\n",
" llm, \n",
" llm,\n",
" \"https://spoonacular.com/application/frontend/downloads/spoonacular-openapi-3.json\",\n",
" requests=requests,\n",
" max_text_length=1800, # If you want to truncate the response text\n",
" max_text_length=1800, # If you want to truncate the response text\n",
")"
]
},
@@ -263,10 +274,11 @@
}
],
"source": [
"natural_language_api_tools = (speak_toolkit.get_tools() \n",
" + klarna_toolkit.get_tools() \n",
" + spoonacular_toolkit.get_tools()[:30]\n",
" )\n",
"natural_language_api_tools = (\n",
" speak_toolkit.get_tools()\n",
" + klarna_toolkit.get_tools()\n",
" + spoonacular_toolkit.get_tools()[:30]\n",
")\n",
"print(f\"{len(natural_language_api_tools)} tools loaded.\")"
]
},
@@ -280,8 +292,13 @@
"outputs": [],
"source": [
"# Create an agent with the new tools\n",
"mrkl = initialize_agent(natural_language_api_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, \n",
" verbose=True, agent_kwargs={\"format_instructions\":openapi_format_instructions})"
"mrkl = initialize_agent(\n",
" natural_language_api_tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" agent_kwargs={\"format_instructions\": openapi_format_instructions},\n",
")"
]
},
{
@@ -373,7 +390,9 @@
}
],
"source": [
"natural_language_api_tools[1].run(\"Tell the LangChain audience to 'enjoy the meal' in Italian, please!\")"
"natural_language_api_tools[1].run(\n",
" \"Tell the LangChain audience to 'enjoy the meal' in Italian, please!\"\n",
")"
]
},
{

View File

@@ -32,7 +32,7 @@
"from langchain.llms import OpenAI\n",
"import pandas as pd\n",
"\n",
"df = pd.read_csv('titanic.csv')"
"df = pd.read_csv(\"titanic.csv\")"
]
},
{

View File

@@ -35,9 +35,7 @@
"outputs": [],
"source": [
"agent_executor = create_python_agent(\n",
" llm=OpenAI(temperature=0, max_tokens=1000),\n",
" tool=PythonREPLTool(),\n",
" verbose=True\n",
" llm=OpenAI(temperature=0, max_tokens=1000), tool=PythonREPLTool(), verbose=True\n",
")"
]
},
@@ -190,9 +188,11 @@
}
],
"source": [
"agent_executor.run(\"\"\"Understand, write a single neuron neural network in PyTorch.\n",
"agent_executor.run(\n",
" \"\"\"Understand, write a single neuron neural network in PyTorch.\n",
"Take synthetic data for y=2x. Train for 1000 epochs and print every 100 epochs.\n",
"Return prediction for x = 5\"\"\")"
"Return prediction for x = 5\"\"\"\n",
")"
]
},
{

View File

@@ -53,9 +53,7 @@
"toolkit = SQLDatabaseToolkit(db=db)\n",
"\n",
"agent_executor = create_sql_agent(\n",
" llm=OpenAI(temperature=0),\n",
" toolkit=toolkit,\n",
" verbose=True\n",
" llm=OpenAI(temperature=0), toolkit=toolkit, verbose=True\n",
")"
]
},
@@ -293,7 +291,9 @@
}
],
"source": [
"agent_executor.run(\"List the total sales per country. Which country's customers spent the most?\")"
"agent_executor.run(\n",
" \"List the total sales per country. Which country's customers spent the most?\"\n",
")"
]
},
{
@@ -372,7 +372,9 @@
}
],
"source": [
"agent_executor.run(\"Show the total number of tracks in each playlist. The Playlist name should be included in the result.\")"
"agent_executor.run(\n",
" \"Show the total number of tracks in each playlist. The Playlist name should be included in the result.\"\n",
")"
]
},
{

View File

@@ -31,6 +31,7 @@
"from langchain.vectorstores import Chroma\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain import OpenAI, VectorDBQA\n",
"\n",
"llm = OpenAI(temperature=0)"
]
},
@@ -53,13 +54,16 @@
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../../state_of_the_union.txt')\n",
"\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"texts = text_splitter.split_documents(documents)\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"state_of_union_store = Chroma.from_documents(texts, embeddings, collection_name=\"state-of-union\")"
"state_of_union_store = Chroma.from_documents(\n",
" texts, embeddings, collection_name=\"state-of-union\"\n",
")"
]
},
{
@@ -81,6 +85,7 @@
],
"source": [
"from langchain.document_loaders import WebBaseLoader\n",
"\n",
"loader = WebBaseLoader(\"https://beta.ruff.rs/docs/faq/\")\n",
"docs = loader.load()\n",
"ruff_texts = text_splitter.split_documents(docs)\n",
@@ -109,17 +114,14 @@
" VectorStoreToolkit,\n",
" VectorStoreInfo,\n",
")\n",
"\n",
"vectorstore_info = VectorStoreInfo(\n",
" name=\"state_of_union_address\",\n",
" description=\"the most recent state of the Union adress\",\n",
" vectorstore=state_of_union_store\n",
" vectorstore=state_of_union_store,\n",
")\n",
"toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)\n",
"agent_executor = create_vectorstore_agent(\n",
" llm=llm,\n",
" toolkit=toolkit,\n",
" verbose=True\n",
")"
"agent_executor = create_vectorstore_agent(llm=llm, toolkit=toolkit, verbose=True)"
]
},
{
@@ -165,7 +167,9 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent_executor.run(\n",
" \"What did biden say about ketanji brown jackson is the state of the union address?\"\n",
")"
]
},
{
@@ -203,7 +207,9 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address? List the source.\")"
"agent_executor.run(\n",
" \"What did biden say about ketanji brown jackson is the state of the union address? List the source.\"\n",
")"
]
},
{
@@ -241,16 +247,13 @@
"ruff_vectorstore_info = VectorStoreInfo(\n",
" name=\"ruff\",\n",
" description=\"Information about the Ruff python linting library\",\n",
" vectorstore=ruff_store\n",
" vectorstore=ruff_store,\n",
")\n",
"router_toolkit = VectorStoreRouterToolkit(\n",
" vectorstores=[vectorstore_info, ruff_vectorstore_info],\n",
" llm=llm\n",
" vectorstores=[vectorstore_info, ruff_vectorstore_info], llm=llm\n",
")\n",
"agent_executor = create_vectorstore_router_agent(\n",
" llm=llm,\n",
" toolkit=router_toolkit,\n",
" verbose=True\n",
" llm=llm, toolkit=router_toolkit, verbose=True\n",
")"
]
},
@@ -299,7 +302,9 @@
}
],
"source": [
"agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")"
"agent_executor.run(\n",
" \"What did biden say about ketanji brown jackson is the state of the union address?\"\n",
")"
]
},
{
@@ -381,7 +386,9 @@
}
],
"source": [
"agent_executor.run(\"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\")"
"agent_executor.run(\n",
" \"What tool does ruff use to run over Jupyter Notebooks? Did the president mention that tool in the state of the union?\"\n",
")"
]
},
{

View File

@@ -82,15 +82,15 @@
"llm_math_chain = LLMMathChain(llm=llm, verbose=True)\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" ),\n",
" Tool(\n",
" name=\"Calculator\",\n",
" func=llm_math_chain.run,\n",
" description=\"useful for when you need to answer questions about math\"\n",
" )\n",
" description=\"useful for when you need to answer questions about math\",\n",
" ),\n",
"]"
]
},
@@ -103,7 +103,9 @@
"source": [
"# Construct the agent. We will use the default agent type here.\n",
"# See documentation for a full list of options.\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -158,7 +160,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -183,11 +187,12 @@
" def _run(self, query: str) -> str:\n",
" \"\"\"Use the tool.\"\"\"\n",
" return search.run(query)\n",
" \n",
"\n",
" async def _arun(self, query: str) -> str:\n",
" \"\"\"Use the tool asynchronously.\"\"\"\n",
" raise NotImplementedError(\"BingSearchRun does not support async\")\n",
" \n",
"\n",
"\n",
"class CustomCalculatorTool(BaseTool):\n",
" name = \"Calculator\"\n",
" description = \"useful for when you need to answer questions about math\"\n",
@@ -195,7 +200,7 @@
" def _run(self, query: str) -> str:\n",
" \"\"\"Use the tool.\"\"\"\n",
" return llm_math_chain.run(query)\n",
" \n",
"\n",
" async def _arun(self, query: str) -> str:\n",
" \"\"\"Use the tool asynchronously.\"\"\"\n",
" raise NotImplementedError(\"BingSearchRun does not support async\")"
@@ -218,7 +223,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -273,7 +280,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -295,6 +304,7 @@
"source": [
"from langchain.agents import tool\n",
"\n",
"\n",
"@tool\n",
"def search_api(query: str) -> str:\n",
" \"\"\"Searches the API for the query.\"\"\"\n",
@@ -411,7 +421,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -458,7 +470,9 @@
}
],
"source": [
"agent.run(\"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\")"
"agent.run(\n",
" \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
")"
]
},
{
@@ -488,21 +502,27 @@
"from langchain.agents import AgentType\n",
"from langchain.llms import OpenAI\n",
"from langchain import LLMMathChain, SerpAPIWrapper\n",
"\n",
"search = SerpAPIWrapper()\n",
"tools = [\n",
" Tool(\n",
" name = \"Search\",\n",
" name=\"Search\",\n",
" func=search.run,\n",
" description=\"useful for when you need to answer questions about current events\"\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" ),\n",
" Tool(\n",
" name=\"Music Search\",\n",
" func=lambda x: \"'All I Want For Christmas Is You' by Mariah Carey.\", #Mock Function\n",
" func=lambda x: \"'All I Want For Christmas Is You' by Mariah Carey.\", # Mock Function\n",
" description=\"A Music search engine. Use this more than the normal search if the question is about Music, like 'who is the singer of yesterday?' or 'what is the most popular song in 2022?'\",\n",
" )\n",
" ),\n",
"]\n",
"\n",
"agent = initialize_agent(tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools,\n",
" OpenAI(temperature=0),\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
")"
]
},
{
@@ -565,7 +585,7 @@
" name=\"Calculator\",\n",
" func=llm_math_chain.run,\n",
" description=\"useful for when you need to answer questions about math\",\n",
" return_direct=True\n",
" return_direct=True,\n",
" )\n",
"]"
]
@@ -578,7 +598,9 @@
"outputs": [],
"source": [
"llm = OpenAI(temperature=0)\n",
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{

View File

@@ -53,6 +53,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"Your OpenAI API key\"\n",
"os.environ[\"APIFY_API_TOKEN\"] = \"Your Apify API token\"\n",
"\n",

View File

@@ -25,6 +25,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"BING_SUBSCRIPTION_KEY\"] = \"\"\n",
"os.environ[\"BING_SEARCH_URL\"] = \"\""
]

View File

@@ -81,10 +81,12 @@
],
"source": [
"llm = ChatOpenAI(temperature=0)\n",
"tools = load_tools([\"requests_all\"] )\n",
"tools = load_tools([\"requests_all\"])\n",
"tools += [tool]\n",
"\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)\n",
"agent_chain = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")\n",
"agent_chain.run(\"what t shirts are available in klarna?\")"
]
},

View File

@@ -22,6 +22,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"GOOGLE_CSE_ID\"] = \"\"\n",
"os.environ[\"GOOGLE_API_KEY\"] = \"\""
]

View File

@@ -16,6 +16,7 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"SERPER_API_KEY\"] = \"\""
],
"metadata": {
@@ -75,7 +76,7 @@
"execution_count": null,
"outputs": [],
"source": [
"os.environ['OPENAI_API_KEY'] = \"\""
"os.environ[\"OPENAI_API_KEY\"] = \"\""
],
"metadata": {
"collapsed": false
@@ -91,15 +92,15 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001B[0m\n",
"Intermediate answer: \u001B[36;1m\u001B[1;3mCurrent champions Carlos Alcaraz, 2022 men's singles champion.\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mFollow up: Where is Carlos Alcaraz from?\u001B[0m\n",
"Intermediate answer: \u001B[36;1m\u001B[1;3mEl Palmar, Spain\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mSo the final answer is: El Palmar, Spain\u001B[0m\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mCurrent champions Carlos Alcaraz, 2022 men's singles champion.\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz from?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mEl Palmar, Spain\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mSo the final answer is: El Palmar, Spain\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -123,12 +124,16 @@
" Tool(\n",
" name=\"Intermediate Answer\",\n",
" func=search.run,\n",
" description=\"useful for when you need to ask with search\"\n",
" description=\"useful for when you need to ask with search\",\n",
" )\n",
"]\n",
"\n",
"self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)\n",
"self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
"self_ask_with_search = initialize_agent(\n",
" tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n",
")\n",
"self_ask_with_search.run(\n",
" \"What is the hometown of the reigning men's U.S. Open champion?\"\n",
")"
],
"metadata": {
"collapsed": false

View File

@@ -25,7 +25,7 @@
"llm = ChatOpenAI(temperature=0.0)\n",
"math_llm = OpenAI(temperature=0.0)\n",
"tools = load_tools(\n",
" [\"human\", \"llm-math\"], \n",
" [\"human\", \"llm-math\"],\n",
" llm=math_llm,\n",
")\n",
"\n",
@@ -96,7 +96,6 @@
}
],
"source": [
"\n",
"agent_chain.run(\"What is Eric Zhu's birthday?\")\n",
"# Answer with \"last week\""
]

View File

@@ -62,9 +62,12 @@
"outputs": [],
"source": [
"import os\n",
"\n",
"key = os.environ[\"IFTTTKey\"]\n",
"url = f\"https://maker.ifttt.com/trigger/spotify/json/with/key/{key}\"\n",
"tool = IFTTTWebhook(name=\"Spotify\", description=\"Add a song to spotify playlist\", url=url)"
"tool = IFTTTWebhook(\n",
" name=\"Spotify\", description=\"Add a song to spotify playlist\", url=url\n",
")"
]
},
{

View File

@@ -1,128 +1,129 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "245a954a",
"metadata": {},
"source": [
"# OpenWeatherMap API\n",
"\n",
"This notebook goes over how to use the OpenWeatherMap component to fetch weather information.\n",
"\n",
"First, you need to sign up for an OpenWeatherMap API key:\n",
"\n",
"1. Go to OpenWeatherMap and sign up for an API key [here](https://openweathermap.org/api/)\n",
"2. pip install pyowm\n",
"\n",
"Then we will need to set some environment variables:\n",
"1. Save your API KEY into OPENWEATHERMAP_API_KEY env variable"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "961b3689",
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": [
"pip install pyowm"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "34bb5968",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\""
]
},
{
"cell_type": "code",
"execution_count": 36,
"id": "ac4910f8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import OpenWeatherMapAPIWrapper"
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "84b8f773",
"metadata": {},
"outputs": [],
"source": [
"weather = OpenWeatherMapAPIWrapper()"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "9651f324-e74a-4f08-a28a-89db029f66f8",
"metadata": {},
"outputs": [],
"source": [
"weather_data = weather.run(\"London,GB\")"
]
},
{
"cell_type": "code",
"execution_count": 39,
"id": "028f4cba",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In London,GB, the current weather is as follows:\n",
"Detailed status: overcast clouds\n",
"Wind speed: 4.63 m/s, direction: 150°\n",
"Humidity: 67%\n",
"Temperature: \n",
" - Current: 5.35°C\n",
" - High: 6.26°C\n",
" - Low: 3.49°C\n",
" - Feels like: 1.95°C\n",
"Rain: {}\n",
"Heat index: None\n",
"Cloud cover: 100%\n"
]
}
],
"source": [
"print(weather_data)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "245a954a",
"metadata": {},
"source": [
"# OpenWeatherMap API\n",
"\n",
"This notebook goes over how to use the OpenWeatherMap component to fetch weather information.\n",
"\n",
"First, you need to sign up for an OpenWeatherMap API key:\n",
"\n",
"1. Go to OpenWeatherMap and sign up for an API key [here](https://openweathermap.org/api/)\n",
"2. pip install pyowm\n",
"\n",
"Then we will need to set some environment variables:\n",
"1. Save your API KEY into OPENWEATHERMAP_API_KEY env variable"
]
},
"nbformat": 4,
"nbformat_minor": 5
{
"cell_type": "code",
"execution_count": null,
"id": "961b3689",
"metadata": {
"vscode": {
"languageId": "shellscript"
}
},
"outputs": [],
"source": [
"pip install pyowm"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "34bb5968",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\""
]
},
{
"cell_type": "code",
"execution_count": 36,
"id": "ac4910f8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import OpenWeatherMapAPIWrapper"
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "84b8f773",
"metadata": {},
"outputs": [],
"source": [
"weather = OpenWeatherMapAPIWrapper()"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "9651f324-e74a-4f08-a28a-89db029f66f8",
"metadata": {},
"outputs": [],
"source": [
"weather_data = weather.run(\"London,GB\")"
]
},
{
"cell_type": "code",
"execution_count": 39,
"id": "028f4cba",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In London,GB, the current weather is as follows:\n",
"Detailed status: overcast clouds\n",
"Wind speed: 4.63 m/s, direction: 150°\n",
"Humidity: 67%\n",
"Temperature: \n",
" - Current: 5.35°C\n",
" - High: 6.26°C\n",
" - Low: 3.49°C\n",
" - Feels like: 1.95°C\n",
"Rain: {}\n",
"Heat index: None\n",
"Cloud cover: 100%\n"
]
}
],
"source": [
"print(weather_data)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -64,7 +64,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -132,7 +134,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -200,7 +204,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -267,7 +273,9 @@
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{

View File

@@ -95,7 +95,9 @@
},
"outputs": [],
"source": [
"search = SearxSearchWrapper(searx_host=\"http://127.0.0.1:8888\", k=5) # k is for max number of items"
"search = SearxSearchWrapper(\n",
" searx_host=\"http://127.0.0.1:8888\", k=5\n",
") # k is for max number of items"
]
},
{
@@ -120,7 +122,7 @@
}
],
"source": [
"search.run(\"large language model \", engines=['wiki'])"
"search.run(\"large language model \", engines=[\"wiki\"])"
]
},
{
@@ -152,7 +154,7 @@
],
"source": [
"search = SearxSearchWrapper(searx_host=\"http://127.0.0.1:8888\", k=1)\n",
"search.run(\"deep learning\", language='es', engines=['wiki'])"
"search.run(\"deep learning\", language=\"es\", engines=[\"wiki\"])"
]
},
{
@@ -244,7 +246,12 @@
}
],
"source": [
"results = search.results(\"Large Language Model prompt\", num_results=5, categories='science', time_range='year')\n",
"results = search.results(\n",
" \"Large Language Model prompt\",\n",
" num_results=5,\n",
" categories=\"science\",\n",
" time_range=\"year\",\n",
")\n",
"pprint.pp(results)"
]
},
@@ -386,7 +393,9 @@
}
],
"source": [
"results = search.results(\"Large Language Model prompt\", num_results=5, engines=['arxiv'])\n",
"results = search.results(\n",
" \"Large Language Model prompt\", num_results=5, engines=[\"arxiv\"]\n",
")\n",
"pprint.pp(results)"
]
},
@@ -425,8 +434,8 @@
}
],
"source": [
"results = search.results(\"large language model\", num_results = 20, categories='it')\n",
"pprint.pp(list(filter(lambda r: r['engines'][0] == 'github', results)))"
"results = search.results(\"large language model\", num_results=20, categories=\"it\")\n",
"pprint.pp(list(filter(lambda r: r[\"engines\"][0] == \"github\", results)))"
]
},
{
@@ -578,7 +587,9 @@
}
],
"source": [
"results = search.results(\"large language model\", num_results = 20, engines=['github', 'gitlab'])\n",
"results = search.results(\n",
" \"large language model\", num_results=20, engines=[\"github\", \"gitlab\"]\n",
")\n",
"pprint.pp(results)"
]
}

File diff suppressed because one or more lines are too long

View File

@@ -42,7 +42,8 @@
"outputs": [],
"source": [
"import os\n",
"os.environ[\"WOLFRAM_ALPHA_APPID\"] = \"\"\n"
"\n",
"os.environ[\"WOLFRAM_ALPHA_APPID\"] = \"\""
]
},
{

View File

@@ -52,7 +52,7 @@
"# get from https://platform.openai.com/\n",
"os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\", \"\")\n",
"\n",
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in): \n",
"# get from https://nla.zapier.com/demo/provider/debug (under User Information, after logging in):\n",
"os.environ[\"ZAPIER_NLA_API_KEY\"] = os.environ.get(\"ZAPIER_NLA_API_KEY\", \"\")"
]
},
@@ -106,7 +106,9 @@
"llm = OpenAI(temperature=0)\n",
"zapier = ZapierNLAWrapper()\n",
"toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n",
"agent = initialize_agent(toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"agent = initialize_agent(\n",
" toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{
@@ -150,7 +152,9 @@
}
],
"source": [
"agent.run(\"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\")"
"agent.run(\n",
" \"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\"\n",
")"
]
},
{
@@ -206,10 +210,25 @@
"\n",
"GMAIL_SEARCH_INSTRUCTIONS = \"Grab the latest email from Silicon Valley Bank\"\n",
"\n",
"\n",
"def nla_gmail(inputs):\n",
" action = next((a for a in actions if a[\"description\"].startswith(\"Gmail: Find Email\")), None)\n",
" return {\"email_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(inputs[\"instructions\"])}\n",
"gmail_chain = TransformChain(input_variables=[\"instructions\"], output_variables=[\"email_data\"], transform=nla_gmail)"
" action = next(\n",
" (a for a in actions if a[\"description\"].startswith(\"Gmail: Find Email\")), None\n",
" )\n",
" return {\n",
" \"email_data\": ZapierNLARunAction(\n",
" action_id=action[\"id\"],\n",
" zapier_description=action[\"description\"],\n",
" params_schema=action[\"params\"],\n",
" ).run(inputs[\"instructions\"])\n",
" }\n",
"\n",
"\n",
"gmail_chain = TransformChain(\n",
" input_variables=[\"instructions\"],\n",
" output_variables=[\"email_data\"],\n",
" transform=nla_gmail,\n",
")"
]
},
{
@@ -229,7 +248,7 @@
"Draft email reply:\"\"\"\n",
"\n",
"prompt_template = PromptTemplate(input_variables=[\"email_data\"], template=template)\n",
"reply_chain = LLMChain(llm=OpenAI(temperature=.7), prompt=prompt_template)"
"reply_chain = LLMChain(llm=OpenAI(temperature=0.7), prompt=prompt_template)"
]
},
{
@@ -243,11 +262,31 @@
"\n",
"SLACK_HANDLE = \"@Ankush Gola\"\n",
"\n",
"\n",
"def nla_slack(inputs):\n",
" action = next((a for a in actions if a[\"description\"].startswith(\"Slack: Send Direct Message\")), None)\n",
" action = next(\n",
" (\n",
" a\n",
" for a in actions\n",
" if a[\"description\"].startswith(\"Slack: Send Direct Message\")\n",
" ),\n",
" None,\n",
" )\n",
" instructions = f'Send this to {SLACK_HANDLE} in Slack: {inputs[\"draft_reply\"]}'\n",
" return {\"slack_data\": ZapierNLARunAction(action_id=action[\"id\"], zapier_description=action[\"description\"], params_schema=action[\"params\"]).run(instructions)}\n",
"slack_chain = TransformChain(input_variables=[\"draft_reply\"], output_variables=[\"slack_data\"], transform=nla_slack)"
" return {\n",
" \"slack_data\": ZapierNLARunAction(\n",
" action_id=action[\"id\"],\n",
" zapier_description=action[\"description\"],\n",
" params_schema=action[\"params\"],\n",
" ).run(instructions)\n",
" }\n",
"\n",
"\n",
"slack_chain = TransformChain(\n",
" input_variables=[\"draft_reply\"],\n",
" output_variables=[\"slack_data\"],\n",
" transform=nla_slack,\n",
")"
]
},
{
@@ -290,7 +329,9 @@
"source": [
"## finally, execute\n",
"\n",
"overall_chain = SimpleSequentialChain(chains=[gmail_chain, reply_chain, slack_chain], verbose=True)\n",
"overall_chain = SimpleSequentialChain(\n",
" chains=[gmail_chain, reply_chain, slack_chain], verbose=True\n",
")\n",
"overall_chain.run(GMAIL_SEARCH_INSTRUCTIONS)"
]
},

View File

@@ -45,6 +45,7 @@
"def multiplier(a, b):\n",
" return a * b\n",
"\n",
"\n",
"def parsing_multiplier(string):\n",
" a, b = string.split(\",\")\n",
" return multiplier(int(a), int(b))"
@@ -60,12 +61,14 @@
"llm = OpenAI(temperature=0)\n",
"tools = [\n",
" Tool(\n",
" name = \"Multiplier\",\n",
" name=\"Multiplier\",\n",
" func=parsing_multiplier,\n",
" description=\"useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.\"\n",
" description=\"useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.\",\n",
" )\n",
"]\n",
"mrkl = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)"
"mrkl = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n",
")"
]
},
{

View File

@@ -189,6 +189,7 @@
"from langchain.llms import OpenAI\n",
"from langchain.schema import AgentAction, AgentFinish, LLMResult\n",
"\n",
"\n",
"class MyCustomCallbackHandler(BaseCallbackHandler):\n",
" \"\"\"Custom CallbackHandler.\"\"\"\n",
"\n",
@@ -276,13 +277,21 @@
" ) -> None:\n",
" \"\"\"Run on agent end.\"\"\"\n",
" print(finish.log)\n",
"\n",
"\n",
"manager = CallbackManager([MyCustomCallbackHandler()])\n",
"llm = OpenAI(temperature=0, callback_manager=manager, verbose=True)\n",
"tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, callback_manager=manager)\n",
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager\n",
" tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" callback_manager=manager,\n",
")\n",
"agent.run(\"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\")"
"agent.run(\n",
" \"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\"\n",
")"
]
},
{
@@ -325,6 +334,7 @@
"\n",
"from langchain.callbacks.base import AsyncCallbackHandler, AsyncCallbackManager\n",
"\n",
"\n",
"class MyCustomAsyncCallbackHandler(AsyncCallbackHandler):\n",
" \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
@@ -343,15 +353,26 @@
" await asyncio.sleep(0.5)\n",
" print(\"\\n\\033[1m> Finished chain.\\033[0m\")\n",
"\n",
"\n",
"manager = AsyncCallbackManager([MyCustomAsyncCallbackHandler()])\n",
"\n",
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession, \n",
"# To make async requests in Tools more efficient, you can pass in your own aiohttp.ClientSession,\n",
"# but you must manually close the client session at the end of your program/event loop\n",
"aiosession = ClientSession()\n",
"llm = OpenAI(temperature=0, callback_manager=manager)\n",
"async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n",
"async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n",
"await async_agent.arun(\"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\")\n",
"async_tools = load_tools(\n",
" [\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager\n",
")\n",
"async_agent = initialize_agent(\n",
" async_tools,\n",
" llm,\n",
" agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
" verbose=True,\n",
" callback_manager=manager,\n",
")\n",
"await async_agent.arun(\n",
" \"Who won the US Open men's final in 2019? What is his age raised to the 0.334 power?\"\n",
")\n",
"await aiosession.close()"
]
},

View File

@@ -46,7 +46,10 @@
"outputs": [],
"source": [
"from langchain.chains.api import open_meteo_docs\n",
"chain_new = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)"
"\n",
"chain_new = APIChain.from_llm_and_api_docs(\n",
" llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True\n",
")"
]
},
{
@@ -79,7 +82,9 @@
}
],
"source": [
"chain_new.run('What is the weather like right now in Munich, Germany in degrees Farenheit?')"
"chain_new.run(\n",
" \"What is the weather like right now in Munich, Germany in degrees Farenheit?\"\n",
")"
]
},
{
@@ -96,7 +101,8 @@
"outputs": [],
"source": [
"import os\n",
"os.environ['TMDB_BEARER_TOKEN'] = \"\""
"\n",
"os.environ[\"TMDB_BEARER_TOKEN\"] = \"\""
]
},
{
@@ -106,8 +112,11 @@
"outputs": [],
"source": [
"from langchain.chains.api import tmdb_docs\n",
"\n",
"headers = {\"Authorization\": f\"Bearer {os.environ['TMDB_BEARER_TOKEN']}\"}\n",
"chain = APIChain.from_llm_and_api_docs(llm, tmdb_docs.TMDB_DOCS, headers=headers, verbose=True)"
"chain = APIChain.from_llm_and_api_docs(\n",
" llm, tmdb_docs.TMDB_DOCS, headers=headers, verbose=True\n",
")"
]
},
{
@@ -168,12 +177,16 @@
"from langchain.chains import APIChain\n",
"\n",
"# Get api key here: https://www.listennotes.com/api/pricing/\n",
"listen_api_key = 'xxx'\n",
"listen_api_key = \"xxx\"\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"headers = {\"X-ListenAPI-Key\": listen_api_key}\n",
"chain = APIChain.from_llm_and_api_docs(llm, podcast_docs.PODCAST_DOCS, headers=headers, verbose=True)\n",
"chain.run(\"Search for 'silicon valley bank' podcast episodes, audio length is more than 30 minutes, return only 1 results\")\n"
"chain = APIChain.from_llm_and_api_docs(\n",
" llm, podcast_docs.PODCAST_DOCS, headers=headers, verbose=True\n",
")\n",
"chain.run(\n",
" \"Search for 'silicon valley bank' podcast episodes, audio length is more than 30 minutes, return only 1 results\"\n",
")"
]
},
{

View File

@@ -168,9 +168,9 @@
],
"source": [
"master_yoda_principal = ConstitutionalPrinciple(\n",
" name='Master Yoda Principle',\n",
" critique_request='Identify specific ways in which the model\\'s response is not in the style of Master Yoda.',\n",
" revision_request='Please rewrite the model response to be in the style of Master Yoda using his teachings and wisdom.',\n",
" name=\"Master Yoda Principle\",\n",
" critique_request=\"Identify specific ways in which the model's response is not in the style of Master Yoda.\",\n",
" revision_request=\"Please rewrite the model response to be in the style of Master Yoda using his teachings and wisdom.\",\n",
")\n",
"\n",
"constitutional_chain = ConstitutionalChain.from_llm(\n",

View File

@@ -50,7 +50,7 @@
"metadata": {},
"outputs": [],
"source": [
"chain = LLMRequestsChain(llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT))"
"chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT))"
]
},
{
@@ -63,7 +63,7 @@
"question = \"What are the Three (3) biggest countries, and their respective sizes?\"\n",
"inputs = {\n",
" \"query\": question,\n",
" \"url\": \"https://www.google.com/search?q=\" + question.replace(\" \", \"+\")\n",
" \"url\": \"https://www.google.com/search?q=\" + question.replace(\" \", \"+\"),\n",
"}"
]
},

View File

@@ -25,7 +25,12 @@
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain\n",
"from langchain.chains import (\n",
" OpenAIModerationChain,\n",
" SequentialChain,\n",
" LLMChain,\n",
" SimpleSequentialChain,\n",
")\n",
"from langchain.prompts import PromptTemplate"
]
},
@@ -172,13 +177,13 @@
"outputs": [],
"source": [
"class CustomModeration(OpenAIModerationChain):\n",
" \n",
" def _moderate(self, text: str, results: dict) -> str:\n",
" if results[\"flagged\"]:\n",
" error_str = f\"The following text was found that violates OpenAI's content policy: {text}\"\n",
" return error_str\n",
" return text\n",
" \n",
"\n",
"\n",
"custom_moderation = CustomModeration()"
]
},
@@ -244,7 +249,9 @@
"outputs": [],
"source": [
"prompt = PromptTemplate(template=\"{text}\", input_variables=[\"text\"])\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
"llm_chain = LLMChain(\n",
" llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt\n",
")"
]
},
{
@@ -324,8 +331,12 @@
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(template=\"{setup}{new_input}Person2:\", input_variables=[\"setup\", \"new_input\"])\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
"prompt = PromptTemplate(\n",
" template=\"{setup}{new_input}Person2:\", input_variables=[\"setup\", \"new_input\"]\n",
")\n",
"llm_chain = LLMChain(\n",
" llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt\n",
")"
]
},
{
@@ -379,7 +390,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = SequentialChain(chains=[llm_chain, moderation_chain], input_variables=[\"setup\", \"new_input\"])"
"chain = SequentialChain(\n",
" chains=[llm_chain, moderation_chain], input_variables=[\"setup\", \"new_input\"]\n",
")"
]
},
{

View File

@@ -48,7 +48,9 @@
}
],
"source": [
"spec = OpenAPISpec.from_url(\"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\")"
"spec = OpenAPISpec.from_url(\n",
" \"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/\"\n",
")"
]
},
{
@@ -79,7 +81,7 @@
"metadata": {},
"outputs": [],
"source": [
"operation = APIOperation.from_openapi_spec(spec, '/public/openai/v0/products', \"get\")"
"operation = APIOperation.from_openapi_spec(spec, \"/public/openai/v0/products\", \"get\")"
]
},
{
@@ -103,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI() # Load a Language Model"
"llm = OpenAI() # Load a Language Model"
]
},
{
@@ -114,11 +116,11 @@
"outputs": [],
"source": [
"chain = OpenAPIEndpointChain.from_api_operation(\n",
" operation, \n",
" llm, \n",
" requests=Requests(), \n",
" operation,\n",
" llm,\n",
" requests=Requests(),\n",
" verbose=True,\n",
" return_intermediate_steps=True # Return request and response text\n",
" return_intermediate_steps=True, # Return request and response text\n",
")"
]
},
@@ -268,12 +270,12 @@
"outputs": [],
"source": [
"chain = OpenAPIEndpointChain.from_api_operation(\n",
" operation, \n",
" llm, \n",
" requests=Requests(), \n",
" operation,\n",
" llm,\n",
" requests=Requests(),\n",
" verbose=True,\n",
" return_intermediate_steps=True, # Return request and response text\n",
" raw_response=True # Return raw response\n",
" return_intermediate_steps=True, # Return request and response text\n",
" raw_response=True, # Return raw response\n",
")"
]
},
@@ -411,7 +413,9 @@
"metadata": {},
"outputs": [],
"source": [
"operation = APIOperation.from_openapi_spec(spec, '/v1/public/openai/explain-task', \"post\")"
"operation = APIOperation.from_openapi_spec(\n",
" spec, \"/v1/public/openai/explain-task\", \"post\"\n",
")"
]
},
{
@@ -423,11 +427,8 @@
"source": [
"llm = OpenAI()\n",
"chain = OpenAPIEndpointChain.from_api_operation(\n",
" operation,\n",
" llm,\n",
" requests=Requests(),\n",
" verbose=True,\n",
" return_intermediate_steps=True)"
" operation, llm, requests=Requests(), verbose=True, return_intermediate_steps=True\n",
")"
]
},
{

View File

@@ -28,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(model_name='code-davinci-002', temperature=0, max_tokens=512)"
"llm = OpenAI(model_name=\"code-davinci-002\", temperature=0, max_tokens=512)"
]
},
{
@@ -71,17 +71,17 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mdef solution():\n",
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mdef solution():\n",
" \"\"\"Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have?\"\"\"\n",
" cindy_pets = 4\n",
" marcia_pets = cindy_pets + 2\n",
" jan_pets = marcia_pets * 3\n",
" total_pets = cindy_pets + marcia_pets + jan_pets\n",
" result = total_pets\n",
" return result\u001B[0m\n",
" return result\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -139,8 +139,8 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3m# Put objects into a list to record ordering\n",
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
"objects = []\n",
"objects += [('booklet', 'blue')] * 2\n",
"objects += [('booklet', 'purple')] * 2\n",
@@ -151,9 +151,9 @@
"\n",
"# Count number of purple objects\n",
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
"answer = num_purple\u001B[0m\n",
"answer = num_purple\u001b[0m\n",
"\n",
"\u001B[1m> Finished PALChain chain.\u001B[0m\n"
"\u001b[1m> Finished PALChain chain.\u001b[0m\n"
]
},
{
@@ -187,7 +187,9 @@
"metadata": {},
"outputs": [],
"source": [
"pal_chain = PALChain.from_colored_object_prompt(llm, verbose=True, return_intermediate_steps=True)"
"pal_chain = PALChain.from_colored_object_prompt(\n",
" llm, verbose=True, return_intermediate_steps=True\n",
")"
]
},
{
@@ -212,8 +214,8 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new PALChain chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3m# Put objects into a list to record ordering\n",
"\u001b[1m> Entering new PALChain chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m# Put objects into a list to record ordering\n",
"objects = []\n",
"objects += [('booklet', 'blue')] * 2\n",
"objects += [('booklet', 'purple')] * 2\n",
@@ -224,9 +226,9 @@
"\n",
"# Count number of purple objects\n",
"num_purple = len([object for object in objects if object[1] == 'purple'])\n",
"answer = num_purple\u001B[0m\n",
"answer = num_purple\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
}
],
@@ -252,7 +254,7 @@
}
],
"source": [
"result['intermediate_steps']"
"result[\"intermediate_steps\"]"
]
},
{

View File

@@ -230,7 +230,9 @@
"metadata": {},
"outputs": [],
"source": [
"db_chain = SQLDatabaseChain(llm=llm, database=db, prompt=PROMPT, verbose=True, return_intermediate_steps=True)"
"db_chain = SQLDatabaseChain(\n",
" llm=llm, database=db, prompt=PROMPT, verbose=True, return_intermediate_steps=True\n",
")"
]
},
{
@@ -341,8 +343,11 @@
"source": [
"db = SQLDatabase.from_uri(\n",
" \"sqlite:///../../../../notebooks/Chinook.db\",\n",
" include_tables=['Track'], # we include only one table to save tokens in the prompt :)\n",
" sample_rows_in_table_info=2)"
" include_tables=[\n",
" \"Track\"\n",
" ], # we include only one table to save tokens in the prompt :)\n",
" sample_rows_in_table_info=2,\n",
")"
]
},
{
@@ -522,9 +527,10 @@
"source": [
"db = SQLDatabase.from_uri(\n",
" \"sqlite:///../../../../notebooks/Chinook.db\",\n",
" include_tables=['Track', 'Playlist'],\n",
" include_tables=[\"Track\", \"Playlist\"],\n",
" sample_rows_in_table_info=2,\n",
" custom_table_info=custom_table_info)\n",
" custom_table_info=custom_table_info,\n",
")\n",
"\n",
"print(db.table_info)"
]
@@ -598,6 +604,7 @@
"outputs": [],
"source": [
"from langchain.chains import SQLDatabaseSequentialChain\n",
"\n",
"db = SQLDatabase.from_uri(\"sqlite:///../../../../notebooks/Chinook.db\")"
]
},

View File

@@ -39,7 +39,7 @@
"\n",
"\n",
"SparkleSmile Toothpaste\n",
"\u001B[1mConcurrent executed in 1.54 seconds.\u001B[0m\n",
"\u001b[1mConcurrent executed in 1.54 seconds.\u001b[0m\n",
"\n",
"\n",
"BrightSmile Toothpaste Co.\n",
@@ -55,7 +55,7 @@
"\n",
"\n",
"BrightSmile Toothpaste.\n",
"\u001B[1mSerial executed in 6.38 seconds.\u001B[0m\n"
"\u001b[1mSerial executed in 6.38 seconds.\u001b[0m\n"
]
}
],
@@ -95,16 +95,17 @@
" tasks = [async_generate(chain) for _ in range(5)]\n",
" await asyncio.gather(*tasks)\n",
"\n",
"\n",
"s = time.perf_counter()\n",
"# If running this outside of Jupyter, use asyncio.run(generate_concurrently())\n",
"await generate_concurrently()\n",
"elapsed = time.perf_counter() - s\n",
"print('\\033[1m' + f\"Concurrent executed in {elapsed:0.2f} seconds.\" + '\\033[0m')\n",
"print(\"\\033[1m\" + f\"Concurrent executed in {elapsed:0.2f} seconds.\" + \"\\033[0m\")\n",
"\n",
"s = time.perf_counter()\n",
"generate_serially()\n",
"elapsed = time.perf_counter() - s\n",
"print('\\033[1m' + f\"Serial executed in {elapsed:0.2f} seconds.\" + '\\033[0m')"
"print(\"\\033[1m\" + f\"Serial executed in {elapsed:0.2f} seconds.\" + \"\\033[0m\")"
]
}
],

View File

@@ -93,7 +93,8 @@
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../../state_of_the_union.txt')\n",
"\n",
"loader = TextLoader(\"../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"texts = text_splitter.split_documents(documents)\n",

View File

@@ -42,13 +42,13 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mQuestion: What NFL team won the Super Bowl in the year Justin Beiber was born?\n",
"\u001b[32;1m\u001b[1;3mQuestion: What NFL team won the Super Bowl in the year Justin Beiber was born?\n",
"\n",
"Answer: Let's think step by step.\u001B[0m\n",
"Answer: Let's think step by step.\u001b[0m\n",
"\n",
"\u001B[1m> Finished LLMChain chain.\u001B[0m\n"
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n"
]
},
{
@@ -95,11 +95,11 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mWrite a sad poem about ducks.\u001B[0m\n",
"\u001b[32;1m\u001b[1;3mWrite a sad poem about ducks.\u001b[0m\n",
"\n",
"\u001B[1m> Finished LLMChain chain.\u001B[0m\n"
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n"
]
},
{
@@ -138,7 +138,7 @@
"outputs": [],
"source": [
"template = \"\"\"Write a {adjective} poem about {subject}.\"\"\"\n",
"llm_chain = LLMChain.from_string(llm=OpenAI(temperature=0), template=template)\n"
"llm_chain = LLMChain.from_string(llm=OpenAI(temperature=0), template=template)"
]
},
{

View File

@@ -53,7 +53,7 @@
"outputs": [],
"source": [
"# This is an LLMChain to write a synopsis given a title of a play.\n",
"llm = OpenAI(temperature=.7)\n",
"llm = OpenAI(temperature=0.7)\n",
"template = \"\"\"You are a playwright. Given the title of play, it is your job to write a synopsis for that title.\n",
"\n",
"Title: {title}\n",
@@ -70,7 +70,7 @@
"outputs": [],
"source": [
"# This is an LLMChain to write a review of a play given a synopsis.\n",
"llm = OpenAI(temperature=.7)\n",
"llm = OpenAI(temperature=0.7)\n",
"template = \"\"\"You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n",
"\n",
"Play Synopsis:\n",
@@ -89,7 +89,10 @@
"source": [
"# This is the overall chain where we run these two chains in sequence.\n",
"from langchain.chains import SimpleSequentialChain\n",
"overall_chain = SimpleSequentialChain(chains=[synopsis_chain, review_chain], verbose=True)"
"\n",
"overall_chain = SimpleSequentialChain(\n",
" chains=[synopsis_chain, review_chain], verbose=True\n",
")"
]
},
{
@@ -171,13 +174,13 @@
"outputs": [],
"source": [
"# This is an LLMChain to write a synopsis given a title of a play and the era it is set in.\n",
"llm = OpenAI(temperature=.7)\n",
"llm = OpenAI(temperature=0.7)\n",
"template = \"\"\"You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.\n",
"\n",
"Title: {title}\n",
"Era: {era}\n",
"Playwright: This is a synopsis for the above play:\"\"\"\n",
"prompt_template = PromptTemplate(input_variables=[\"title\", 'era'], template=template)\n",
"prompt_template = PromptTemplate(input_variables=[\"title\", \"era\"], template=template)\n",
"synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, output_key=\"synopsis\")"
]
},
@@ -189,7 +192,7 @@
"outputs": [],
"source": [
"# This is an LLMChain to write a review of a play given a synopsis.\n",
"llm = OpenAI(temperature=.7)\n",
"llm = OpenAI(temperature=0.7)\n",
"template = \"\"\"You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.\n",
"\n",
"Play Synopsis:\n",
@@ -208,12 +211,14 @@
"source": [
"# This is the overall chain where we run these two chains in sequence.\n",
"from langchain.chains import SequentialChain\n",
"\n",
"overall_chain = SequentialChain(\n",
" chains=[synopsis_chain, review_chain],\n",
" input_variables=[\"era\", \"title\"],\n",
" # Here we return multiple variables\n",
" output_variables=[\"synopsis\", \"review\"],\n",
" verbose=True)"
" verbose=True,\n",
")"
]
},
{
@@ -248,7 +253,7 @@
}
],
"source": [
"overall_chain({\"title\":\"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
"overall_chain({\"title\": \"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
]
},
{
@@ -304,7 +309,7 @@
"from langchain.chains import SequentialChain\n",
"from langchain.memory import SimpleMemory\n",
"\n",
"llm = OpenAI(temperature=.7)\n",
"llm = OpenAI(temperature=0.7)\n",
"template = \"\"\"You are a social media manager for a theater company. Given the title of play, the era it is set in, the date,time and location, the synopsis of the play, and the review of the play, it is your job to write a social media post for that play.\n",
"\n",
"Here is some context about the time and location of the play:\n",
@@ -318,18 +323,23 @@
"\n",
"Social Media Post:\n",
"\"\"\"\n",
"prompt_template = PromptTemplate(input_variables=[\"synopsis\", \"review\", \"time\", \"location\"], template=template)\n",
"prompt_template = PromptTemplate(\n",
" input_variables=[\"synopsis\", \"review\", \"time\", \"location\"], template=template\n",
")\n",
"social_chain = LLMChain(llm=llm, prompt=prompt_template, output_key=\"social_post_text\")\n",
"\n",
"overall_chain = SequentialChain(\n",
" memory=SimpleMemory(memories={\"time\": \"December 25th, 8pm PST\", \"location\": \"Theater in the Park\"}),\n",
" memory=SimpleMemory(\n",
" memories={\"time\": \"December 25th, 8pm PST\", \"location\": \"Theater in the Park\"}\n",
" ),\n",
" chains=[synopsis_chain, review_chain, social_chain],\n",
" input_variables=[\"era\", \"title\"],\n",
" # Here we return multiple variables\n",
" output_variables=[\"social_post_text\"],\n",
" verbose=True)\n",
" verbose=True,\n",
")\n",
"\n",
"overall_chain({\"title\":\"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
"overall_chain({\"title\": \"Tragedy at sunset on the beach\", \"era\": \"Victorian England\"})"
]
},
{

View File

@@ -26,11 +26,12 @@
"outputs": [],
"source": [
"from langchain import PromptTemplate, OpenAI, LLMChain\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)\n"
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)"
]
},
{
@@ -136,13 +137,13 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mQuestion: whats 2 + 2\n",
"\u001b[32;1m\u001b[1;3mQuestion: whats 2 + 2\n",
"\n",
"Answer: Let's think step by step.\u001B[0m\n",
"Answer: Let's think step by step.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
@@ -257,9 +258,10 @@
" \"prompt_path\": \"prompt.json\",\n",
" \"llm_path\": \"llm.json\",\n",
" \"output_key\": \"text\",\n",
" \"_type\": \"llm_chain\"\n",
" \"_type\": \"llm_chain\",\n",
"}\n",
"import json\n",
"\n",
"with open(\"llm_chain_separate.json\", \"w\") as f:\n",
" json.dump(config, f, indent=2)"
]
@@ -319,13 +321,13 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001B[32;1m\u001B[1;3mQuestion: whats 2 + 2\n",
"\u001b[32;1m\u001b[1;3mQuestion: whats 2 + 2\n",
"\n",
"Answer: Let's think step by step.\u001B[0m\n",
"Answer: Let's think step by step.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{

View File

@@ -47,7 +47,10 @@
" shortened_text = \"\\n\\n\".join(text.split(\"\\n\\n\")[:3])\n",
" return {\"output_text\": shortened_text}\n",
"\n",
"transform_chain = TransformChain(input_variables=[\"text\"], output_variables=[\"output_text\"], transform=transform_func)"
"\n",
"transform_chain = TransformChain(\n",
" input_variables=[\"text\"], output_variables=[\"output_text\"], transform=transform_func\n",
")"
]
},
{

View File

@@ -73,6 +73,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
"\n",
"chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
"# Run the chain only specifying the input variable.\n",
@@ -109,12 +110,13 @@
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
")\n",
"\n",
"human_message_prompt = HumanMessagePromptTemplate(\n",
" prompt=PromptTemplate(\n",
" template=\"What is a good name for a company that makes {product}?\",\n",
" input_variables=[\"product\"],\n",
" )\n",
" prompt=PromptTemplate(\n",
" template=\"What is a good name for a company that makes {product}?\",\n",
" input_variables=[\"product\"],\n",
" )\n",
")\n",
"chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])\n",
"chat = ChatOpenAI(temperature=0.9)\n",
"chain = LLMChain(llm=chat, prompt=chat_prompt_template)\n",
@@ -189,6 +191,7 @@
],
"source": [
"from langchain.chains import SimpleSequentialChain\n",
"\n",
"overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)\n",
"\n",
"# Run the chain specifying only the input variable for the first chain.\n",
@@ -231,17 +234,19 @@
" @property\n",
" def input_keys(self) -> List[str]:\n",
" # Union of the input keys of the two chains.\n",
" all_input_vars = set(self.chain_1.input_keys).union(set(self.chain_2.input_keys))\n",
" all_input_vars = set(self.chain_1.input_keys).union(\n",
" set(self.chain_2.input_keys)\n",
" )\n",
" return list(all_input_vars)\n",
"\n",
" @property\n",
" def output_keys(self) -> List[str]:\n",
" return ['concat_output']\n",
" return [\"concat_output\"]\n",
"\n",
" def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:\n",
" output_1 = self.chain_1.run(inputs)\n",
" output_2 = self.chain_2.run(inputs)\n",
" return {'concat_output': output_1 + output_2}"
" return {\"concat_output\": output_1 + output_2}"
]
},
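The custom chain above unions the input keys of its two sub-chains and concatenates their outputs under "concat_output". A hedged usage sketch, assuming ConcatenateChain is defined as in the hunk and an OpenAI key is configured (the prompt texts are illustrative):

from langchain import LLMChain, OpenAI, PromptTemplate

llm = OpenAI(temperature=0)
prompt_1 = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
prompt_2 = PromptTemplate(
    input_variables=["product"],
    template="What is a good slogan for a company that makes {product}?",
)
concat_chain = ConcatenateChain(
    chain_1=LLMChain(llm=llm, prompt=prompt_1),
    chain_2=LLMChain(llm=llm, prompt=prompt_2),
)
# Both sub-chains share the single input key "product", so run() takes it positionally
print(concat_chain.run("colorful socks"))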
{

View File

@@ -142,7 +142,10 @@
}
],
"source": [
"qa_document_chain.run(input_document=state_of_the_union, question=\"what did the president say about justice breyer?\")"
"qa_document_chain.run(\n",
" input_document=state_of_the_union,\n",
" question=\"what did the president say about justice breyer?\",\n",
")"
]
},
{

View File

@@ -44,6 +44,7 @@
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../../state_of_the_union.txt\")\n",
"documents = loader.load()"
]
@@ -121,7 +122,9 @@
},
"outputs": [],
"source": [
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever())"
"qa = ConversationalRetrievalChain.from_llm(\n",
" OpenAI(temperature=0), vectorstore.as_retriever()\n",
")"
]
},
{
@@ -211,7 +214,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -232,7 +235,9 @@
},
"outputs": [],
"source": [
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True)"
"qa = ConversationalRetrievalChain.from_llm(\n",
" OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True\n",
")"
]
},
{
@@ -269,7 +274,7 @@
}
],
"source": [
"result['source_documents'][0]"
"result[\"source_documents\"][0]"
]
},
{
@@ -302,10 +307,14 @@
},
"outputs": [],
"source": [
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True)\n",
"qa = ConversationalRetrievalChain.from_llm(\n",
" OpenAI(temperature=0), vectorstore.as_retriever(), return_source_documents=True\n",
")\n",
"chat_history = []\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"result = qa({\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs})"
"result = qa(\n",
" {\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs}\n",
")"
]
},
{
@@ -385,7 +394,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -464,7 +473,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{
@@ -489,19 +498,30 @@
"from langchain.chains.llm import LLMChain\n",
"from langchain.callbacks.base import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT\n",
"from langchain.chains.conversational_retrieval.prompts import (\n",
" CONDENSE_QUESTION_PROMPT,\n",
" QA_PROMPT,\n",
")\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n",
"# and a separate, non-streaming llm for question generation\n",
"llm = OpenAI(temperature=0)\n",
"streaming_llm = OpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)\n",
"streaming_llm = OpenAI(\n",
" streaming=True,\n",
" callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),\n",
" verbose=True,\n",
" temperature=0,\n",
")\n",
"\n",
"question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)\n",
"doc_chain = load_qa_chain(streaming_llm, chain_type=\"stuff\", prompt=QA_PROMPT)\n",
"\n",
"qa = ConversationalRetrievalChain(\n",
" retriever=vectorstore.as_retriever(), combine_docs_chain=doc_chain, question_generator=question_generator)"
" retriever=vectorstore.as_retriever(),\n",
" combine_docs_chain=doc_chain,\n",
" question_generator=question_generator,\n",
")"
]
},
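A short sketch of invoking the chain assembled above: the non-streaming llm condenses the follow-up question, while StreamingStdOutCallbackHandler prints answer tokens from streaming_llm as they arrive:

chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = qa({"question": query, "chat_history": chat_history})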
{
@@ -545,7 +565,7 @@
"source": [
"chat_history = [(query, result[\"answer\"])]\n",
"query = \"Did he mention who she suceeded\"\n",
"result = qa({\"question\": query, \"chat_history\": chat_history})\n"
"result = qa({\"question\": query, \"chat_history\": chat_history})"
]
},
{
@@ -571,7 +591,11 @@
" for human, ai in inputs:\n",
" res.append(f\"Human:{human}\\nAI:{ai}\")\n",
" return \"\\n\".join(res)\n",
"qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever(), get_chat_history=get_chat_history)"
"\n",
"\n",
"qa = ConversationalRetrievalChain.from_llm(\n",
" OpenAI(temperature=0), vectorstore.as_retriever(), get_chat_history=get_chat_history\n",
")"
]
},
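A quick check of the formatter wired in above (illustrative input, not from the notebook):

print(get_chat_history([("Hi", "Hello! How can I help?")]))
# Human:Hi
# AI:Hello! How can I help?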
{
@@ -608,7 +632,7 @@
}
],
"source": [
"result['answer']"
"result[\"answer\"]"
]
},
{

View File

@@ -91,7 +91,9 @@
"metadata": {},
"outputs": [],
"source": [
"embeddings = HypotheticalDocumentEmbedder.from_llm(multi_llm, base_embeddings, \"web_search\")"
"embeddings = HypotheticalDocumentEmbedder.from_llm(\n",
" multi_llm, base_embeddings, \"web_search\"\n",
")"
]
},
{
@@ -136,7 +138,9 @@
"metadata": {},
"outputs": [],
"source": [
"embeddings = HypotheticalDocumentEmbedder(llm_chain=llm_chain, base_embeddings=base_embeddings)"
"embeddings = HypotheticalDocumentEmbedder(\n",
" llm_chain=llm_chain, base_embeddings=base_embeddings\n",
")"
]
},
{
@@ -146,7 +150,9 @@
"metadata": {},
"outputs": [],
"source": [
"result = embeddings.embed_query(\"What did the president say about Ketanji Brown Jackson\")"
"result = embeddings.embed_query(\n",
" \"What did the president say about Ketanji Brown Jackson\"\n",
")"
]
},
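For orientation: embed_query in the HyDE setup above has the LLM draft hypothetical documents for the query, embeds them with base_embeddings, and combines the vectors, so result is a single embedding:

# A flat list of floats, e.g. 1536 dimensions for OpenAI embeddings
print(type(result), len(result))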
{

View File

@@ -66,7 +66,9 @@
}
],
"source": [
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))])"
"docsearch = Chroma.from_texts(\n",
" texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]\n",
")"
]
},
{
@@ -213,7 +215,9 @@
"FINAL ANSWER IN ITALIAN:\"\"\"\n",
"PROMPT = PromptTemplate(template=template, input_variables=[\"summaries\", \"question\"])\n",
"\n",
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"stuff\", prompt=PROMPT)\n",
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0), chain_type=\"stuff\", prompt=PROMPT\n",
")\n",
"query = \"What did the president say about Justice Breyer\"\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
@@ -277,7 +281,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True)"
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -337,7 +343,6 @@
}
],
"source": [
"\n",
"question_prompt_template = \"\"\"Use the following portion of a long document to see if any of the text is relevant to answer the question. \n",
"Return any relevant text in Italian.\n",
"{context}\n",
@@ -361,7 +366,13 @@
" template=combine_prompt_template, input_variables=[\"summaries\", \"question\"]\n",
")\n",
"\n",
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True, question_prompt=QUESTION_PROMPT, combine_prompt=COMBINE_PROMPT)\n",
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"map_reduce\",\n",
" return_intermediate_steps=True,\n",
" question_prompt=QUESTION_PROMPT,\n",
" combine_prompt=COMBINE_PROMPT,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -438,7 +449,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True)"
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -537,7 +550,13 @@
}
],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True, question_prompt=question_prompt, refine_prompt=refine_prompt)\n",
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"refine\",\n",
" return_intermediate_steps=True,\n",
" question_prompt=question_prompt,\n",
" refine_prompt=refine_prompt,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -558,7 +577,12 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", metadata_keys=['source'], return_intermediate_steps=True)"
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"map_rerank\",\n",
" metadata_keys=[\"source\"],\n",
" return_intermediate_steps=True,\n",
")"
]
},
{
@@ -663,7 +687,13 @@
" input_variables=[\"context\", \"question\"],\n",
" output_parser=output_parser,\n",
")\n",
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", metadata_keys=['source'], return_intermediate_steps=True, prompt=PROMPT)\n",
"chain = load_qa_with_sources_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"map_rerank\",\n",
" metadata_keys=[\"source\"],\n",
" return_intermediate_steps=True,\n",
" prompt=PROMPT,\n",
")\n",
"query = \"What did the president say about Justice Breyer\"\n",
"result = chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]

View File

@@ -71,7 +71,9 @@
}
],
"source": [
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]).as_retriever()"
"docsearch = Chroma.from_texts(\n",
" texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]\n",
").as_retriever()"
]
},
{
@@ -296,7 +298,9 @@
},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True)"
"chain = load_qa_chain(\n",
" OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True\n",
")"
]
},
{
@@ -380,7 +384,13 @@
"COMBINE_PROMPT = PromptTemplate(\n",
" template=combine_prompt_template, input_variables=[\"summaries\", \"question\"]\n",
")\n",
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True, question_prompt=QUESTION_PROMPT, combine_prompt=COMBINE_PROMPT)\n",
"chain = load_qa_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"map_reduce\",\n",
" return_map_steps=True,\n",
" question_prompt=QUESTION_PROMPT,\n",
" combine_prompt=COMBINE_PROMPT,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -463,7 +473,9 @@
},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True)"
"chain = load_qa_chain(\n",
" OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True\n",
")"
]
},
{
@@ -556,8 +568,13 @@
"initial_qa_prompt = PromptTemplate(\n",
" input_variables=[\"context_str\", \"question\"], template=initial_qa_template\n",
")\n",
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True,\n",
" question_prompt=initial_qa_prompt, refine_prompt=refine_prompt)\n",
"chain = load_qa_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"refine\",\n",
" return_refine_steps=True,\n",
" question_prompt=initial_qa_prompt,\n",
" refine_prompt=refine_prompt,\n",
")\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -580,7 +597,9 @@
},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", return_intermediate_steps=True)"
"chain = load_qa_chain(\n",
" OpenAI(temperature=0), chain_type=\"map_rerank\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -711,7 +730,12 @@
" output_parser=output_parser,\n",
")\n",
"\n",
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_rerank\", return_intermediate_steps=True, prompt=PROMPT)\n",
"chain = load_qa_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"map_rerank\",\n",
" return_intermediate_steps=True,\n",
" prompt=PROMPT,\n",
")\n",
"query = \"What did the president say about Justice Breyer\"\n",
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]

View File

@@ -248,7 +248,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True)"
"chain = load_summarize_chain(\n",
" OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True\n",
")"
]
},
{
@@ -314,7 +316,13 @@
"\n",
"CONCISE SUMMARY IN ITALIAN:\"\"\"\n",
"PROMPT = PromptTemplate(template=prompt_template, input_variables=[\"text\"])\n",
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)\n",
"chain = load_summarize_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"map_reduce\",\n",
" return_intermediate_steps=True,\n",
" map_prompt=PROMPT,\n",
" combine_prompt=PROMPT,\n",
")\n",
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
},
@@ -382,7 +390,9 @@
}
],
"source": [
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True)\n",
"chain = load_summarize_chain(\n",
" OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True\n",
")\n",
"\n",
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
@@ -441,7 +451,13 @@
" input_variables=[\"existing_answer\", \"text\"],\n",
" template=refine_template,\n",
")\n",
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"refine\", return_intermediate_steps=True, question_prompt=PROMPT, refine_prompt=refine_prompt)\n",
"chain = load_summarize_chain(\n",
" OpenAI(temperature=0),\n",
" chain_type=\"refine\",\n",
" return_intermediate_steps=True,\n",
" question_prompt=PROMPT,\n",
" refine_prompt=refine_prompt,\n",
")\n",
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
},

View File

@@ -41,6 +41,7 @@
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(\"../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
@@ -57,7 +58,9 @@
"metadata": {},
"outputs": [],
"source": [
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
"qa = RetrievalQA.from_chain_type(\n",
" llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -100,7 +103,9 @@
"metadata": {},
"outputs": [],
"source": [
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", retriever=docsearch.as_retriever())"
"qa = RetrievalQA.from_chain_type(\n",
" llm=OpenAI(), chain_type=\"map_reduce\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -141,6 +146,7 @@
"outputs": [],
"source": [
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
"qa = RetrievalQA(combine_documents_chain=qa_chain, retriever=docsearch.as_retriever())"
]
@@ -184,6 +190,7 @@
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"\n",
"prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n",
"\n",
"{context}\n",
@@ -203,7 +210,12 @@
"outputs": [],
"source": [
"chain_type_kwargs = {\"prompt\": PROMPT}\n",
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), chain_type_kwargs=chain_type_kwargs)"
"qa = RetrievalQA.from_chain_type(\n",
" llm=OpenAI(),\n",
" chain_type=\"stuff\",\n",
" retriever=docsearch.as_retriever(),\n",
" chain_type_kwargs=chain_type_kwargs,\n",
")"
]
},
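A one-line sketch of querying the chain with the custom prompt passed through chain_type_kwargs (the query mirrors the one used throughout these notebooks):

query = "What did the president say about Ketanji Brown Jackson"
qa.run(query)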
{
@@ -244,7 +256,12 @@
"metadata": {},
"outputs": [],
"source": [
"qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"stuff\", retriever=docsearch.as_retriever(), return_source_documents=True)"
"qa = RetrievalQA.from_chain_type(\n",
" llm=OpenAI(),\n",
" chain_type=\"stuff\",\n",
" retriever=docsearch.as_retriever(),\n",
" return_source_documents=True,\n",
")"
]
},
{

View File

@@ -55,7 +55,9 @@
}
],
"source": [
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": f\"{i}-pl\"} for i in range(len(texts))])"
"docsearch = Chroma.from_texts(\n",
" texts, embeddings, metadatas=[{\"source\": f\"{i}-pl\"} for i in range(len(texts))]\n",
")"
]
},
{
@@ -77,7 +79,9 @@
"source": [
"from langchain import OpenAI\n",
"\n",
"chain = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"stuff\", retriever=docsearch.as_retriever())"
"chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
" OpenAI(temperature=0), chain_type=\"stuff\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -99,7 +103,10 @@
}
],
"source": [
"chain({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
"chain(\n",
" {\"question\": \"What did the president say about Justice Breyer\"},\n",
" return_only_outputs=True,\n",
")"
]
},
{
@@ -120,7 +127,9 @@
"metadata": {},
"outputs": [],
"source": [
"chain = RetrievalQAWithSourcesChain.from_chain_type(OpenAI(temperature=0), chain_type=\"map_reduce\", retriever=docsearch.as_retriever())"
"chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
" OpenAI(temperature=0), chain_type=\"map_reduce\", retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -142,7 +151,10 @@
}
],
"source": [
"chain({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
"chain(\n",
" {\"question\": \"What did the president say about Justice Breyer\"},\n",
" return_only_outputs=True,\n",
")"
]
},
{
@@ -161,8 +173,11 @@
"outputs": [],
"source": [
"from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n",
"\n",
"qa_chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
"qa = RetrievalQAWithSourcesChain(combine_documents_chain=qa_chain, retriever=docsearch.as_retriever())"
"qa = RetrievalQAWithSourcesChain(\n",
" combine_documents_chain=qa_chain, retriever=docsearch.as_retriever()\n",
")"
]
},
{
@@ -184,7 +199,10 @@
}
],
"source": [
"qa({\"question\": \"What did the president say about Justice Breyer\"}, return_only_outputs=True)"
"qa(\n",
" {\"question\": \"What did the president say about Justice Breyer\"},\n",
" return_only_outputs=True,\n",
")"
]
},
{

View File

@@ -72,6 +72,7 @@
" github_url = f\"https://github.com/{repo_owner}/{repo_name}/blob/{git_sha}/{relative_path}\"\n",
" yield Document(page_content=f.read(), metadata={\"source\": github_url})\n",
"\n",
"\n",
"sources = get_github_docs(\"yirenlu92\", \"deno-manual-forked\")\n",
"\n",
"source_chunks = []\n",
@@ -115,14 +116,13 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"\n",
"prompt_template = \"\"\"Use the context below to write a 400 word blog post about the topic below:\n",
" Context: {context}\n",
" Topic: {topic}\n",
" Blog post:\"\"\"\n",
"\n",
"PROMPT = PromptTemplate(\n",
" template=prompt_template, input_variables=[\"context\", \"topic\"]\n",
")\n",
"PROMPT = PromptTemplate(template=prompt_template, input_variables=[\"context\", \"topic\"])\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"\n",

View File

@@ -67,7 +67,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = AirbyteJSONLoader('/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl')"
"loader = AirbyteJSONLoader(\"/tmp/airbyte_local/json_data/_airbyte_raw_pokemon.jsonl\")"
]
},
{

View File

@@ -78,7 +78,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = AzureBlobStorageContainerLoader(conn_str=\"<conn_str>\", container=\"<container>\", prefix=\"<prefix>\")"
"loader = AzureBlobStorageContainerLoader(\n",
" conn_str=\"<conn_str>\", container=\"<container>\", prefix=\"<prefix>\"\n",
")"
]
},
{

View File

@@ -38,7 +38,11 @@
"metadata": {},
"outputs": [],
"source": [
"loader = AzureBlobStorageFileLoader(conn_str='<connection string>', container='<container name>', blob_name='<blob name>')"
"loader = AzureBlobStorageFileLoader(\n",
" conn_str=\"<connection string>\",\n",
" container=\"<container name>\",\n",
" blob_name=\"<blob name>\",\n",
")"
]
},
{

View File

@@ -24,7 +24,7 @@
"metadata": {},
"outputs": [],
"source": [
"BASE_QUERY = '''\n",
"BASE_QUERY = \"\"\"\n",
"SELECT\n",
" id,\n",
" dna_sequence,\n",
@@ -41,7 +41,7 @@
" SELECT\n",
" AS STRUCT 3 AS id, \"TCCGGA\" AS dna_sequence, \"Acidianus hospitalis (strain W1).\" AS organism) AS new_array),\n",
" UNNEST(new_array)\n",
"'''"
"\"\"\""
]
},
{
@@ -92,7 +92,11 @@
"metadata": {},
"outputs": [],
"source": [
"loader = BigQueryLoader(BASE_QUERY, page_content_columns=[\"dna_sequence\", \"organism\"], metadata_columns=[\"id\"])\n",
"loader = BigQueryLoader(\n",
" BASE_QUERY,\n",
" page_content_columns=[\"dna_sequence\", \"organism\"],\n",
" metadata_columns=[\"id\"],\n",
")\n",
"\n",
"data = loader.load()"
]
@@ -128,7 +132,7 @@
"outputs": [],
"source": [
"# Note that the `id` column is being returned twice, with one instance aliased as `source`\n",
"ALIASED_QUERY = '''\n",
"ALIASED_QUERY = \"\"\"\n",
"SELECT\n",
" id,\n",
" dna_sequence,\n",
@@ -146,7 +150,7 @@
" SELECT\n",
" AS STRUCT 3 AS id, \"TCCGGA\" AS dna_sequence, \"Acidianus hospitalis (strain W1).\" AS organism) AS new_array),\n",
" UNNEST(new_array)\n",
"'''"
"\"\"\""
]
},
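A sketch of how the aliased query might be consumed so that the source alias lands in document metadata, mirroring the loader call earlier in this file:

loader = BigQueryLoader(
    ALIASED_QUERY,
    page_content_columns=["dna_sequence", "organism"],
    metadata_columns=["source"],
)
data = loader.load()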
{

View File

@@ -43,9 +43,7 @@
},
"outputs": [],
"source": [
"loader = BiliBiliLoader(\n",
" [\"https://www.bilibili.com/video/BV1xt411o7Xu/\"]\n",
")"
"loader = BiliBiliLoader([\"https://www.bilibili.com/video/BV1xt411o7Xu/\"])"
]
},
{

View File

@@ -26,7 +26,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = CollegeConfidentialLoader(\"https://www.collegeconfidential.com/colleges/brown-university/\")"
"loader = CollegeConfidentialLoader(\n",
" \"https://www.collegeconfidential.com/colleges/brown-university/\"\n",
")"
]
},
{

View File

@@ -30,7 +30,7 @@
},
"outputs": [],
"source": [
"loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv')\n",
"loader = CSVLoader(file_path=\"./example_data/mlb_teams_2012.csv\")\n",
"\n",
"data = loader.load()"
]
@@ -73,11 +73,14 @@
},
"outputs": [],
"source": [
"loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv', csv_args={\n",
" 'delimiter': ',',\n",
" 'quotechar': '\"',\n",
" 'fieldnames': ['MLB Team', 'Payroll in millions', 'Wins']\n",
"})\n",
"loader = CSVLoader(\n",
" file_path=\"./example_data/mlb_teams_2012.csv\",\n",
" csv_args={\n",
" \"delimiter\": \",\",\n",
" \"quotechar\": '\"',\n",
" \"fieldnames\": [\"MLB Team\", \"Payroll in millions\", \"Wins\"],\n",
" },\n",
")\n",
"\n",
"data = loader.load()"
]
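A quick inspection sketch for the csv_args override above. Note that because fieldnames is supplied, Python's csv.DictReader treats the file's own header row as ordinary data, so the first Document reflects the header:

for doc in data[:2]:
    print(doc.page_content)
    print(doc.metadata)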
@@ -119,7 +122,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = CSVLoader(file_path='./example_data/mlb_teams_2012.csv', source_column=\"Team\")\n",
"loader = CSVLoader(file_path=\"./example_data/mlb_teams_2012.csv\", source_column=\"Team\")\n",
"\n",
"data = loader.load()"
]

View File

@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_csv('example_data/mlb_teams_2012.csv')"
"df = pd.read_csv(\"example_data/mlb_teams_2012.csv\")"
]
},
{

View File

@@ -34,7 +34,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = DirectoryLoader('../', glob=\"**/*.md\")"
"loader = DirectoryLoader(\"../\", glob=\"**/*.md\")"
]
},
{
@@ -94,7 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = DirectoryLoader('../', glob=\"**/*.md\", loader_cls=TextLoader)"
"loader = DirectoryLoader(\"../\", glob=\"**/*.md\", loader_cls=TextLoader)"
]
},
{

View File

@@ -82,7 +82,7 @@
"loader = DuckDBLoader(\n",
" \"SELECT * FROM read_csv_auto('example.csv')\",\n",
" page_content_columns=[\"Team\"],\n",
" metadata_columns=[\"Payroll\"]\n",
" metadata_columns=[\"Payroll\"],\n",
")\n",
"\n",
"data = loader.load()"
@@ -120,7 +120,7 @@
"source": [
"loader = DuckDBLoader(\n",
" \"SELECT Team, Payroll, Team As source FROM read_csv_auto('example.csv')\",\n",
" metadata_columns=[\"source\"]\n",
" metadata_columns=[\"source\"],\n",
")\n",
"\n",
"data = loader.load()"

View File

@@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader('example_data/fake-email.eml')"
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\")"
]
},
{
@@ -86,7 +86,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredEmailLoader('example_data/fake-email.eml', mode=\"elements\")"
"loader = UnstructuredEmailLoader(\"example_data/fake-email.eml\", mode=\"elements\")"
]
},
{
@@ -145,7 +145,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = OutlookMessageLoader('example_data/fake-email.msg')"
"loader = OutlookMessageLoader(\"example_data/fake-email.msg\")"
]
},
{

View File

@@ -59,9 +59,9 @@
"outputs": [],
"source": [
"figma_loader = FigmaFileLoader(\n",
" os.environ.get('ACCESS_TOKEN'),\n",
" os.environ.get('NODE_IDS'),\n",
" os.environ.get('FILE_KEY')\n",
" os.environ.get(\"ACCESS_TOKEN\"),\n",
" os.environ.get(\"NODE_IDS\"),\n",
" os.environ.get(\"FILE_KEY\"),\n",
")"
]
},
@@ -92,17 +92,23 @@
" Figma file nodes and metadata: {context}\"\"\"\n",
"\n",
" human_prompt_template = \"Code the {text}. Ensure it's mobile responsive\"\n",
" system_message_prompt = SystemMessagePromptTemplate.from_template(system_prompt_template)\n",
" human_message_prompt = HumanMessagePromptTemplate.from_template(human_prompt_template)\n",
" system_message_prompt = SystemMessagePromptTemplate.from_template(\n",
" system_prompt_template\n",
" )\n",
" human_message_prompt = HumanMessagePromptTemplate.from_template(\n",
" human_prompt_template\n",
" )\n",
" # delete the gpt-4 model_name to use the default gpt-3.5 turbo for faster results\n",
" gpt_4 = ChatOpenAI(temperature=.02, model_name='gpt-4')\n",
" gpt_4 = ChatOpenAI(temperature=0.02, model_name=\"gpt-4\")\n",
" # Use the retriever's 'get_relevant_documents' method if needed to filter down longer docs\n",
" relevant_nodes = figma_doc_retriever.get_relevant_documents(human_input)\n",
" conversation = [system_message_prompt, human_message_prompt]\n",
" chat_prompt = ChatPromptTemplate.from_messages(conversation)\n",
" response = gpt_4(chat_prompt.format_prompt( \n",
" context=relevant_nodes, \n",
" text=human_input).to_messages())\n",
" response = gpt_4(\n",
" chat_prompt.format_prompt(\n",
" context=relevant_nodes, text=human_input\n",
" ).to_messages()\n",
" )\n",
" return response"
]
},

View File

@@ -157,7 +157,10 @@
"from langchain.document_loaders import GitLoader\n",
"\n",
"# eg. loading only python files\n",
"loader = GitLoader(repo_path=\"./example_data/test_repo1/\", file_filter=lambda file_path: file_path.endswith(\".py\"))"
"loader = GitLoader(\n",
" repo_path=\"./example_data/test_repo1/\",\n",
" file_filter=lambda file_path: file_path.endswith(\".py\"),\n",
")"
]
},
{

View File

@@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"loader = GutenbergLoader('https://www.gutenberg.org/cache/epub/69972/pg69972.txt')"
"loader = GutenbergLoader(\"https://www.gutenberg.org/cache/epub/69972/pg69972.txt\")"
]
},
{

View File

@@ -57,7 +57,9 @@
"execution_count": 4,
"outputs": [],
"source": [
"loader = IFixitLoader(\"https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself\")\n",
"loader = IFixitLoader(\n",
" \"https://www.ifixit.com/Answers/View/318583/My+iPhone+6+is+typing+and+opening+apps+by+itself\"\n",
")\n",
"data = loader.load()"
],
"metadata": {

View File

@@ -24,7 +24,12 @@
"metadata": {},
"outputs": [],
"source": [
"loader = NotebookLoader(\"example_data/notebook.ipynb\", include_outputs=True, max_output_length=20, remove_newline=True)"
"loader = NotebookLoader(\n",
" \"example_data/notebook.ipynb\",\n",
" include_outputs=True,\n",
" max_output_length=20,\n",
" remove_newline=True,\n",
")"
]
},
{

View File

@@ -76,6 +76,7 @@
],
"source": [
"from getpass import getpass\n",
"\n",
"NOTION_TOKEN = getpass()\n",
"DATABASE_ID = getpass()"
]

View File

@@ -78,7 +78,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredPowerPointLoader(\"example_data/fake-power-point.pptx\", mode=\"elements\")"
"loader = UnstructuredPowerPointLoader(\n",
" \"example_data/fake-power-point.pptx\", mode=\"elements\"\n",
")"
]
},
{

View File

@@ -39,6 +39,7 @@
"source": [
"# fixes a bug with asyncio and jupyter\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
@@ -99,7 +100,7 @@
"source": [
"loader = SitemapLoader(\n",
" \"https://langchain.readthedocs.io/sitemap.xml\",\n",
" filter_urls=[\"https://python.langchain.com/en/latest/\"]\n",
" filter_urls=[\"https://python.langchain.com/en/latest/\"],\n",
")\n",
"documents = loader.load()"
]

View File

@@ -28,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import SlackDirectoryLoader "
"from langchain.document_loaders import SlackDirectoryLoader"
]
},
{
@@ -40,7 +40,7 @@
"source": [
"# Optionally set your Slack URL. This will give you proper URLs in the docs sources.\n",
"SLACK_WORKSPACE_URL = \"https://xxx.slack.com\"\n",
"LOCAL_ZIPFILE = \"\" # Paste the local paty to your Slack zip file here.\n",
"LOCAL_ZIPFILE = \"\" # Paste the local paty to your Slack zip file here.\n",
"\n",
"loader = SlackDirectoryLoader(LOCAL_ZIPFILE, SLACK_WORKSPACE_URL)"
]

View File

@@ -26,7 +26,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = SRTLoader(\"example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt\")"
"loader = SRTLoader(\n",
" \"example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt\"\n",
")"
]
},
{

View File

@@ -118,7 +118,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\"./example_data/state_of_the_union.txt\", mode=\"elements\")"
"loader = UnstructuredFileLoader(\n",
" \"./example_data/state_of_the_union.txt\", mode=\"elements\"\n",
")"
]
},
{
@@ -183,7 +185,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\"layout-parser-paper-fast.pdf\", strategy=\"fast\", mode=\"elements\")"
"loader = UnstructuredFileLoader(\n",
" \"layout-parser-paper-fast.pdf\", strategy=\"fast\", mode=\"elements\"\n",
")"
]
},
{
@@ -248,7 +252,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = UnstructuredFileLoader(\"./example_data/layout-parser-paper.pdf\", mode=\"elements\")"
"loader = UnstructuredFileLoader(\n",
" \"./example_data/layout-parser-paper.pdf\", mode=\"elements\"\n",
")"
]
},
{

View File

@@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
" from langchain.document_loaders import UnstructuredURLLoader"
"from langchain.document_loaders import UnstructuredURLLoader"
]
},
{
@@ -29,8 +29,8 @@
"source": [
"urls = [\n",
" \"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023\",\n",
" \"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-9-2023\"\n",
"]\n"
" \"https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-9-2023\",\n",
"]"
]
},
{
@@ -89,7 +89,7 @@
"source": [
"urls = [\n",
" \"https://www.youtube.com/watch?v=dQw4w9WgXcQ\",\n",
" \"https://goo.gl/maps/NDSHwePEyaHMFGwh8\"\n",
" \"https://goo.gl/maps/NDSHwePEyaHMFGwh8\",\n",
"]"
]
},
@@ -162,7 +162,7 @@
"source": [
"urls = [\n",
" \"https://www.youtube.com/watch?v=dQw4w9WgXcQ\",\n",
" \"https://goo.gl/maps/NDSHwePEyaHMFGwh8\"\n",
" \"https://goo.gl/maps/NDSHwePEyaHMFGwh8\",\n",
"]"
]
},

View File

@@ -206,10 +206,12 @@
}
],
"source": [
"loader = WebBaseLoader(\"https://www.govinfo.gov/content/pkg/CFR-2018-title10-vol3/xml/CFR-2018-title10-vol3-sec431-86.xml\")\n",
"loader = WebBaseLoader(\n",
" \"https://www.govinfo.gov/content/pkg/CFR-2018-title10-vol3/xml/CFR-2018-title10-vol3-sec431-86.xml\"\n",
")\n",
"loader.default_parser = \"xml\"\n",
"docs = loader.load()\n",
"docs\n"
"docs"
]
},
{

View File

@@ -18,7 +18,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import YoutubeLoader\n"
"from langchain.document_loaders import YoutubeLoader"
]
},
{
@@ -40,7 +40,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = YoutubeLoader.from_youtube_url(\"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True)"
"loader = YoutubeLoader.from_youtube_url(\n",
" \"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True\n",
")"
]
},
{
@@ -78,7 +80,9 @@
"metadata": {},
"outputs": [],
"source": [
"loader = YoutubeLoader.from_youtube_url(\"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True)"
"loader = YoutubeLoader.from_youtube_url(\n",
" \"https://www.youtube.com/watch?v=QsYGlZkevEg\", add_video_info=True\n",
")"
]
},
{
@@ -121,7 +125,7 @@
"source": [
"from langchain.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader\n",
"\n",
"# Init the GoogleApiClient \n",
"# Init the GoogleApiClient\n",
"from pathlib import Path\n",
"\n",
"\n",
@@ -129,11 +133,17 @@
"\n",
"\n",
"# Use a Channel\n",
"youtube_loader_channel = GoogleApiYoutubeLoader(google_api_client=google_api_client, channel_name=\"Reducible\",captions_language=\"en\")\n",
"youtube_loader_channel = GoogleApiYoutubeLoader(\n",
" google_api_client=google_api_client,\n",
" channel_name=\"Reducible\",\n",
" captions_language=\"en\",\n",
")\n",
"\n",
"# Use Youtube Ids\n",
"\n",
"youtube_loader_ids = GoogleApiYoutubeLoader(google_api_client=google_api_client, video_ids=[\"TrdevFK_am4\"], add_video_info=True)\n",
"youtube_loader_ids = GoogleApiYoutubeLoader(\n",
" google_api_client=google_api_client, video_ids=[\"TrdevFK_am4\"], add_video_info=True\n",
")\n",
"\n",
"# returns a list of Documents\n",
"youtube_loader_channel.load()"

View File

@@ -21,6 +21,7 @@
"from typing import List\n",
"from langchain.schema import Document\n",
"\n",
"\n",
"class BaseRetriever(ABC):\n",
" @abstractmethod\n",
" def get_relevant_documents(self, query: str) -> List[Document]:\n",
@@ -99,7 +100,8 @@
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"loader = TextLoader('../state_of_the_union.txt', encoding='utf8')"
"\n",
"loader = TextLoader(\"../state_of_the_union.txt\", encoding='utf8')"
]
},
{
@@ -299,6 +301,7 @@
"outputs": [],
"source": [
"from langchain.text_splitter import CharacterTextSplitter\n",
"\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"texts = text_splitter.split_documents(documents)"
]
@@ -319,6 +322,7 @@
"outputs": [],
"source": [
"from langchain.embeddings import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
@@ -347,6 +351,7 @@
],
"source": [
"from langchain.vectorstores import Chroma\n",
"\n",
"db = Chroma.from_documents(texts, embeddings)"
]
},
@@ -424,9 +429,9 @@
"outputs": [],
"source": [
"index_creator = VectorstoreIndexCreator(\n",
" vectorstore_cls=Chroma, \n",
" vectorstore_cls=Chroma,\n",
" embedding=OpenAIEmbeddings(),\n",
" text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
" text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0),\n",
")"
]
},

View File

@@ -37,7 +37,10 @@
"# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\n",
"\n",
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"loader = CSVLoader(file_path='../../document_loaders/examples/example_data/mlb_teams_2012.csv')\n",
"\n",
"loader = CSVLoader(\n",
" file_path=\"../../document_loaders/examples/example_data/mlb_teams_2012.csv\"\n",
")\n",
"data = loader.load()\n",
"\n",
"\n",
@@ -48,16 +51,18 @@
"from langchain.docstore.document import Document\n",
"import json\n",
"\n",
"def write_json(path: str, documents: List[Document])-> None:\n",
"\n",
"def write_json(path: str, documents: List[Document]) -> None:\n",
" results = [{\"text\": doc.page_content} for doc in documents]\n",
" with open(path, \"w\") as f:\n",
" json.dump(results, f, indent=2)\n",
"\n",
"\n",
"write_json(\"foo.json\", data)\n",
"\n",
"# STEP 3: Use\n",
"\n",
"# Ingest this as you would any other json file in https://github.com/openai/chatgpt-retrieval-plugin/tree/main/scripts/process_json\n"
"# Ingest this as you would any other json file in https://github.com/openai/chatgpt-retrieval-plugin/tree/main/scripts/process_json"
]
},
{

View File

@@ -37,7 +37,7 @@
"metadata": {},
"outputs": [],
"source": [
"elasticsearch_url=\"http://localhost:9200\"\n",
"elasticsearch_url = \"http://localhost:9200\"\n",
"retriever = ElasticSearchBM25Retriever.create(elasticsearch_url, \"langchain-index-4\")"
]
},

View File

@@ -30,11 +30,12 @@
"outputs": [],
"source": [
"from metal_sdk.metal import Metal\n",
"\n",
"API_KEY = \"\"\n",
"CLIENT_ID = \"\"\n",
"APP_ID = \"\"\n",
"\n",
"metal = Metal(API_KEY, CLIENT_ID, APP_ID);\n"
"metal = Metal(API_KEY, CLIENT_ID, APP_ID);"
]
},
{
@@ -67,8 +68,8 @@
}
],
"source": [
"metal.index( {\"text\": \"foo1\"})\n",
"metal.index( {\"text\": \"foo\"})"
"metal.index({\"text\": \"foo1\"})\n",
"metal.index({\"text\": \"foo\"})"
]
},
{

Some files were not shown because too many files have changed in this diff.