commit 98afa924e9
cr
@@ -56,23 +56,15 @@
"metadata": {},
"outputs": [],
"source": [
"docsearch = FAISS.from_texts(texts, embeddings, metadatas=[{\"source\": i} for i in range(len(texts))])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "5286f58f",
"metadata": {},
"outputs": [],
"source": [
"docsearch = FAISS.from_texts(texts, embeddings, metadatas=[{\"source\": i} for i in range(len(texts))])\n",
"\n",
"query = \"What did the president say about Justice Breyer\"\n",
"docs = docsearch.similarity_search(query)"
]
},
{
"cell_type": "code",
-"execution_count": 5,
+"execution_count": 4,
"id": "005a47e9",
"metadata": {},
"outputs": [],
@@ -93,7 +85,7 @@
},
{
"cell_type": "code",
-"execution_count": 6,
+"execution_count": 5,
"id": "fc1a5ed6",
"metadata": {},
"outputs": [],
@@ -103,7 +95,7 @@
},
{
"cell_type": "code",
-"execution_count": 7,
+"execution_count": 6,
"id": "e239964b",
"metadata": {},
"outputs": [],
@@ -113,24 +105,25 @@
},
{
"cell_type": "code",
-"execution_count": 8,
+"execution_count": 7,
"id": "7d766417",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
-"{'output_text': ' The president did not mention Justice Breyer.\\nSOURCES: 0-pl, 1-pl, 2-pl'}"
+"{'answer': ' The president did not mention Justice Breyer.',\n",
+" 'sources': '0-pl, 1-pl, 2-pl'}"
]
},
-"execution_count": 8,
+"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"query = \"What did the president say about Justice Breyer\"\n",
-"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
+"chain.combine_and_parse(**{\"docs\": docs, \"question\": query})"
]
},
{
@@ -145,7 +138,7 @@
},
{
"cell_type": "code",
-"execution_count": 9,
+"execution_count": 8,
"id": "921db0a4",
"metadata": {},
"outputs": [],
@@ -155,31 +148,66 @@
},
{
"cell_type": "code",
-"execution_count": 10,
+"execution_count": 9,
"id": "e417926a",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n",
"Token indices sequence length is longer than the specified maximum sequence length for this model (1546 > 1024). Running this sequence through the model will result in indexing errors\n"
]
},
{
"data": {
"text/plain": [
-"{'output_text': ' The president did not mention Justice Breyer.\\nSOURCES: 0, 1, 2'}"
+"{'answer': ' The president did not mention Justice Breyer.',\n",
+" 'sources': '0, 1, 2'}"
]
},
-"execution_count": 10,
+"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"query = \"What did the president say about Justice Breyer\"\n",
"chain.combine_and_parse(**{\"docs\": docs, \"question\": query})"
]
},
{
"cell_type": "markdown",
"id": "ae2f6d97",
"metadata": {},
"source": [
"**Intermediate Steps**\n",
"\n",
"We can also return the intermediate steps for `map_reduce` chains, should we want to inspect them. This is done with the `return_map_steps` variable."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "15af265f",
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "21b136e5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'map_steps': [' None', ' None', ' None'],\n",
" 'output_text': ' The president did not mention Justice Breyer.\\nSOURCES: 0, 1, 2'}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
@@ -195,7 +223,7 @@
},
{
"cell_type": "code",
-"execution_count": 11,
+"execution_count": 12,
"id": "904835c8",
"metadata": {},
"outputs": [],
@@ -205,30 +233,75 @@
},
{
"cell_type": "code",
-"execution_count": 12,
+"execution_count": 15,
"id": "f60875c6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
-"{'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament, which focused on building a coalition of freedom-loving nations to confront Putin, unifying European allies, countering Russia's lies with truth, and enforcing powerful economic sanctions. Source: 2\"}"
+"{'answer': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament. He spoke about the struggle of the Ukrainian people, the importance of the NATO Alliance, and the need for American diplomacy and resolve. He discussed Putin's premeditated and unprovoked attack on Ukraine, and the efforts to build a coalition of freedom-loving nations to confront Putin. He also discussed how the free world is holding Putin accountable, and the countries that are part of the coalition, including France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and Switzerland. Source: 1, 2\",\n",
+" 'sources': ''}"
]
},
-"execution_count": 12,
+"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"query = \"What did the president say about Justice Breyer\"\n",
"chain.combine_and_parse(**{\"docs\": docs, \"question\": query})"
]
},
{
"cell_type": "markdown",
"id": "ac357530",
"metadata": {},
"source": [
"**Intermediate Steps**\n",
"\n",
"We can also return the intermediate steps for `refine` chains, should we want to inspect them. This is done with the `return_refine_steps` variable."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "3396a773",
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "be5739ef",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'refine_steps': ['\\nThe president did not mention Justice Breyer in the given context.',\n",
" '\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament. He spoke about the struggle of the Ukrainian people, the importance of the NATO Alliance, and the need for American diplomacy and resolve. Source: 1',\n",
" \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament. He spoke about the struggle of the Ukrainian people, the importance of the NATO Alliance, and the need for American diplomacy and resolve. He discussed Putin's premeditated and unprovoked attack on Ukraine, and the efforts to build a coalition of freedom-loving nations to confront Putin. He also discussed how the free world is holding Putin accountable, and the countries that are part of the coalition, including France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and Switzerland. Source: 1, 2\"],\n",
" 'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament. He spoke about the struggle of the Ukrainian people, the importance of the NATO Alliance, and the need for American diplomacy and resolve. He discussed Putin's premeditated and unprovoked attack on Ukraine, and the efforts to build a coalition of freedom-loving nations to confront Putin. He also discussed how the free world is holding Putin accountable, and the countries that are part of the coalition, including France, Germany, Italy, the United Kingdom, Canada, Japan, Korea, Australia, New Zealand, and Switzerland. Source: 1, 2\"}"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
-"id": "929620d0",
+"id": "7355fedd",
"metadata": {},
"outputs": [],
"source": []
@@ -173,6 +173,51 @@
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
{
"cell_type": "markdown",
"id": "31478d32",
"metadata": {},
"source": [
"**Intermediate Steps**\n",
"\n",
"We can also return the intermediate steps for `map_reduce` chains, should we want to inspect them. This is done with the `return_map_steps` variable."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "452c8680",
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "90b47a75",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'map_steps': [{'text': ' \"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service.\"'},\n",
" {'text': ' None'},\n",
" {'text': ' None'},\n",
" {'text': ' None'}],\n",
" 'output_text': ' The president said, \"Justice Breyer, thank you for your service.\"'}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
{
"cell_type": "markdown",
"id": "6ea50ad0",
@@ -215,10 +260,55 @@
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
{
"cell_type": "markdown",
"id": "f95dfb2e",
"metadata": {},
"source": [
"**Intermediate Steps**\n",
"\n",
"We can also return the intermediate steps for `refine` chains, should we want to inspect them. This is done with the `return_refine_steps` variable."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "a5c64200",
"metadata": {},
"outputs": [],
"source": [
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "817546ac",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'refine_steps': ['\\nThe president said that he wanted to honor Justice Breyer for his dedication to serving the country and his legacy of excellence.',\n",
" '\\n\\nThe president said that he wanted to honor Justice Breyer for his dedication to serving the country, his legacy of excellence, and his commitment to advancing liberty and justice.',\n",
" '\\n\\nThe president said that he wanted to honor Justice Breyer for his dedication to serving the country, his legacy of excellence, and his commitment to advancing liberty and justice, as well as for his commitment to protecting the rights of LGBTQ+ Americans and his support for the bipartisan Equality Act.',\n",
" '\\n\\nThe president said that he wanted to honor Justice Breyer for his dedication to serving the country, his legacy of excellence, and his commitment to advancing liberty and justice, as well as for his commitment to protecting the rights of LGBTQ+ Americans and his support for the bipartisan Equality Act. He also mentioned his plan to lower costs to give families a fair shot, lower the deficit, and go after criminals who stole pandemic relief funds. He also announced that the Justice Department will name a chief prosecutor for pandemic fraud.'],\n",
" 'output_text': '\\n\\nThe president said that he wanted to honor Justice Breyer for his dedication to serving the country, his legacy of excellence, and his commitment to advancing liberty and justice, as well as for his commitment to protecting the rights of LGBTQ+ Americans and his support for the bipartisan Equality Act. He also mentioned his plan to lower costs to give families a fair shot, lower the deficit, and go after criminals who stole pandemic relief funds. He also announced that the Justice Department will name a chief prosecutor for pandemic fraud.'}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain({\"input_documents\": docs, \"question\": query}, return_only_outputs=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
-"id": "49e9c6d7",
+"id": "97d335c6",
"metadata": {},
"outputs": [],
"source": []
@@ -160,6 +160,50 @@
"chain.run(docs)"
]
},
{
"cell_type": "markdown",
"id": "d0c2a6d3",
"metadata": {},
"source": [
"**Intermediate Steps**\n",
"\n",
"We can also return the intermediate steps for `map_reduce` chains, should we want to inspect them. This is done with the `return_map_steps` variable."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "d9cfc24e",
"metadata": {},
"outputs": [],
"source": [
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"map_reduce\", return_map_steps=True)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c7dff5e8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'map_steps': [{'text': \" In response to Russia's aggression in Ukraine, the United States has united with other freedom-loving nations to impose economic sanctions and hold Putin accountable. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs and seize their ill-gotten gains.\"},\n",
" {'text': ' The United States and its European allies are taking action to punish Russia for its invasion of Ukraine, including seizing assets, closing off airspace, and providing economic and military assistance to Ukraine. The US is also mobilizing forces to protect NATO countries and has released 30 million barrels of oil from its Strategic Petroleum Reserve to help blunt gas prices. The world is uniting in support of Ukraine and democracy, and the US stands with its Ukrainian allies.'},\n",
" {'text': \" President Biden and Vice President Harris ran for office with a new economic vision for America, and have since passed the American Rescue Plan and the Bipartisan Infrastructure Law to help struggling families and rebuild America's infrastructure. This includes creating jobs, modernizing roads, airports, ports, and waterways, replacing lead pipes, providing affordable high-speed internet, and investing in American products to support American jobs.\"}],\n",
" 'output_text': \" In response to Russia's aggression in Ukraine, the United States and its allies have imposed economic sanctions and are taking other measures to hold Putin accountable. The US is also providing economic and military assistance to Ukraine, protecting NATO countries, and passing legislation to help struggling families and rebuild America's infrastructure. The world is uniting in support of Ukraine and democracy, and the US stands with its Ukrainian allies.\"}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
},
{
"cell_type": "markdown",
"id": "f61350f9",
@@ -201,10 +245,54 @@
"chain.run(docs)"
]
},
{
"cell_type": "markdown",
"id": "84e9567e",
"metadata": {},
"source": [
"**Intermediate Steps**\n",
"\n",
"We can also return the intermediate steps for `refine` chains, should we want to inspect them. This is done with the `return_refine_steps` variable."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "cd49ac4d",
"metadata": {},
"outputs": [],
"source": [
"chain = load_summarize_chain(OpenAI(temperature=0), chain_type=\"refine\", return_refine_steps=True)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "6a74029d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'refine_steps': [\" In response to Russia's aggression in Ukraine, the United States has united with other freedom-loving nations to impose economic sanctions and hold Putin accountable. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs and seize their ill-gotten gains.\",\n",
" \"\\n\\nIn response to Russia's aggression in Ukraine, the United States has united with other freedom-loving nations to impose economic sanctions and hold Putin accountable. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs and seize their ill-gotten gains. We are joining with our European allies to find and seize the assets of Russian oligarchs, including yachts, luxury apartments, and private jets. The U.S. is also closing off American airspace to all Russian flights, further isolating Russia and adding an additional squeeze on their economy. The U.S. and its allies are providing support to the Ukrainians in their fight for freedom, including military, economic, and humanitarian assistance. The U.S. is also mobilizing ground forces, air squadrons, and ship deployments to protect NATO countries. The U.S. and its allies are also releasing 60 million barrels of oil from reserves around the world, with the U.S. contributing 30 million barrels from its own Strategic Petroleum Reserve. Putin's war on Ukraine has left Russia weaker and the rest of the world stronger, with the world uniting in support of democracy and peace.\",\n",
" \"\\n\\nIn response to Russia's aggression in Ukraine, the United States has united with other freedom-loving nations to impose economic sanctions and hold Putin accountable. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs and seize their ill-gotten gains. We are joining with our European allies to find and seize the assets of Russian oligarchs, including yachts, luxury apartments, and private jets. The U.S. is also closing off American airspace to all Russian flights, further isolating Russia and adding an additional squeeze on their economy. The U.S. and its allies are providing support to the Ukrainians in their fight for freedom, including military, economic, and humanitarian assistance. The U.S. is also mobilizing ground forces, air squadrons, and ship deployments to protect NATO countries. The U.S. and its allies are also releasing 60 million barrels of oil from reserves around the world, with the U.S. contributing 30 million barrels from its own Strategic Petroleum Reserve. In addition, the U.S. has passed the American Rescue Plan to provide immediate economic relief for tens of millions of Americans, and the Bipartisan Infrastructure Law to rebuild America and create jobs. This includes investing\"],\n",
" 'output_text': \"\\n\\nIn response to Russia's aggression in Ukraine, the United States has united with other freedom-loving nations to impose economic sanctions and hold Putin accountable. The U.S. Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs and seize their ill-gotten gains. We are joining with our European allies to find and seize the assets of Russian oligarchs, including yachts, luxury apartments, and private jets. The U.S. is also closing off American airspace to all Russian flights, further isolating Russia and adding an additional squeeze on their economy. The U.S. and its allies are providing support to the Ukrainians in their fight for freedom, including military, economic, and humanitarian assistance. The U.S. is also mobilizing ground forces, air squadrons, and ship deployments to protect NATO countries. The U.S. and its allies are also releasing 60 million barrels of oil from reserves around the world, with the U.S. contributing 30 million barrels from its own Strategic Petroleum Reserve. In addition, the U.S. has passed the American Rescue Plan to provide immediate economic relief for tens of millions of Americans, and the Bipartisan Infrastructure Law to rebuild America and create jobs. This includes investing\"}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain({\"input_documents\": docs}, return_only_outputs=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
-"id": "0da92750",
+"id": "db1ed69d",
"metadata": {},
"outputs": [],
"source": []
@@ -73,7 +73,7 @@
},
{
"cell_type": "code",
-"execution_count": 9,
+"execution_count": 3,
"id": "8aa571ae",
"metadata": {},
"outputs": [],
@@ -1,11 +1,22 @@
Memory
======

-The examples here are all related to working with the concept of Memory in LangChain.
+The examples here all highlight how to use memory in different ways.

`Adding Memory <memory/adding_memory.ipynb>`_: How to add a memory component to any chain.

`Conversational Memory Types <memory/conversational_memory.ipynb>`_: An overview of the different types of conversation memory you can load and use with a conversation-like chain.

`Conversational Memory Customization <memory/conversational_customization.ipynb>`_: How to customize existing conversation memory components.

`Custom Memory <memory/custom_memory.ipynb>`_: How to write your own custom memory component.

`Adding Memory to Agents <memory/agent_with_memory.ipynb>`_: How to add a memory component to any agent.


.. toctree::
   :maxdepth: 1
   :glob:
   :caption: Memory
   :hidden:

   memory/*
docs/examples/memory/conversational_customization.ipynb (new file, 262 lines)
@@ -0,0 +1,262 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "69e35d6f",
"metadata": {},
"source": [
"# Conversational Memory Customization\n",
"\n",
"This notebook walks through a few ways to customize conversational memory.\n",
"\n",
"The main way to do so is by changing the AI prefix in the conversation summary. By default, this is set to \"AI\", but you can set this to be anything you want. Note that if you change this, you should also change the prompt used in the chain to reflect this naming change. Let's walk through an example below."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0f964494",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.chains import ConversationChain\n",
"from langchain.chains.conversation.memory import ConversationBufferMemory\n",
"\n",
"\n",
"llm = OpenAI(temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "d0e66d87",
"metadata": {},
"outputs": [],
"source": [
"# Here it is by default set to \"AI\"\n",
"conversation = ConversationChain(\n",
" llm=llm, \n",
" verbose=True, \n",
" memory=ConversationBufferMemory()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f8fa6999",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Hi there! It's nice to meet you. How can I help you today?\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"Hi there!\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "de213386",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI: Hi there! It's nice to meet you. How can I help you today?\n",
"Human: What's the weather?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' The current weather is sunny and warm with a temperature of 75 degrees Fahrenheit. The forecast for the next few days is sunny with temperatures in the mid-70s.'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"What's the weather?\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "585949eb",
"metadata": {},
"outputs": [],
"source": [
"# Now we can override it and set it to \"AI Assistant\"\n",
"from langchain.prompts.prompt import PromptTemplate\n",
"\n",
"template = \"\"\"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"{history}\n",
"Human: {input}\n",
"AI Assistant:\"\"\"\n",
"PROMPT = PromptTemplate(\n",
" input_variables=[\"history\", \"input\"], template=template\n",
")\n",
"conversation = ConversationChain(\n",
" prompt=PROMPT,\n",
" llm=llm, \n",
" verbose=True, \n",
" memory=ConversationBufferMemory(ai_prefix=\"AI Assistant\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "1bb9bc53",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Hi there! It's nice to meet you. How can I help you today?\""
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"Hi there!\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "d9241923",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI Assistant: Hi there! It's nice to meet you. How can I help you today?\n",
"Human: What's the weather?\n",
"AI Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' The current weather is sunny and warm with a temperature of 75 degrees Fahrenheit. The forecast for the rest of the day is sunny with a high of 78 degrees and a low of 65 degrees.'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"What's the weather?\")"
]
},
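{
"cell_type": "markdown",
"id": "b2f1d3aa",
"metadata": {},
"source": [
"The human prefix can presumably be customized in the same way. The sketch below assumes the memory exposes a `human_prefix` parameter mirroring `ai_prefix` (an assumption, not something demonstrated above); as before, the prompt must be updated to match the new name."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c4d5e6f7",
"metadata": {},
"outputs": [],
"source": [
"# Sketch only: assumes a `human_prefix` parameter symmetric to `ai_prefix`.\n",
"template = \"\"\"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"{history}\n",
"Friend: {input}\n",
"AI:\"\"\"\n",
"PROMPT = PromptTemplate(input_variables=[\"history\", \"input\"], template=template)\n",
"conversation = ConversationChain(\n",
"    prompt=PROMPT,\n",
"    llm=llm,\n",
"    verbose=True,\n",
"    memory=ConversationBufferMemory(human_prefix=\"Friend\")\n",
")"
]
},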
{
"cell_type": "code",
"execution_count": null,
"id": "1023b6ef",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
docs/examples/memory/conversational_memory.ipynb (new file, 724 lines)
@@ -0,0 +1,724 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d31df93e",
"metadata": {},
"source": [
"# Conversational Memory\n",
"\n",
"This notebook walks through the different types of memory you can use with the `ConversationChain`."
]
},
{
"cell_type": "markdown",
"id": "d051c1da",
"metadata": {},
"source": [
"### ConversationBufferMemory (default)\n",
"By default, the `ConversationChain` uses `ConversationBufferMemory`: a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed. Let's take a look at using this chain (setting `verbose=True` so we can see the prompt)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "54301321",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.chains import ConversationChain\n",
"from langchain.chains.conversation.memory import ConversationBufferMemory\n",
"\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"conversation = ConversationChain(\n",
" llm=llm, \n",
" verbose=True, \n",
" memory=ConversationBufferMemory()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ae046bff",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Hi there! It's nice to meet you. How can I help you today?\""
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"Hi there!\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d8e2a6ff",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI: Hi there! It's nice to meet you. How can I help you today?\n",
"Human: I'm doing well! Just having a conversation with an AI.\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" That's great! It's always nice to have a conversation with someone new. What would you like to talk about?\""
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "15eda316",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI: Hi there! It's nice to meet you. How can I help you today?\n",
"Human: I'm doing well! Just having a conversation with an AI.\n",
"AI: That's great! It's always nice to have a conversation with someone new. What would you like to talk about?\n",
"Human: Tell me about yourself.\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Sure! I'm an AI created to help people with their everyday tasks. I'm programmed to understand natural language and provide helpful information. I'm also constantly learning and updating my knowledge base so I can provide more accurate and helpful answers.\""
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"Tell me about yourself.\")"
]
},
{
"cell_type": "markdown",
"id": "4fad9448",
"metadata": {},
"source": [
"### ConversationSummaryMemory\n",
"Now let's take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation over time. This can be useful for condensing information from the conversation.\n",
"\n",
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "f60a2fe8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.conversation.memory import ConversationSummaryMemory"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b7274f2c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi, what's up?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary = ConversationChain(\n",
" llm=llm, \n",
" memory=ConversationSummaryMemory(llm=OpenAI()),\n",
" verbose=True\n",
")\n",
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "a6b6b88f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"The human greeted the AI, to which the AI replied that it was doing great and was helping a customer with a technical issue.\n",
"Human: Tell me more about it!\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' Sure! The customer was having trouble with their computer not connecting to the internet. I was able to help them troubleshoot the issue and get them connected. It was a great feeling to be able to help them out!'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"Tell me more about it!\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "dad869fe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"The human greeted the AI, to which the AI replied that it was doing great and was helping a customer with a technical issue. The AI explained the customer was having trouble with their computer not connecting to the internet and that it was a great feeling to be able to help them out and get them connected.\n",
"Human: Very cool -- what is the scope of the project?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" The scope of the project is to help the customer troubleshoot their computer and get it connected to the internet. I'm currently helping them identify the source of the issue and then providing them with the necessary steps to get their computer connected.\""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"Very cool -- what is the scope of the project?\")"
]
},
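{
"cell_type": "markdown",
"id": "9a8b7c6d",
"metadata": {},
"source": [
"If you want to look at the condensed history directly, it should be available on the memory object itself (this assumes the running summary is kept on a `buffer` attribute, as in the version of LangChain used here):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5e4f3a2b",
"metadata": {},
"outputs": [],
"source": [
"# Inspect the running summary the memory maintains between turns.\n",
"# Assumes ConversationSummaryMemory stores it on `.buffer`.\n",
"print(conversation_with_summary.memory.buffer)"
]
},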
{
"cell_type": "markdown",
"id": "6eecf9d9",
"metadata": {},
"source": [
"### ConversationBufferWindowMemory\n",
"\n",
"`ConversationBufferWindowMemory` keeps a list of the interactions of the conversation over time. It only uses the last K interactions. This can be useful for keeping a sliding window of the most recent interactions, so the buffer does not get too large.\n",
"\n",
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
]
},
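{
"cell_type": "markdown",
"id": "aa11bb22",
"metadata": {},
"source": [
"Before the chain example, here is a minimal plain-Python sketch of the windowing idea itself (illustrative only, not LangChain's actual implementation): only the last `k` exchanges survive to be formatted into the prompt."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc33dd44",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: keep at most the last k (human, ai) exchanges.\n",
"from collections import deque\n",
"\n",
"class WindowBuffer:\n",
"    def __init__(self, k=2):\n",
"        self.window = deque(maxlen=k)  # older exchanges fall off automatically\n",
"\n",
"    def add(self, human, ai):\n",
"        self.window.append((human, ai))\n",
"\n",
"    def as_prompt_context(self):\n",
"        return \"\\n\".join(f\"Human: {h}\\nAI: {a}\" for h, a in self.window)\n",
"\n",
"buf = WindowBuffer(k=2)\n",
"buf.add(\"Hi, what's up?\", \"Helping a customer.\")\n",
"buf.add(\"What's their issue?\", \"Wi-Fi trouble.\")\n",
"buf.add(\"Is it going well?\", \"Yes, nearly fixed.\")\n",
"print(buf.as_prompt_context())  # only the last 2 exchanges remain"
]
},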
{
"cell_type": "code",
"execution_count": 3,
"id": "2dac7769",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.conversation.memory import ConversationBufferWindowMemory"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0b9da4cd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi, what's up?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary = ConversationChain(\n",
" llm=llm, \n",
" # We set a low k=2, to only keep the last 2 interactions in memory\n",
" memory=ConversationBufferWindowMemory(k=2), \n",
" verbose=True\n",
")\n",
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "90f73431",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"Human: Hi, what's up?\n",
"AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\n",
"Human: What's their issues?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"What's their issues?\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "cbb499e7",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"Human: Hi, what's up?\n",
"AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\n",
"Human: What's their issues?\n",
"AI: The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.\n",
"Human: Is it going well?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" Yes, it's going well so far. We've already identified the problem and are now working on a solution.\""
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"Is it going well?\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d209cfe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"Human: What's their issues?\n",
"AI: The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.\n",
"Human: Is it going well?\n",
"AI: Yes, it's going well so far. We've already identified the problem and are now working on a solution.\n",
"Human: What's the solution?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" The solution is to reset the router and reconfigure the settings. We're currently in the process of doing that.\""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Notice here that the first interaction does not appear.\n",
"conversation_with_summary.predict(input=\"What's the solution?\")"
]
},
{
"cell_type": "markdown",
"id": "a6d2569f",
"metadata": {},
"source": [
"### ConversationSummaryBufferMemory\n",
"\n",
"`ConversationSummaryBufferMemory` combines the last two ideas. It keeps a buffer of recent interactions in memory, but rather than just completely flushing old interactions it compiles them into a summary and uses both. Unlike the previous implementation though, it uses token length rather than number of interactions to determine when to flush interactions.\n",
"\n",
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
]
},
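{
"cell_type": "markdown",
"id": "ee55ff66",
"metadata": {},
"source": [
"Before the chain example, here is a rough plain-Python sketch of that token-based pruning rule (illustrative only: the real class delegates token counting to the LLM and condenses old turns with a summarization prompt, whereas this sketch uses a whitespace split and simple concatenation as stand-ins):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77889900",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch: once the buffered turns exceed a token budget,\n",
"# pop the oldest turns and fold them into a running summary.\n",
"def count_tokens(text):\n",
"    return len(text.split())  # stand-in for a real tokenizer\n",
"\n",
"def prune(turns, summary, max_token_limit=40):\n",
"    # turns: list of strings like \"Human: ...\" / \"AI: ...\"\n",
"    while turns and sum(count_tokens(t) for t in turns) > max_token_limit:\n",
"        oldest = turns.pop(0)\n",
"        # The real chain condenses `oldest` into the summary with an LLM;\n",
"        # concatenation stands in for that step here.\n",
"        summary = (summary + \" \" + oldest).strip()\n",
"    return turns, summary\n",
"\n",
"turns = [\"Human: Hi, what's up?\",\n",
"         \"AI: Helping a customer with a technical issue.\",\n",
"         \"Human: Just working on writing some documentation!\",\n",
"         \"AI: What kind of documentation are you writing?\"]\n",
"turns, summary = prune(turns, \"\", max_token_limit=12)\n",
"print(summary)  # oldest turns, folded into the summary\n",
"print(turns)    # recent turns, kept verbatim"
]
},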
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "e583a661",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.conversation.memory import ConversationSummaryBufferMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "ebd68c10",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary = ConversationChain(\n",
|
||||
" llm=llm, \n",
|
||||
" # We set a very low max_token_limit for the purposes of testing.\n",
|
||||
" memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40),\n",
|
||||
" verbose=True\n",
|
||||
")\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "86207a61",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\n",
|
||||
"Human: Just working on writing some documentation!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' That sounds like a lot of work. What kind of documentation are you writing?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Just working on writing some documentation!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "76a0ab39",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human asked the AI what it was up to and the AI replied that it was helping a customer with a technical issue.\n",
|
||||
"Human: Just working on writing some documentation!\n",
|
||||
"AI: That sounds like a lot of work. What kind of documentation are you writing?\n",
|
||||
"Human: For LangChain! Have you heard of it?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Yes, I have heard of LangChain. It is a blockchain-based language learning platform. Can you tell me more about the documentation you are writing?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see here that there is a summary of the conversation and then some previous interactions\n",
|
||||
"conversation_with_summary.predict(input=\"For LangChain! Have you heard of it?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "8c669db1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human asked the AI what it was up to and the AI replied that it was helping a customer with a technical issue. The human mentioned they were writing documentation for LangChain, a blockchain-based language learning platform, and the AI had heard of it and asked for more information.\n",
|
||||
"\n",
|
||||
"Human: Haha nope, although a lot of people confuse it for that\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished ConversationChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Oh, I see. So what is LangChain then?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see here that the summary and the buffer are updated\n",
|
||||
"conversation_with_summary.predict(input=\"Haha nope, although a lot of people confuse it for that\")"
|
||||
]
|
||||
},
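For reference, here is a minimal standalone sketch (not one of the cells above; it assumes the same imports as this notebook and an `OPENAI_API_KEY` in the environment) of wiring `ConversationSummaryBufferMemory` into a `ConversationChain`:

```python
from langchain import OpenAI, ConversationChain
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory

llm = OpenAI(temperature=0)
conversation_with_summary = ConversationChain(
    llm=llm,
    memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40),
    verbose=True,
)
conversation_with_summary.predict(input="Hi, what's up?")
# Once the buffer exceeds max_token_limit, older turns are summarized into
# memory.moving_summary_buffer while recent turns stay verbatim in memory.buffer.
```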
{
"cell_type": "code",
"execution_count": null,
"id": "f71f40ba",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@ -1,333 +1,197 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d31df93e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Memory\n",
|
||||
"So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of \"memory\" so that it may remember information about its previous interactions. The clearest and simple example of this is when designing a chatbot - you want it to remember previous messages so it can use context from that to have a better conversation. This would be a type of \"short-term memory\". On the more complex side, you could imagine a chain/agent remembering key pieces of information over time - this would be a form of \"long-term memory\". For more concrete ideas on the latter, see this [awesome paper](https://memprompt.com/).\n",
|
||||
"\n",
|
||||
"LangChain provides several specially created chains just for this purpose. This notebook walks through using one of those chains (the `ConversationChain`) with two different types of memory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d051c1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ConversationChain with default memory\n",
|
||||
"By default, the `ConversationChain` has a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed. Let's take a look at using this chain (setting `verbose=True` so we can see the prompt)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ae046bff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Hello! How are you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import OpenAI, ConversationChain\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"conversation = ConversationChain(llm=llm, verbose=True)\n",
|
||||
"\n",
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d8e2a6ff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hello! How are you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" That's great! What would you like to talk about?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "15eda316",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hello! How are you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI: That's great! What would you like to talk about?\n",
|
||||
"Human: Tell me about yourself.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' I am an AI created to provide information and support to humans. I enjoy learning and exploring new things.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Tell me about yourself.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4fad9448",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ConversationChain with ConversationSummaryMemory\n",
|
||||
"Now let's take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation over time. This can be useful for condensing information from the conversation over time.\n",
|
||||
"\n",
|
||||
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f60a2fe8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.conversation.memory import ConversationSummaryMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "b7274f2c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nI'm doing well, thank you for asking. I'm currently working on a project that I'm really excited about.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary = ConversationChain(llm=llm, memory=ConversationSummaryMemory(llm=OpenAI()), verbose=True)\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a6b6b88f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about.\n",
|
||||
"Human: Tell me more about it!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nI'm working on a project that I'm really excited about. It's a lot of work, but I think it's going to be really great when it's finished. I can't wait to show it to you!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Tell me more about it!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "dad869fe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about. The AI said that the project is a lot of work, but it is going to be great when it is finished.\n",
|
||||
"Human: Very cool -- what is the scope of the project?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe project is quite large in scope. It involves a lot of data analysis and work with artificial intelligence algorithms.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Very cool -- what is the scope of the project?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5c8735cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### More Resources on Memory\n",
|
||||
"\n",
|
||||
"This just scratches the surface of what you can do with memory. For more examples on things like how to implement custom memory classes, how to add memory to a custom LLM chain and how to use memory with an agent, please see the [How-To: Memory](../../examples/memory) section. For even more advanced ideas on memory (which will hopefully be included in LangChain soon!) see the [MemPrompt](https://memprompt.com/) paper."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "436dda66",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cell_type": "markdown",
"id": "5c8735cc",
"metadata": {},
"source": [
"### More Resources on Memory\n",
"\n",
"This just scratches the surface of what you can do with memory. \n",
"\n",
"For more concrete examples of conversational memory, please see [this notebook](../../examples/memory/conversational_memory.ipynb).\n",
"\n",
"For more examples on things like how to implement custom memory classes, how to add memory to a custom LLM chain, and how to use memory with an agent, please see the [How-To: Memory](../../examples/memory) section. \n",
"\n",
"For even more advanced ideas on memory (which will hopefully be included in LangChain soon!) see the [MemPrompt](https://memprompt.com/) paper."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "436dda66",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@ -43,8 +43,8 @@ class BaseCombineDocumentsChain(Chain, BaseModel, ABC):
    def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
        """Combine documents into a single string."""

    @abstractmethod
    @property
    @abstractmethod
    def output_parser(self) -> Optional[BaseOutputParser]:
        """Output parser to use for results of combine_docs."""
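This hunk turns `output_parser` into an abstract property on the base combine-documents chain. A hedged sketch of what a subclass now has to provide (the subclass name here is hypothetical, not from this commit):

```python
from typing import Optional

from langchain.prompts.base import BaseOutputParser, RegexParser


class SourcesCombineChain:  # hypothetical stand-in for a BaseCombineDocumentsChain subclass
    @property
    def output_parser(self) -> Optional[BaseOutputParser]:
        # Returning None would mean results of combine_docs are left unparsed.
        return RegexParser(
            regex=r"(.*?)\nSOURCES: (.*)",
            output_keys=["answer", "sources"],
            default_output_key="answer",
        )
```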
@ -153,7 +153,8 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel):
            result_docs, **kwargs
        )
        if self.return_map_steps:
            extra_return_dict = {"map_steps": results}
            _results = [r[self.llm_chain.output_key] for r in results]
            extra_return_dict = {"map_steps": _results}
        else:
            extra_return_dict = {}
        output, _ = self.combine_document_chain.combine_docs(result_docs, **kwargs)
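A toy illustration of the fix above: each map step returns a full output dict, and `map_steps` should surface only the text stored under the chain's output key (the values here are made up):

```python
results = [{"text": "extract one"}, {"text": "extract two"}]
output_key = "text"  # stands in for self.llm_chain.output_key
_results = [r[output_key] for r in results]
assert _results == ["extract one", "extract two"]
```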
@ -22,6 +22,8 @@ def _get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -
class ConversationBufferMemory(Memory, BaseModel):
    """Buffer for storing conversation memory."""

    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    buffer: str = ""
    memory_key: str = "history"  #: :meta private:

@ -43,7 +45,7 @@ class ConversationBufferMemory(Memory, BaseModel):
        if len(outputs) != 1:
            raise ValueError(f"One output key expected, got {outputs.keys()}")
        human = "Human: " + inputs[prompt_input_key]
        ai = "AI: " + outputs[list(outputs.keys())[0]]
        ai = f"{self.ai_prefix}: " + outputs[list(outputs.keys())[0]]
        self.buffer += "\n" + "\n".join([human, ai])

    def clear(self) -> None:
@ -51,9 +53,11 @@ class ConversationBufferMemory(Memory, BaseModel):
        self.buffer = ""


class ConversationalBufferWindowMemory(Memory, BaseModel):
class ConversationBufferWindowMemory(Memory, BaseModel):
    """Buffer for storing conversation memory."""

    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    buffer: List[str] = Field(default_factory=list)
    memory_key: str = "history"  #: :meta private:
    k: int = 5
@ -76,7 +80,7 @@ class ConversationalBufferWindowMemory(Memory, BaseModel):
        if len(outputs) != 1:
            raise ValueError(f"One output key expected, got {outputs.keys()}")
        human = "Human: " + inputs[prompt_input_key]
        ai = "AI: " + outputs[list(outputs.keys())[0]]
        ai = f"{self.ai_prefix}: " + outputs[list(outputs.keys())[0]]
        self.buffer.append("\n".join([human, ai]))

    def clear(self) -> None:
@ -84,10 +88,16 @@ class ConversationalBufferWindowMemory(Memory, BaseModel):
        self.buffer = []


# For legacy naming reasons
ConversationalBufferWindowMemory = ConversationBufferWindowMemory


class ConversationSummaryMemory(Memory, BaseModel):
    """Conversation summarizer to memory."""

    buffer: str = ""
    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    llm: BaseLLM
    prompt: BasePromptTemplate = SUMMARY_PROMPT
    memory_key: str = "history"  #: :meta private:
@ -122,7 +132,7 @@ class ConversationSummaryMemory(Memory, BaseModel):
        if len(outputs) != 1:
            raise ValueError(f"One output key expected, got {outputs.keys()}")
        human = f"Human: {inputs[prompt_input_key]}"
        ai = f"AI: {list(outputs.values())[0]}"
        ai = f"{self.ai_prefix}: {list(outputs.values())[0]}"
        new_lines = "\n".join([human, ai])
        chain = LLMChain(llm=self.llm, prompt=self.prompt)
        self.buffer = chain.predict(summary=self.buffer, new_lines=new_lines)
@ -130,3 +140,73 @@ class ConversationSummaryMemory(Memory, BaseModel):
    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = ""


class ConversationSummaryBufferMemory(Memory, BaseModel):
    """Buffer with summarizer for storing conversation memory."""

    buffer: List[str] = Field(default_factory=list)
    max_token_limit: int = 2000
    moving_summary_buffer: str = ""
    llm: BaseLLM
    prompt: BasePromptTemplate = SUMMARY_PROMPT
    memory_key: str = "history"
    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer."""
        if self.moving_summary_buffer == "":
            return {self.memory_key: "\n".join(self.buffer)}
        memory_val = self.moving_summary_buffer + "\n" + "\n".join(self.buffer)
        return {self.memory_key: memory_val}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def get_num_tokens_list(self, arr: List[str]) -> List[int]:
        """Get list of number of tokens in each string in the input array."""
        return [self.llm.get_num_tokens(x) for x in arr]

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        if len(outputs) != 1:
            raise ValueError(f"One output key expected, got {outputs.keys()}")
        human = f"Human: {inputs[prompt_input_key]}"
        ai = f"{self.ai_prefix}: {list(outputs.values())[0]}"
        new_lines = "\n".join([human, ai])
        self.buffer.append(new_lines)
        # Prune buffer if it exceeds max token limit
        curr_buffer_length = sum(self.get_num_tokens_list(self.buffer))
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            while curr_buffer_length > self.max_token_limit:
                pruned_memory.append(self.buffer.pop(0))
                curr_buffer_length = sum(self.get_num_tokens_list(self.buffer))
            chain = LLMChain(llm=self.llm, prompt=self.prompt)
            self.moving_summary_buffer = chain.predict(
                summary=self.moving_summary_buffer, new_lines=("\n".join(pruned_memory))
            )

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = []
        self.moving_summary_buffer = ""
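To see when the pruning above kicks in, here is a sketch of the same loop with a stub whitespace token counter standing in for `llm.get_num_tokens`:

```python
def num_tokens(s: str) -> int:
    # Stub: real counting goes through llm.get_num_tokens.
    return len(s.split())

buffer = ["Human: hi\nAI: hello there", "Human: tell me more\nAI: sure"]
max_token_limit = 6
pruned_memory = []
while sum(num_tokens(x) for x in buffer) > max_token_limit:
    pruned_memory.append(buffer.pop(0))
# pruned_memory is then summarized via the SUMMARY_PROMPT chain and folded
# into moving_summary_buffer; the most recent turns stay verbatim in buffer.
assert pruned_memory == ["Human: hi\nAI: hello there"]
assert buffer == ["Human: tell me more\nAI: sure"]
```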
@ -19,7 +19,7 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
)
from langchain.docstore.document import Document
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.base import BasePromptTemplate, RegexParser


class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
@ -29,8 +29,6 @@ class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
    """Chain to use to combine documents."""
    question_key: str = "question"  #: :meta private:
    input_docs_key: str = "docs"  #: :meta private:
    answer_key: str = "answer"  #: :meta private:
    sources_answer_key: str = "sources"  #: :meta private:

    @classmethod
    def from_llm(
@ -79,7 +77,13 @@ class BaseQAWithSourcesChain(Chain, BaseModel, ABC):

        :meta private:
        """
        return [self.answer_key, self.sources_answer_key]
        output_parser = self.combine_document_chain.output_parser
        if not isinstance(output_parser, RegexParser):
            raise ValueError(
                "Output parser of combine_document_chain should be a RegexParser,"
                f" got {output_parser}"
            )
        return output_parser.output_keys

    @root_validator(pre=True)
    def validate_question_chain(cls, values: Dict) -> Dict:
@ -94,9 +98,14 @@ class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
        return values

    @root_validator()
    def validate_combine_chain_can_be_constructed(cls, values: Dict) -> Dict:
        """Validate that the combine chain can be constructed."""
        # Try to construct the combine documents chains.
    def validate_combine_chain_output(cls, values: Dict) -> Dict:
        """Validate that the combine chain outputs a dictionary."""
        combine_docs_chain = values["combine_document_chain"]
        if not isinstance(combine_docs_chain.output_parser, RegexParser):
            raise ValueError(
                "Output parser of combine_document_chain should be a RegexParser,"
                f" got {combine_docs_chain.output_parser}"
            )

        return values

@ -106,12 +115,8 @@ class BaseQAWithSourcesChain(Chain, BaseModel, ABC):

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        docs = self._get_docs(inputs)
        answer, _ = self.combine_document_chain.combine_docs(docs, **inputs)
        if "\nSOURCES: " in answer:
            answer, sources = answer.split("\nSOURCES: ")
        else:
            sources = ""
        return {self.answer_key: answer, self.sources_answer_key: sources}
        answer, _ = self.combine_document_chain.combine_and_parse(docs, **inputs)
        return answer
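`combine_and_parse` is called above but its definition is not shown in this diff. Presumably it combines the documents and then runs the chain's `output_parser` over the resulting text; a rough sketch of that assumption:

```python
# Hedged sketch, not from this commit: what combine_and_parse presumably does.
def combine_and_parse(chain, docs, **kwargs):
    output, extra_return_dict = chain.combine_docs(docs, **kwargs)
    parsed = chain.output_parser.parse(output)  # e.g. {"answer": ..., "sources": ...}
    return parsed, extra_return_dict
```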
class QAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
@ -1,5 +1,12 @@
# flake8: noqa
from langchain.prompts import PromptTemplate
from langchain.prompts.base import RegexParser

output_parser = RegexParser(
    regex=r"(.*?)\nSOURCES: (.*)",
    output_keys=["answer", "sources"],
    default_output_key="answer",
)
|
||||
Return any relevant text verbatim.
|
||||
@ -46,7 +53,9 @@ QUESTION: {question}
|
||||
=========
|
||||
FINAL ANSWER:"""
|
||||
COMBINE_PROMPT = PromptTemplate(
|
||||
template=combine_prompt_template, input_variables=["summaries", "question"]
|
||||
template=combine_prompt_template,
|
||||
input_variables=["summaries", "question"],
|
||||
output_parser=output_parser,
|
||||
)
|
||||
|
||||
EXAMPLE_PROMPT = PromptTemplate(
|
||||
|
@ -1,5 +1,12 @@
# flake8: noqa
from langchain.prompts import PromptTemplate
from langchain.prompts.base import RegexParser

output_parser = RegexParser(
    regex=r"(.*?)\nSOURCES: (.*)",
    output_keys=["answer", "sources"],
    default_output_key="answer",
)

DEFAULT_REFINE_PROMPT_TMPL = (
    "The original question is as follows: {question}\n"
@ -17,6 +24,7 @@ DEFAULT_REFINE_PROMPT_TMPL = (
DEFAULT_REFINE_PROMPT = PromptTemplate(
    input_variables=["question", "existing_answer", "context_str"],
    template=DEFAULT_REFINE_PROMPT_TMPL,
    output_parser=output_parser,
)
@ -1,5 +1,12 @@
# flake8: noqa
from langchain.prompts import PromptTemplate
from langchain.prompts.base import RegexParser

output_parser = RegexParser(
    regex=r"(.*?)\nSOURCES: (.*)",
    output_keys=["answer", "sources"],
    default_output_key="answer",
)

template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
If you don't know the answer, just say that you don't know. Don't try to make up an answer.
@ -36,7 +43,11 @@ QUESTION: {question}
{summaries}
=========
FINAL ANSWER:"""
PROMPT = PromptTemplate(template=template, input_variables=["summaries", "question"])
PROMPT = PromptTemplate(
    template=template,
    input_variables=["summaries", "question"],
    output_parser=output_parser,
)

EXAMPLE_PROMPT = PromptTemplate(
    template="Content: {page_content}\nSource: {source}",
@ -1,24 +1,6 @@
# flake8: noqa
import re
from typing import Dict

from langchain.prompts import PromptTemplate
from langchain.prompts.base import BaseOutputParser


class QAGenerationOutputParser(BaseOutputParser):
    """Parse output in question/answer pair."""

    def parse(self, text: str) -> Dict[str, str]:
        regex = r"QUESTION: (.*?)\nANSWER: (.*)"
        match = re.search(regex, text)
        if match:
            question = match.group(1)
            answer = match.group(2)
            return {"query": question, "answer": answer}
        else:
            raise ValueError(f"Could not parse output: {text}")

from langchain.prompts.base import RegexParser

template = """You are a teacher coming up with questions to ask on a quiz.
Given the following document, please generate a question and answer based on that document.
@ -35,6 +17,9 @@ These questions should be detailed and be based explicitly on information in the
<Begin Document>
{doc}
<End Document>"""
PROMPT = PromptTemplate(
    input_variables=["doc"], template=template, output_parser=QAGenerationOutputParser()
output_parser = RegexParser(
    regex=r"QUESTION: (.*?)\nANSWER: (.*)", output_keys=["question", "answer"]
)
PROMPT = PromptTemplate(
    input_variables=["doc"], template=template, output_parser=output_parser
)
@ -1,5 +1,6 @@
"""BasePrompt schema definition."""
import json
import re
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union
@ -55,7 +56,7 @@ class BaseOutputParser(ABC):
        """Parse the output of an LLM call."""


class ListOutputParser(ABC):
class ListOutputParser(BaseOutputParser):
    """Class to parse the output of an LLM call to a list."""

    @abstractmethod
@ -63,6 +64,28 @@ class ListOutputParser(ABC):
        """Parse the output of an LLM call."""


class RegexParser(BaseOutputParser, BaseModel):
    """Class to parse the output into a dictionary."""

    regex: str
    output_keys: List[str]
    default_output_key: Optional[str] = None

    def parse(self, text: str) -> Dict[str, str]:
        """Parse the output of an LLM call."""
        match = re.search(self.regex, text)
        if match:
            return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
        else:
            if self.default_output_key is None:
                raise ValueError(f"Could not parse output: {text}")
            else:
                return {
                    key: text if key == self.default_output_key else ""
                    for key in self.output_keys
                }
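An illustrative check of the fallback path (not part of the commit): with a `default_output_key` set, text that does not match the regex is returned whole under that key.

```python
from langchain.prompts.base import RegexParser

parser = RegexParser(
    regex=r"(.*?)\nSOURCES: (.*)",
    output_keys=["answer", "sources"],
    default_output_key="answer",
)
# No "\nSOURCES: " line, so the regex does not match:
assert parser.parse("No sources here") == {"answer": "No sources here", "sources": ""}
# Without default_output_key, the same input would raise ValueError.
```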
class BasePromptTemplate(BaseModel, ABC):
    """Base prompt should expose the format method, returning a prompt."""
@ -92,6 +92,16 @@ class SerpAPIWrapper(BaseModel):
            toret = res["answer_box"]["snippet_highlighted_words"][0]
        elif "snippet" in res["organic_results"][0].keys():
            toret = res["organic_results"][0]["snippet"]
        elif (
            "sports_results" in res.keys()
            and "game_spotlight" in res["sports_results"].keys()
        ):
            toret = res["sports_results"]["game_spotlight"]
        elif (
            "knowledge_graph" in res.keys()
            and "description" in res["knowledge_graph"].keys()
        ):
            toret = res["knowledge_graph"]["description"]
        else:
            toret = "No good search result found"
        return toret
@ -2,6 +2,7 @@
from __future__ import annotations

from typing import Any, Iterable, List, Optional
from uuid import uuid4

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
@ -52,8 +53,23 @@ class Weaviate(VectorStore):
    def add_texts(
        self, texts: Iterable[str], metadatas: Optional[List[dict]] = None
    ) -> List[str]:
        """Not implemented for Weaviate yet."""
        raise NotImplementedError("weaviate does not currently support `add_texts`.")
        """Upload texts with metadata (properties) to Weaviate."""
        from weaviate.util import get_valid_uuid

        with self._client.batch as batch:
            ids = []
            for i, doc in enumerate(texts):
                data_properties = {
                    self._text_key: doc,
                }
                if metadatas is not None:
                    for key in metadatas[i].keys():
                        data_properties[key] = metadatas[i][key]

                _id = get_valid_uuid(uuid4())
                batch.add_data_object(data_properties, self._index_name, _id)
                ids.append(_id)
        return ids

    def similarity_search(self, query: str, k: int = 4) -> List[Document]:
        """Look up similar documents in weaviate."""
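A hypothetical usage sketch (not in this commit); it assumes the wrapper is constructed with a weaviate client, an index (class) name, and the text property key, matching the `_client`, `_index_name`, and `_text_key` attributes referenced above:

```python
import weaviate  # assumes the weaviate-client package and a running instance

from langchain.vectorstores.weaviate import Weaviate

client = weaviate.Client("http://localhost:8080")
store = Weaviate(client, "Paragraph", "content")  # assumed constructor order
ids = store.add_texts(
    ["foo", "bar"],
    metadatas=[{"source": "0-pl"}, {"source": "1-pl"}],
)
print(ids)  # one generated UUID per uploaded text
```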
@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.48"
version = "0.0.49"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
31 tests/integration_tests/chains/test_memory.py Normal file
@ -0,0 +1,31 @@
"""Test memory functionality."""
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory
from tests.unit_tests.llms.fake_llm import FakeLLM


def test_summary_buffer_memory_no_buffer_yet() -> None:
    """Test ConversationSummaryBufferMemory when no inputs put in buffer yet."""
    memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
    output = memory.load_memory_variables({})
    assert output == {"baz": ""}


def test_summary_buffer_memory_buffer_only() -> None:
    """Test ConversationSummaryBufferMemory when only the buffer is used."""
    memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
    memory.save_context({"input": "bar"}, {"output": "foo"})
    assert memory.buffer == ["Human: bar\nAI: foo"]
    output = memory.load_memory_variables({})
    assert output == {"baz": "Human: bar\nAI: foo"}


def test_summary_buffer_memory_summary() -> None:
    """Test ConversationSummaryBufferMemory when the buffer exceeds max_token_limit and a summary is created."""
    memory = ConversationSummaryBufferMemory(
        llm=FakeLLM(), memory_key="baz", max_token_limit=13
    )
    memory.save_context({"input": "bar"}, {"output": "foo"})
    memory.save_context({"input": "bar1"}, {"output": "foo1"})
    assert memory.buffer == ["Human: bar1\nAI: foo1"]
    output = memory.load_memory_variables({})
    assert output == {"baz": "foo\nHuman: bar1\nAI: foo1"}
@ -4,14 +4,21 @@ import pytest
from langchain.chains.base import Memory
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.conversation.memory import (
    ConversationalBufferWindowMemory,
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
    ConversationSummaryMemory,
)
from langchain.prompts.prompt import PromptTemplate
from tests.unit_tests.llms.fake_llm import FakeLLM


def test_memory_ai_prefix() -> None:
    """Test that ai_prefix in the memory component works."""
    memory = ConversationBufferMemory(memory_key="foo", ai_prefix="Assistant")
    memory.save_context({"input": "bar"}, {"output": "foo"})
    assert memory.buffer == "\nHuman: bar\nAssistant: foo"


def test_conversation_chain_works() -> None:
    """Test that conversation chain works in basic setting."""
    llm = FakeLLM()
@ -42,6 +49,7 @@ def test_conversation_chain_errors_bad_variable() -> None:
    "memory",
    [
        ConversationBufferMemory(memory_key="baz"),
        ConversationBufferWindowMemory(memory_key="baz"),
        ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
    ],
)
@ -74,14 +82,14 @@ def test_conversation_memory(memory: Memory) -> None:
    [
        ConversationBufferMemory(memory_key="baz"),
        ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
        ConversationalBufferWindowMemory(memory_key="baz"),
        ConversationBufferWindowMemory(memory_key="baz"),
    ],
)
def test_clearing_conversation_memory(memory: Memory) -> None:
    """Test clearing the conversation memory."""
    # This is a good input because the input is not the same as baz.
    good_inputs = {"foo": "bar", "baz": "foo"}
    # This is a good output because these is one variable.
    # This is a good output because there is one variable.
    good_outputs = {"bar": "foo"}
    memory.save_context(good_inputs, good_outputs)