mirror of https://github.com/hwchase17/langchain.git
synced 2025-06-26 16:43:35 +00:00
parent dfb81c969f
commit 4cc18d6c2a
@@ -6,6 +6,24 @@
    "id": "44e9ba31",
    "metadata": {},
    "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "How many of the integers between 0 and 99 inclusive are divisible by 8?\u001b[102m\n",
+      "\n",
+      "```python\n",
+      "count = 0\n",
+      "for i in range(100):\n",
+      "    if i % 8 == 0:\n",
+      "        count += 1\n",
+      "print(count)\n",
+      "```\n",
+      "\u001b[0m\n",
+      "Answer: \u001b[103m13\n",
+      "\u001b[0m"
+     ]
+    },
     {
      "data": {
       "text/plain": [

@@ -21,7 +39,7 @@
    "from langchain import OpenAI, LLMMathChain\n",
    "\n",
    "llm = OpenAI(temperature=0)\n",
-   "llm_math = LLMMathChain(llm=llm)\n",
+   "llm_math = LLMMathChain(llm=llm, verbose=True)\n",
    "\n",
    "llm_math.run(\"How many of the integers between 0 and 99 inclusive are divisible by 8?\")"
   ]
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 3,
    "id": "4e272b47",
    "metadata": {},
    "outputs": [],

@@ -10,12 +10,12 @@
    "from langchain import OpenAI, ReActChain, Wikipedia\n",
    "\n",
    "llm = OpenAI(temperature=0)\n",
-   "react = ReActChain(llm=llm, docstore=Wikipedia())"
+   "react = ReActChain(llm=llm, docstore=Wikipedia(), verbose=True)"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
+  "execution_count": 4,
   "id": "8078c8f1",
   "metadata": {},
   "outputs": [

@@ -23,11 +23,15 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "Search David Chanoff\n",
-    "David Chanoff is a noted author of non-fiction work. His work has typically involved collaborations with the principal protagonist of the work concerned. His collaborators have included; Augustus A. White, Joycelyn Elders, Đoàn Văn Toại, William J. Crowe, Ariel Sharon, Kenneth Good and Felix Zandman. He has also written about a wide range of subjects including literary history, education and foreign for The Washington Post, The New Republic and The New York Times Magazine. He has published more than twelve books.\n",
-    "Search William J. Crowe\n",
-    "William James Crowe Jr. (January 2, 1925 – October 18, 2007) was a United States Navy admiral and diplomat who served as the 11th chairman of the Joint Chiefs of Staff under Presidents Ronald Reagan and George H. W. Bush, and as the ambassador to the United Kingdom and Chair of the Intelligence Oversight Board under President Bill Clinton.\n",
-    "Finish Bill Clinton\n"
+    "\u001b[102m I need to search David Chanoff and find the U.S. Navy admiral he\n",
+    "collaborated with.\n",
+    "Action 1: Search[David Chanoff]\u001b[0m\u001b[49m\n",
+    "Observation 1: \u001b[0m\u001b[103mDavid Chanoff is a noted author of non-fiction work. His work has typically involved collaborations with the principal protagonist of the work concerned. His collaborators have included; Augustus A. White, Joycelyn Elders, Đoàn Văn Toại, William J. Crowe, Ariel Sharon, Kenneth Good and Felix Zandman. He has also written about a wide range of subjects including literary history, education and foreign for The Washington Post, The New Republic and The New York Times Magazine. He has published more than twelve books.\u001b[0m\u001b[49m\n",
+    "Thought 2:\u001b[0m\u001b[102m The U.S. Navy admiral David Chanoff collaborated with is William J. Crowe.\n",
+    "Action 2: Search[William J. Crowe]\u001b[0m\u001b[49m\n",
+    "Observation 2: \u001b[0m\u001b[103mWilliam James Crowe Jr. (January 2, 1925 – October 18, 2007) was a United States Navy admiral and diplomat who served as the 11th chairman of the Joint Chiefs of Staff under Presidents Ronald Reagan and George H. W. Bush, and as the ambassador to the United Kingdom and Chair of the Intelligence Oversight Board under President Bill Clinton.\u001b[0m\u001b[49m\n",
+    "Thought 3:\u001b[0m\u001b[102m William J. Crowe served as the ambassador to the United Kingdom under President Bill Clinton. So the answer is Bill Clinton.\n",
+    "Action 3: Finish[Bill Clinton]\u001b[0m"
    ]
   },
   {

@@ -36,7 +40,7 @@
     "'Bill Clinton'"
    ]
   },
-  "execution_count": 2,
+  "execution_count": 4,
   "metadata": {},
   "output_type": "execute_result"
  }
@@ -10,19 +10,19 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
     "What is the hometown of the reigning men's U.S. Open champion?\n",
-    "Are follow up questions needed here:\u001b[102m Yes.\n",
-    "Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
-    "Intermediate answer: \u001b[106mCarlos Alcaraz\u001b[0m.\u001b[102m\n",
-    "Follow up: Where is Carlos Alcaraz from?\u001b[0m\n",
-    "Intermediate answer: \u001b[106mEl Palmar, Murcia, Spain\u001b[0m.\u001b[102m\n",
+    "\u001b[49m\n",
+    "Are follow up questions needed here:\u001b[0m\u001b[102m Yes.\n",
+    "Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\u001b[49m\n",
+    "Intermediate answer: \u001b[0m\u001b[103mCarlos Alcaraz.\u001b[0m\u001b[102m\n",
+    "Follow up: Where is Carlos Alcaraz from?\u001b[0m\u001b[49m\n",
+    "Intermediate answer: \u001b[0m\u001b[103mEl Palmar, Murcia, Spain.\u001b[0m\u001b[102m\n",
     "So the final answer is: El Palmar, Murcia, Spain\u001b[0m"
    ]
   },
   {
    "data": {
     "text/plain": [
-     "\"What is the hometown of the reigning men's U.S. Open champion?\\nAre follow up questions needed here: Yes.\\nFollow up: Who is the reigning men's U.S. Open champion?\\nIntermediate answer: Carlos Alcaraz.\\nFollow up: Where is Carlos Alcaraz from?\\nIntermediate answer: El Palmar, Murcia, Spain.\\nSo the final answer is: El Palmar, Murcia, Spain\""
+     "'\\nSo the final answer is: El Palmar, Murcia, Spain'"
     ]
    },
    "execution_count": 1,

@@ -36,7 +36,7 @@
    "llm = OpenAI(temperature=0)\n",
    "search = SerpAPIChain()\n",
    "\n",
-   "self_ask_with_search = SelfAskWithSearchChain(llm=llm, search_chain=search)\n",
+   "self_ask_with_search = SelfAskWithSearchChain(llm=llm, search_chain=search, verbose=True)\n",
    "\n",
    "self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
   ]
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 1,
    "id": "51a54c4d",
    "metadata": {},
    "outputs": [

@@ -12,7 +12,7 @@
     "' The year Justin Beiber was born was 1994. In 1994, the Dallas Cowboys won the Super Bowl.'"
    ]
   },
-  "execution_count": 7,
+  "execution_count": 1,
   "metadata": {},
   "output_type": "execute_result"
  }
@@ -28,7 +28,7 @@
   "source": [
    "db = SQLDatabase.from_uri(\"sqlite:///../notebooks/Chinook.db\")\n",
    "llm = OpenAI(temperature=0)\n",
-   "db_chain = SQLDatabaseChain(llm=llm, database=db)"
+   "db_chain = SQLDatabaseChain(llm=llm, database=db, verbose=True)"
   ]
  },
  {

@@ -41,8 +41,9 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    " SELECT COUNT(*) FROM Employee\n",
-    "[(8,)]\n"
+    "\u001b[102m SELECT COUNT(*) FROM Employee\u001b[0m\u001b[49m\n",
+    "SQLResult: \u001b[0m\u001b[103m[(8,)]\u001b[0m\u001b[49m\n",
+    "Answer:\u001b[0m\u001b[102m There are 8 employees.\u001b[0m"
    ]
   },
  {

@@ -85,7 +86,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.8.7"
+  "version": "3.7.6"
  }
 },
 "nbformat": 4,
@@ -7,6 +7,7 @@ from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.llm_math.prompt import PROMPT
 from langchain.chains.python import PythonChain
+from langchain.input import ChainedInput
 from langchain.llms.base import LLM

@@ -52,14 +53,15 @@ class LLMMathChain(Chain, BaseModel):
     def _run(self, inputs: Dict[str, str]) -> Dict[str, str]:
         llm_executor = LLMChain(prompt=PROMPT, llm=self.llm)
         python_executor = PythonChain()
-        question = inputs[self.input_key]
-        t = llm_executor.predict(question=question, stop=["```output"]).strip()
+        chained_input = ChainedInput(inputs[self.input_key], verbose=self.verbose)
+        t = llm_executor.predict(question=chained_input.input, stop=["```output"])
+        chained_input.add(t, color="green")
+        t = t.strip()
         if t.startswith("```python"):
             code = t[9:-4]
-            if self.verbose:
-                print("[DEBUG] evaluating code")
-                print(code)
             output = python_executor.run(code)
+            chained_input.add("\nAnswer: ")
+            chained_input.add(output, color="yellow")
             answer = "Answer: " + output
         elif t.startswith("Answer:"):
             answer = t
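A note on the `code = t[9:-4]` slice above: after `t.strip()`, it assumes the completion is fenced exactly as the prompt requests, so it drops the 9-character "```python" prefix and the 4-character "\n```" suffix. A standalone sketch (not part of the commit) of what survives the slice:

    # Hypothetical stripped completion, shaped the way the LLMMathChain prompt expects.
    t = "```python\ncount = 0\nprint(count)\n```"
    code = t[9:-4]  # drop the leading "```python" and the trailing "\n```"
    assert code == "\ncount = 0\nprint(count)"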
@@ -9,6 +9,7 @@ from langchain.chains.llm import LLMChain
 from langchain.chains.react.prompt import PROMPT
 from langchain.docstore.base import Docstore
 from langchain.docstore.document import Document
+from langchain.input import ChainedInput
 from langchain.llms.base import LLM

@@ -48,6 +49,7 @@ class ReActChain(Chain, BaseModel):
     """LLM wrapper to use."""
     docstore: Docstore
     """Docstore to use."""
+    verbose: bool = False
     input_key: str = "question"  #: :meta private:
     output_key: str = "answer"  #: :meta private:

@@ -76,15 +78,14 @@ class ReActChain(Chain, BaseModel):
     def _run(self, inputs: Dict[str, Any]) -> Dict[str, str]:
         question = inputs[self.input_key]
         llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
-        prompt = f"{question}\nThought 1:"
+        chained_input = ChainedInput(f"{question}\nThought 1:", verbose=self.verbose)
         i = 1
         document = None
         while True:
             ret_text, action, directive = predict_until_observation(
-                llm_chain, prompt, i
+                llm_chain, chained_input.input, i
             )
-            prompt += ret_text
-            print(action, directive)
+            chained_input.add(ret_text, color="green")
             if action == "Search":
                 result = self.docstore.search(directive)
                 if isinstance(result, Document):

@@ -93,16 +94,17 @@ class ReActChain(Chain, BaseModel):
                 else:
                     document = None
                     observation = result
-                print(observation)
             elif action == "Lookup":
                 if document is None:
                     raise ValueError("Cannot lookup without a successful search first")
                 observation = document.lookup(directive)
             elif action == "Finish":
-                return {"full_logic": prompt, self.output_key: directive}
+                return {"full_logic": chained_input.input, self.output_key: directive}
             else:
                 raise ValueError(f"Got unknown action directive: {action}")
-            prompt += f"\nObservation {i}: " + observation + f"\nThought {i + 1}:"
+            chained_input.add(f"\nObservation {i}: ")
+            chained_input.add(observation, color="yellow")
+            chained_input.add(f"\nThought {i + 1}:")
             i += 1

     def run(self, question: str) -> str:
@@ -7,6 +7,7 @@ from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.self_ask_with_search.prompt import PROMPT
 from langchain.chains.serpapi import SerpAPIChain
+from langchain.input import ChainedInput
 from langchain.llms.base import LLM

@@ -88,6 +89,7 @@ class SelfAskWithSearchChain(Chain, BaseModel):
     """LLM wrapper to use."""
     search_chain: SerpAPIChain
     """Search chain to use."""
+    verbose: bool = False
     input_key: str = "question"  #: :meta private:
     output_key: str = "answer"  #: :meta private:

@@ -114,44 +116,38 @@ class SelfAskWithSearchChain(Chain, BaseModel):
         return [self.output_key]

     def _run(self, inputs: Dict[str, Any]) -> Dict[str, str]:
-        question = inputs[self.input_key]
+        chained_input = ChainedInput(inputs[self.input_key], verbose=self.verbose)
+        chained_input.add("\nAre follow up questions needed here:")
         llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
         intermediate = "\nIntermediate answer:"
         followup = "Follow up:"
         finalans = "\nSo the final answer is:"
-        cur_prompt = f"{question}\nAre follow up questions needed here:"
-        print(cur_prompt, end="")
-        ret_text = llm_chain.predict(input=cur_prompt, stop=[intermediate])
-        print(greenify(ret_text), end="")
+        ret_text = llm_chain.predict(input=chained_input.input, stop=[intermediate])
+        chained_input.add(ret_text, color="green")
         while followup in get_last_line(ret_text):
-            cur_prompt += ret_text
             question = extract_question(ret_text, followup)
             external_answer = self.search_chain.search(question)
             if external_answer is not None:
-                cur_prompt += intermediate + " " + external_answer + "."
-                print(
-                    intermediate + " " + yellowfy(external_answer) + ".",
-                    end="",
-                )
+                chained_input.add(intermediate + " ")
+                chained_input.add(external_answer + ".", color="yellow")
                 ret_text = llm_chain.predict(
-                    input=cur_prompt, stop=["\nIntermediate answer:"]
+                    input=chained_input.input, stop=["\nIntermediate answer:"]
                 )
-                print(greenify(ret_text), end="")
+                chained_input.add(ret_text, color="green")
             else:
                 # We only get here in the very rare case that Google returns no answer.
-                cur_prompt += intermediate
-                print(intermediate + " ")
-                cur_prompt += llm_chain.predict(
-                    input=cur_prompt, stop=["\n" + followup, finalans]
+                chained_input.add(intermediate + " ")
+                preds = llm_chain.predict(
+                    input=chained_input.input, stop=["\n" + followup, finalans]
                 )
+                chained_input.add(preds, color="green")

         if finalans not in ret_text:
-            cur_prompt += finalans
-            print(finalans, end="")
-            ret_text = llm_chain.predict(input=cur_prompt, stop=["\n"])
-            print(greenify(ret_text), end="")
+            chained_input.add(finalans)
+            ret_text = llm_chain.predict(input=chained_input.input, stop=["\n"])
+            chained_input.add(ret_text, color="green")

-        return {self.output_key: cur_prompt + ret_text}
+        return {self.output_key: ret_text}

     def run(self, question: str) -> str:
         """Run self ask with search chain.
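For orientation (an illustration, not part of the commit): `_run` keeps predicting until the last line of the completion no longer starts with "Follow up:", feeding each search result back in as an "Intermediate answer:". A minimal sketch of that control flow with the LLM and search stubbed out — `fake_llm` and `fake_search` are invented stand-ins scripted to match the notebook transcript above:

    # Sketch of the self-ask loop; fake_llm/fake_search are hypothetical stand-ins.
    def fake_llm(prompt: str, stop=None) -> str:
        # A real LLMChain.predict call would go here; answers are scripted.
        if "Carlos Alcaraz" not in prompt:
            return " Yes.\nFollow up: Who is the reigning men's U.S. Open champion?"
        if "El Palmar" not in prompt:
            return "\nFollow up: Where is Carlos Alcaraz from?"
        return "\nSo the final answer is: El Palmar, Murcia, Spain"

    def fake_search(question: str) -> str:
        return {
            "Who is the reigning men's U.S. Open champion?": "Carlos Alcaraz",
            "Where is Carlos Alcaraz from?": "El Palmar, Murcia, Spain",
        }[question]

    prompt = "What is the hometown of the reigning men's U.S. Open champion?"
    prompt += "\nAre follow up questions needed here:"
    ret_text = fake_llm(prompt, stop=["\nIntermediate answer:"])
    prompt += ret_text
    while ret_text.splitlines()[-1].startswith("Follow up:"):
        question = ret_text.splitlines()[-1][len("Follow up:"):].strip()
        prompt += "\nIntermediate answer: " + fake_search(question) + "."
        ret_text = fake_llm(prompt, stop=["\nIntermediate answer:"])
        prompt += ret_text
    print(ret_text)  # "\nSo the final answer is: El Palmar, Murcia, Spain"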
@@ -6,6 +6,7 @@ from pydantic import BaseModel, Extra
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.sql_database.prompt import PROMPT
+from langchain.input import ChainedInput
 from langchain.llms.base import LLM
 from langchain.sql_database import SQLDatabase

@@ -25,6 +26,7 @@ class SQLDatabaseChain(Chain, BaseModel):
     """LLM wrapper to use."""
     database: SQLDatabase
     """SQL Database to connect to."""
+    verbose: bool = False
     input_key: str = "query"  #: :meta private:
     output_key: str = "result"  #: :meta private:

@@ -52,20 +54,24 @@ class SQLDatabaseChain(Chain, BaseModel):

     def _run(self, inputs: Dict[str, str]) -> Dict[str, str]:
         llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
-        _input = inputs[self.input_key] + "\nSQLQuery:"
+        chained_input = ChainedInput(
+            inputs[self.input_key] + "\nSQLQuery:", verbose=self.verbose
+        )
         llm_inputs = {
-            "input": _input,
+            "input": chained_input.input,
             "dialect": self.database.dialect,
             "table_info": self.database.table_info,
             "stop": ["\nSQLResult:"],
         }
         sql_cmd = llm_chain.predict(**llm_inputs)
-        print(sql_cmd)
+        chained_input.add(sql_cmd, color="green")
         result = self.database.run(sql_cmd)
-        print(result)
-        _input += f"\nSQLResult: {result}\nAnswer:"
-        llm_inputs["input"] = _input
+        chained_input.add("\nSQLResult: ")
+        chained_input.add(result, color="yellow")
+        chained_input.add("\nAnswer:")
+        llm_inputs["input"] = chained_input.input
         final_result = llm_chain.predict(**llm_inputs)
+        chained_input.add(final_result, color="green")
         return {self.output_key: final_result}

     def query(self, query: str) -> str:
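To make the accumulation concrete: assuming the notebook's question was something like "How many employees are there?" (the question itself falls outside the diff hunks), the chain's input grows in four steps, matching the green/yellow output shown in the sqlite notebook above:

    # Illustrative stages of chained_input.input in SQLDatabaseChain._run.
    stage_1 = "How many employees are there?\nSQLQuery:"  # initial text (assumed question)
    stage_2 = stage_1 + " SELECT COUNT(*) FROM Employee"  # LLM completion, printed green
    stage_3 = stage_2 + "\nSQLResult: [(8,)]"             # database result, printed yellow
    stage_4 = stage_3 + "\nAnswer:"                       # prompt for the final LLM call
    # The final call on stage_4 yields " There are 8 employees.", per the notebook output.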
35  langchain/input.py  Normal file
@@ -0,0 +1,35 @@
+"""Handle chained inputs."""
+from typing import Optional
+
+_COLOR_MAPPING = {"blue": 104, "yellow": 103, "red": 101, "green": 102}
+
+
+def print_text(text: str, color: Optional[str] = None) -> None:
+    """Print text with highlighting and no end characters."""
+    if color is None:
+        print(text, end="")
+    else:
+        color_str = _COLOR_MAPPING[color]
+        print(f"\x1b[{color_str}m{text}\x1b[0m", end="")
+
+
+class ChainedInput:
+    """Class for working with input that is the result of chains."""
+
+    def __init__(self, text: str, verbose: bool = False):
+        """Initialize with verbose flag and initial text."""
+        self.verbose = verbose
+        if self.verbose:
+            print_text(text, None)
+        self._input = text
+
+    def add(self, text: str, color: Optional[str] = None) -> None:
+        """Add text to input, print if in verbose mode."""
+        if self.verbose:
+            print_text(text, color)
+        self._input += text
+
+    @property
+    def input(self) -> str:
+        """Return the accumulated input."""
+        return self._input
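A quick usage sketch (not from the commit) tying this file to the escape sequences visible in the notebook outputs above — 102 and 103 are the ANSI background-color codes that `_COLOR_MAPPING` assigns to green and yellow:

    from langchain.input import ChainedInput

    chained_input = ChainedInput("Question: 2 + 2\nAnswer:", verbose=True)  # echoes the text plainly
    chained_input.add(" 4", color="green")  # prints "\x1b[102m 4\x1b[0m": green background, then reset
    assert chained_input.input == "Question: 2 + 2\nAnswer: 4"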
52  tests/unit_tests/test_input.py  Normal file
@@ -0,0 +1,52 @@
+"""Test input manipulating logic."""
+
+import sys
+from io import StringIO
+
+from langchain.input import ChainedInput
+
+
+def test_chained_input_not_verbose() -> None:
+    """Test chained input logic."""
+    old_stdout = sys.stdout
+    sys.stdout = mystdout = StringIO()
+    chained_input = ChainedInput("foo")
+    sys.stdout = old_stdout
+    output = mystdout.getvalue()
+    assert output == ""
+    assert chained_input.input == "foo"
+
+    old_stdout = sys.stdout
+    sys.stdout = mystdout = StringIO()
+    chained_input.add("bar")
+    sys.stdout = old_stdout
+    output = mystdout.getvalue()
+    assert output == ""
+    assert chained_input.input == "foobar"
+
+
+def test_chained_input_verbose() -> None:
+    """Test chained input logic, making sure verbose doesn't mess it up."""
+    old_stdout = sys.stdout
+    sys.stdout = mystdout = StringIO()
+    chained_input = ChainedInput("foo", verbose=True)
+    sys.stdout = old_stdout
+    output = mystdout.getvalue()
+    assert output == "foo"
+    assert chained_input.input == "foo"
+
+    old_stdout = sys.stdout
+    sys.stdout = mystdout = StringIO()
+    chained_input.add("bar")
+    sys.stdout = old_stdout
+    output = mystdout.getvalue()
+    assert output == "bar"
+    assert chained_input.input == "foobar"
+
+    old_stdout = sys.stdout
+    sys.stdout = mystdout = StringIO()
+    chained_input.add("baz", color="blue")
+    sys.stdout = old_stdout
+    output = mystdout.getvalue()
+    assert output == "\x1b[104mbaz\x1b[0m"
+    assert chained_input.input == "foobarbaz"
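Side note: pytest's built-in `capsys` fixture captures stdout without swapping `sys.stdout` by hand. An equivalent sketch of the first verbose assertion, offered only as a comparison:

    from langchain.input import ChainedInput

    def test_chained_input_verbose_capsys(capsys) -> None:
        """Same check as above, using pytest's capture fixture."""
        chained_input = ChainedInput("foo", verbose=True)
        assert capsys.readouterr().out == "foo"
        assert chained_input.input == "foo"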