mirror of https://github.com/hwchase17/langchain.git
synced 2025-08-11 13:55:03 +00:00
stop using chained input except in agent (#249)
commit 9481a23314
parent b5d8434a50
@@ -7,7 +7,7 @@ from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.llm_math.prompt import PROMPT
 from langchain.chains.python import PythonChain
-from langchain.input import ChainedInput
+from langchain.input import print_text
 from langchain.llms.base import LLM


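For context before the detailed hunks: the commit swaps the `ChainedInput` accumulator for explicit `print_text` calls guarded by `self.verbose`, keeping the running prompt in a plain string. A minimal sketch of that pattern, using hypothetical `question`, `llm_output`, and `verbose` values that are not part of the diff:

```python
from langchain.input import print_text  # new import used throughout this commit

# Hypothetical stand-ins for a chain's input and an LLM response.
question = "What is 2 + 2?"
llm_output = "Answer: 4"
verbose = True

# Old pattern (removed below): ChainedInput accumulated text and echoed it.
#   chained_input = ChainedInput(question, verbose=verbose)
#   chained_input.add(llm_output, color="green")
#   prompt = chained_input.input

# New pattern: keep the text in an ordinary string; print only when verbose.
if verbose:
    print_text(question)
prompt = question
if verbose:
    print_text(llm_output, color="green")
prompt += llm_output
```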
@@ -51,15 +51,18 @@ class LLMMathChain(Chain, BaseModel):
     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         llm_executor = LLMChain(prompt=PROMPT, llm=self.llm)
         python_executor = PythonChain()
-        chained_input = ChainedInput(inputs[self.input_key], verbose=self.verbose)
-        t = llm_executor.predict(question=chained_input.input, stop=["```output"])
-        chained_input.add(t, color="green")
+        if self.verbose:
+            print_text(inputs[self.input_key])
+        t = llm_executor.predict(question=inputs[self.input_key], stop=["```output"])
+        if self.verbose:
+            print_text(t, color="green")
         t = t.strip()
         if t.startswith("```python"):
             code = t[9:-4]
             output = python_executor.run(code)
-            chained_input.add("\nAnswer: ")
-            chained_input.add(output, color="yellow")
+            if self.verbose:
+                print_text("\nAnswer: ")
+                print_text(output, color="yellow")
             answer = "Answer: " + output
         elif t.startswith("Answer:"):
             answer = t
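`print_text` itself is not shown in this diff; the call sites above only require that it accept a text argument and an optional `color` keyword. A hypothetical stand-in consistent with those calls (the real helper in `langchain.input` may differ in colors, signature, and output handling):

```python
from typing import Optional

# Hypothetical ANSI color codes; the actual mapping in langchain.input may differ.
_COLORS = {"green": "32", "yellow": "33", "blue": "34", "pink": "35"}


def print_text(text: str, color: Optional[str] = None) -> None:
    """Print text, optionally wrapped in an ANSI color escape, without a trailing newline."""
    if color is None:
        print(text, end="")
    else:
        print(f"\033[{_COLORS[color]}m{text}\033[0m", end="")
```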
@@ -6,7 +6,7 @@ from pydantic import BaseModel, Extra
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.sql_database.prompt import PROMPT
-from langchain.input import ChainedInput
+from langchain.input import print_text
 from langchain.llms.base import LLM
 from langchain.sql_database import SQLDatabase

@@ -53,22 +53,26 @@ class SQLDatabaseChain(Chain, BaseModel):

     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
-        chained_input = ChainedInput(
-            f"{inputs[self.input_key]} \nSQLQuery:", verbose=self.verbose
-        )
+        input_text = f"{inputs[self.input_key]} \nSQLQuery:"
+        if self.verbose:
+            print_text(input_text)
         llm_inputs = {
-            "input": chained_input.input,
+            "input": input_text,
             "dialect": self.database.dialect,
             "table_info": self.database.table_info,
             "stop": ["\nSQLResult:"],
         }
         sql_cmd = llm_chain.predict(**llm_inputs)
-        chained_input.add(sql_cmd, color="green")
+        if self.verbose:
+            print_text(sql_cmd, color="green")
         result = self.database.run(sql_cmd)
-        chained_input.add("\nSQLResult: ")
-        chained_input.add(result, color="yellow")
-        chained_input.add("\nAnswer:")
-        llm_inputs["input"] = chained_input.input
+        if self.verbose:
+            print_text("\nSQLResult: ")
+            print_text(result, color="yellow")
+            print_text("\nAnswer:")
+        input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
+        llm_inputs["input"] = input_text
         final_result = llm_chain.predict(**llm_inputs)
-        chained_input.add(final_result, color="green")
+        if self.verbose:
+            print_text(final_result, color="green")
         return {self.output_key: final_result}
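The new `SQLDatabaseChain._call` builds its second prompt by appending to the same `input_text` string it sent on the first call. A small illustration of how that string grows, using hypothetical question, SQL, and result values; only the f-string shapes come from the diff above:

```python
# Hypothetical values standing in for the chain's intermediate results.
question = "How many employees are there?"
sql_cmd = " SELECT COUNT(*) FROM Employee;"
result = "[(8,)]"

input_text = f"{question} \nSQLQuery:"                      # prompt for the first predict()
input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"    # prompt for the second predict()

print(input_text)
# How many employees are there?
# SQLQuery: SELECT COUNT(*) FROM Employee;
# SQLResult: [(8,)]
# Answer:
```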
@@ -36,13 +36,13 @@ class ChainedInput:
         """Initialize with verbose flag and initial text."""
         self._verbose = verbose
         if self._verbose:
-            print_text(text, None)
+            print_text(text, color=None)
         self._input = text

     def add(self, text: str, color: Optional[str] = None) -> None:
         """Add text to input, print if in verbose mode."""
         if self._verbose:
-            print_text(text, color)
+            print_text(text, color=color)
         self._input += text

     @property
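`ChainedInput` itself stays around for the agent, per the commit title. Assembled from this hunk and the call sites removed above, the helper looks roughly like the sketch below; the `__init__` signature and the body of the `input` property are inferred rather than shown in the diff, and in the real `langchain/input.py` the class sits alongside `print_text` rather than importing it:

```python
from typing import Optional

from langchain.input import print_text  # in the real module, defined in the same file


class ChainedInput:
    """Accumulate prompt text across steps, echoing each piece when verbose."""

    def __init__(self, text: str, verbose: bool = False):
        """Initialize with verbose flag and initial text."""
        self._verbose = verbose
        if self._verbose:
            print_text(text, color=None)
        self._input = text

    def add(self, text: str, color: Optional[str] = None) -> None:
        """Add text to input, print if in verbose mode."""
        if self._verbose:
            print_text(text, color=color)
        self._input += text

    @property
    def input(self) -> str:
        """Return the accumulated input text."""
        return self._input
```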