commit 2a84d3d5ca
parent 45ce74d0bc

    stash
@@ -68,7 +68,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "b5a265c6",
+   "id": "2d6f4b1d",
    "metadata": {},
    "outputs": [],
    "source": []
@@ -20,23 +20,28 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
+      "\n",
+      "\n",
+      "\u001b[1m> Entering new chain...\u001b[0m\n",
       "\n",
       "\n",
       "\u001b[1m> Entering new chain...\u001b[0m\n",
       "What is the hometown of the reigning men's U.S. Open champion?\n",
       "Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
       "Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
-      "Intermediate answer: \u001b[33;1m\u001b[1;3mCarlos Alcaraz.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
-      "Follow up: Where is Carlos Alcaraz from?\u001b[0m\n",
-      "Intermediate answer: \u001b[33;1m\u001b[1;3mEl Palmar, Spain.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
-      "So the final answer is: El Palmar, Spain\u001b[0m\n",
+      "Intermediate answer: \u001b[36;1m\u001b[1;3mCarlos Alcaraz\u001b[0m\n",
+      "\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz from?\u001b[0m\n",
+      "Intermediate answer: \u001b[36;1m\u001b[1;3mEl Palmar, Spain\u001b[0m\n",
+      "\u001b[32;1m\u001b[1;3mSo the final answer is: El Palmar, Spain\u001b[0m\n",
+      "\u001b[1m> Finished chain.\u001b[0m\n",
+      "\n",
       "\u001b[1m> Finished chain.\u001b[0m\n"
      ]
     },
     {
      "data": {
       "text/plain": [
-       "'\\nSo the final answer is: El Palmar, Spain'"
+       "'El Palmar, Spain'"
       ]
      },
      "execution_count": 1,
@@ -10,6 +10,7 @@ from langchain.chains.router import LLMRouterChain
 from langchain.input import ChainedInput, get_color_mapping
 from langchain.llms.base import LLM
 from langchain.prompts import BasePromptTemplate, PromptTemplate
+from langchain.chains.router_expert import RouterExpertChain, ExpertConfig
 
 FINAL_ANSWER_ACTION = "Final Answer: "
 
@@ -31,9 +32,9 @@ class ChainConfig(NamedTuple):
 def get_action_and_input(llm_output: str) -> Tuple[str, str]:
     """Parse out the action and input from the LLM output."""
     ps = [p for p in llm_output.split("\n") if p]
-    if ps[-1].startswith(FINAL_ANSWER_ACTION):
+    if ps[-1].startswith("Final Answer"):
         directive = ps[-1][len(FINAL_ANSWER_ACTION) :]
-        return FINAL_ANSWER_ACTION, directive
+        return "Final Answer", directive
     if not ps[-1].startswith("Action Input: "):
         raise ValueError(
             "The last line does not have an action input, "
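
Note: the hunk above loosens the final-answer check (any last line starting with "Final Answer" now matches) and returns the bare action name "Final Answer" instead of the prefixed constant. A standalone sketch of just that branch, not library source (the non-final-answer fallback is omitted here):

    from typing import Tuple

    FINAL_ANSWER_ACTION = "Final Answer: "

    def get_action_and_input(llm_output: str) -> Tuple[str, str]:
        """Parse the final answer out of raw LLM text."""
        ps = [p for p in llm_output.split("\n") if p]
        if ps[-1].startswith("Final Answer"):
            # Slice off the "Final Answer: " prefix; callers now compare
            # against the bare string "Final Answer".
            directive = ps[-1][len(FINAL_ANSWER_ACTION):]
            return "Final Answer", directive
        raise ValueError("this sketch only handles the final-answer branch")

    print(get_action_and_input("Thought: I know it.\nFinal Answer: 42"))
    # -> ('Final Answer', '42')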
@@ -52,6 +53,16 @@ def get_action_and_input(llm_output: str) -> Tuple[str, str]:
 class MRKLRouterChain(LLMRouterChain):
     """Router for the MRKL chain."""
 
+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return "Observation: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return "Thought:"
+
     def __init__(self, llm: LLM, chain_configs: List[ChainConfig], **kwargs: Any):
         """Initialize with an LLM and the chain configs it has access to."""
         tools = "\n".join(
@@ -166,21 +177,15 @@ class MRKLChain(Chain, BaseModel):
 
     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         router_chain = MRKLRouterChain(self.llm, self.chain_configs)
-        chained_input = ChainedInput(
-            f"{inputs[self.input_key]}\nThought:", verbose=self.verbose
+        question = inputs[self.input_key]
+        expert_configs = [
+            ExpertConfig(expert_name=c.action_name, expert=c.action)
+            for c in self.chain_configs
+        ]
+        chain = RouterExpertChain(
+            router_chain=router_chain,
+            expert_configs=expert_configs,
+            verbose=self.verbose
         )
-        color_mapping = get_color_mapping(
-            list(self.action_to_chain_map.keys()), excluded_colors=["green"]
-        )
-        while True:
-            action, action_input, thought = router_chain.get_action_and_input(
-                chained_input.input
-            )
-            chained_input.add(thought, color="green")
-            if action == FINAL_ANSWER_ACTION:
-                return {self.output_key: action_input}
-            chain = self.action_to_chain_map[action]
-            ca = chain(action_input)
-            chained_input.add("\nObservation: ")
-            chained_input.add(ca, color=color_mapping[action])
-            chained_input.add("\nThought:")
+        output = chain.run(question)
+        return {self.output_key: output}
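
Note: MRKLChain._call no longer runs its own routing loop; it adapts each ChainConfig into an ExpertConfig and delegates to RouterExpertChain. A minimal sketch of that adaptation with stub NamedTuples (the action_description field is an assumption about ChainConfig, used only for prompt construction elsewhere):

    from typing import Callable, List, NamedTuple

    class ChainConfig(NamedTuple):
        action_name: str
        action: Callable[[str], str]
        action_description: str  # assumed field, not shown in this diff

    class ExpertConfig(NamedTuple):
        expert_name: str
        expert: Callable[[str], str]

    chain_configs: List[ChainConfig] = [
        ChainConfig("Search", lambda q: "stub search result", "for lookups"),
        ChainConfig("Calculator", lambda q: "42", "for arithmetic"),
    ]
    # The projection performed in the new _call body:
    expert_configs = [
        ExpertConfig(expert_name=c.action_name, expert=c.action)
        for c in chain_configs
    ]
    assert expert_configs[0].expert_name == "Search"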
@@ -12,6 +12,7 @@ from langchain.docstore.base import Docstore
 from langchain.docstore.document import Document
 from langchain.input import ChainedInput
 from langchain.llms.base import LLM
+from langchain.chains.router_expert import RouterExpertChain, ExpertConfig
 
 
 class ReActRouterChain(LLMRouterChain, BaseModel):
@@ -43,6 +44,42 @@ class ReActRouterChain(LLMRouterChain, BaseModel):
             raise ValueError(f"Could not parse action directive: {action_str}")
         return re_matches.group(1), re_matches.group(2)
 
+    @property
+    def finish_action_name(self) -> str:
+        """The action name of when to finish the chain."""
+        return "Finish"
+
+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return f"Observation {self.i - 1}: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return f"Thought {self.i}:"
+
+
+class DocstoreExplorer:
+
+    def __init__(self, docstore: Docstore):
+        self.docstore = docstore
+        self.document = None
+
+    def search(self, term: str):
+        result = self.docstore.search(term)
+        if isinstance(result, Document):
+            self.document = result
+            return self.document.summary
+        else:
+            self.document = None
+            return result
+
+    def lookup(self, term: str):
+        if self.document is None:
+            raise ValueError("Cannot lookup without a successful search first")
+        return self.document.lookup(term)
+
 
 class ReActChain(Chain, BaseModel):
     """Chain that implements the ReAct paper.
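
Note: DocstoreExplorer carries the search/lookup state that previously lived inline in ReActChain._call: a search hit is cached so later lookups page within that document, a miss clears the cache. A runnable behavior sketch with stub Document/Docstore classes (stand-ins only, not the langchain types):

    class Document:
        """Stub standing in for langchain.docstore.document.Document."""
        def __init__(self, summary: str):
            self.summary = summary

        def lookup(self, term: str) -> str:
            return f"(Result 1/1) ...{term}..."

    class Docstore:
        """Stub docstore: returns a Document on a hit, a string on a miss."""
        def search(self, term: str):
            if term == "Python":
                return Document(f"First paragraph about {term}.")
            return f"Could not find [{term}]"

    class DocstoreExplorer:
        def __init__(self, docstore):
            self.docstore = docstore
            self.document = None

        def search(self, term):
            result = self.docstore.search(term)
            if isinstance(result, Document):
                self.document = result
                return self.document.summary
            self.document = None
            return result

        def lookup(self, term):
            if self.document is None:
                raise ValueError("Cannot lookup without a successful search first")
            return self.document.lookup(term)

    explorer = DocstoreExplorer(Docstore())
    print(explorer.search("Python"))  # hit: summary of the found document
    print(explorer.lookup("Python"))  # pages within that same document
    print(explorer.search("Nope"))    # miss: raw string, and state is cleared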
@@ -86,29 +123,15 @@ class ReActChain(Chain, BaseModel):
     def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
         question = inputs[self.input_key]
         router_chain = ReActRouterChain(self.llm)
-        chained_input = ChainedInput(f"{question}\nThought 1:", verbose=self.verbose)
-        document = None
-        while True:
-            action, directive, ret_text = router_chain.get_action_and_input(
-                chained_input.input
-            )
-            chained_input.add(ret_text, color="green")
-            if action == "Search":
-                result = self.docstore.search(directive)
-                if isinstance(result, Document):
-                    document = result
-                    observation = document.summary
-                else:
-                    document = None
-                    observation = result
-            elif action == "Lookup":
-                if document is None:
-                    raise ValueError("Cannot lookup without a successful search first")
-                observation = document.lookup(directive)
-            elif action == "Finish":
-                return {self.output_key: directive}
-            else:
-                raise ValueError(f"Got unknown action directive: {action}")
-            chained_input.add(f"\nObservation {router_chain.i - 1}: ")
-            chained_input.add(observation, color="yellow")
-            chained_input.add(f"\nThought {router_chain.i}:")
+        docstore_explorer = DocstoreExplorer(self.docstore)
+        expert_configs = [
+            ExpertConfig(expert_name="Search", expert=docstore_explorer.search),
+            ExpertConfig(expert_name="Lookup", expert=docstore_explorer.lookup)
+        ]
+        chain = RouterExpertChain(
+            router_chain=router_chain,
+            expert_configs=expert_configs,
+            verbose=self.verbose
+        )
+        output = chain.run(question)
+        return {self.output_key: output}
@@ -36,6 +36,21 @@ class RouterChain(Chain, BaseModel, ABC):
     def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
         """Return action, action input, and log (in that order)."""
 
+    @property
+    @abstractmethod
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+
+    @property
+    @abstractmethod
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+
+    @property
+    def finish_action_name(self) -> str:
+        """The action name of when to finish the chain."""
+        return "Final Answer"
+
     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         action, action_input, log = self.get_action_and_input(inputs[self.input_key])
         return {
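
Note: RouterChain now declares the hooks the generic executor needs: two abstract prefixes plus a finish-action name defaulting to "Final Answer". A toy sketch of the minimal concrete surface (an assumed subclass for illustration, not library source):

    from abc import ABC, abstractmethod

    class RouterLike(ABC):
        @property
        @abstractmethod
        def observation_prefix(self) -> str:
            """Prefix to append the observation with."""

        @property
        @abstractmethod
        def router_prefix(self) -> str:
            """Prefix to append the router call with."""

        @property
        def finish_action_name(self) -> str:
            # Overridable; ReAct-style routers return "Finish" instead.
            return "Final Answer"

    class MRKLStyleRouter(RouterLike):
        @property
        def observation_prefix(self) -> str:
            return "Observation: "

        @property
        def router_prefix(self) -> str:
            return "Thought:"

    r = MRKLStyleRouter()
    assert (r.observation_prefix, r.router_prefix, r.finish_action_name) == \
        ("Observation: ", "Thought:", "Final Answer")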
@@ -12,7 +12,7 @@ from langchain.llms.base import LLM
 from langchain.prompts import BasePromptTemplate, PromptTemplate
 from langchain.chains.router import RouterChain
 
-FINAL_ANSWER_ACTION = "Final Answer: "
+
 
 
 class ExpertConfig(NamedTuple):
@@ -28,6 +28,8 @@ class RouterExpertChain(Chain, BaseModel):
     """Router chain."""
     expert_configs: List[ExpertConfig]
     """Expert configs this chain has access to."""
+    starter_string: str = "\n"
+    """String to put after user input but before first router."""
     input_key: str = "question"  #: :meta private:
     output_key: str = "answer"  #: :meta private:
 
@@ -54,22 +56,22 @@ class RouterExpertChain(Chain, BaseModel):
         return [self.output_key]
 
     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
-        router_chain = MRKLRouterChain(self.llm, self.chain_configs)
+        action_to_chain_map = {e.expert_name: e.expert for e in self.expert_configs}
         chained_input = ChainedInput(
-            f"{inputs[self.input_key]}", verbose=self.verbose
+            f"{inputs[self.input_key]}{self.starter_string}{self.router_chain.router_prefix}", verbose=self.verbose
         )
         color_mapping = get_color_mapping(
-            [c.], excluded_colors=["green"]
+            [c.expert_name for c in self.expert_configs], excluded_colors=["green"]
         )
         while True:
-            action, action_input, thought = router_chain.get_action_and_input(
+            action, action_input, log = self.router_chain.get_action_and_input(
                 chained_input.input
             )
-            chained_input.add(thought, color="green")
-            if action == FINAL_ANSWER_ACTION:
+            chained_input.add(log, color="green")
+            if action == self.router_chain.finish_action_name:
                 return {self.output_key: action_input}
-            chain = self.action_to_chain_map[action]
+            chain = action_to_chain_map[action]
             ca = chain(action_input)
-            chained_input.add("\nObservation: ")
+            chained_input.add(f"\n{self.router_chain.observation_prefix}")
             chained_input.add(ca, color=color_mapping[action])
-            chained_input.add("\nThought:")
+            chained_input.add(f"\n{self.router_chain.router_prefix}")
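
Note: with the prefix hooks in place, the routing loop above is fully generic over routers and experts. A plain-Python sketch of what _call now does (ChainedInput reduced to string concatenation; router and experts are stubs the caller supplies):

    def run_router_expert(question, router, experts, starter_string="\n"):
        """Sketch of the generic loop; `experts` is [(name, callable), ...]."""
        action_to_expert = dict(experts)
        # The transcript seeds with the question, the starter string, and the
        # router's own prefix, mirroring the ChainedInput constructor call.
        text = f"{question}{starter_string}{router.router_prefix}"
        while True:
            action, action_input, log = router.get_action_and_input(text)
            text += log
            if action == router.finish_action_name:
                return action_input
            # Dispatch to the named expert, then append the observation
            # between the router-specific prefixes.
            observation = action_to_expert[action](action_input)
            text += f"\n{router.observation_prefix}{observation}\n{router.router_prefix}"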
@@ -10,6 +10,7 @@ from langchain.chains.self_ask_with_search.prompt import PROMPT
 from langchain.chains.serpapi import SerpAPIChain
 from langchain.input import ChainedInput
 from langchain.llms.base import LLM
+from langchain.chains.router_expert import RouterExpertChain, ExpertConfig
 
 
 class SelfAskWithSearchRouter(LLMRouterChain):
@@ -28,7 +29,10 @@ class SelfAskWithSearchRouter(LLMRouterChain):
         last_line = text.split("\n")[-1]
 
         if followup not in last_line:
-            return "Final Answer", text
+            finish_string = "So the final answer is: "
+            if finish_string not in last_line:
+                raise ValueError("We should probably never get here")
+            return "Final Answer", text[len(finish_string):]
 
         if ":" not in last_line:
             after_colon = last_line
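
Note: this is why the notebook output above now returns 'El Palmar, Spain' rather than '\nSo the final answer is: El Palmar, Spain': the router slices off the finish string instead of returning the whole text, and fails loudly if the marker is missing. An isolated sketch of just that parse:

    def parse_final(text: str):
        last_line = text.split("\n")[-1]
        finish_string = "So the final answer is: "
        if finish_string not in last_line:
            raise ValueError("We should probably never get here")
        # Note the slice assumes the finish string starts the final line,
        # as in the single-line case exercised by the notebook.
        return "Final Answer", text[len(finish_string):]

    print(parse_final("So the final answer is: El Palmar, Spain"))
    # -> ('Final Answer', 'El Palmar, Spain')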
@@ -42,6 +46,16 @@ class SelfAskWithSearchRouter(LLMRouterChain):
 
         return "Intermediate Answer", after_colon
 
+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return "Intermediate answer: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return ""
+
 
 class SelfAskWithSearchChain(Chain, BaseModel):
     """Chain that does self ask with search.
@@ -84,16 +98,9 @@ class SelfAskWithSearchChain(Chain, BaseModel):
         return [self.output_key]
 
     def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
-        chained_input = ChainedInput(inputs[self.input_key], verbose=self.verbose)
-        chained_input.add("\nAre follow up questions needed here:")
         intermediate = "\nIntermediate answer:"
         router = SelfAskWithSearchRouter(self.llm, stops=[intermediate])
-        action, action_input, log = router.get_action_and_input(chained_input.input)
-        chained_input.add(log, color="green")
-        while action != "Final Answer":
-            external_answer = self.search_chain.run(action_input)
-            chained_input.add(intermediate + " ")
-            chained_input.add(external_answer + ".", color="yellow")
-            action, action_input, log = router.get_action_and_input(chained_input.input)
-            chained_input.add(log, color="green")
-        return {self.output_key: action_input}
+        expert_configs = [ExpertConfig(expert_name="Intermediate Answer", expert=self.search_chain.run)]
+        chain = RouterExpertChain(router_chain=router, expert_configs=expert_configs, verbose=self.verbose, starter_string="\nAre follow up questions needed here:")
+        output = chain.run(inputs[self.input_key])
+        return {self.output_key: output}
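
Note: from the caller's side the chain behaves as before, matching the notebook trace at the top of this commit. A hypothetical usage sketch (SerpAPIChain needs a SerpAPI key and OpenAI credentials, so treat this as illustrative rather than runnable as-is):

    from langchain import OpenAI, SerpAPIChain, SelfAskWithSearchChain

    llm = OpenAI(temperature=0)
    search = SerpAPIChain()
    self_ask = SelfAskWithSearchChain(llm=llm, search_chain=search)
    self_ask.run("What is the hometown of the reigning men's U.S. Open champion?")
    # -> 'El Palmar, Spain'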