mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-06 21:43:44 +00:00
Harrison/official pre release (#8106)
This commit is contained in:
@@ -0,0 +1,9 @@
|
||||
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
|
||||
from langchain_experimental.plan_and_execute.executors.agent_executor import (
|
||||
load_agent_executor,
|
||||
)
|
||||
from langchain_experimental.plan_and_execute.planners.chat_planner import (
|
||||
load_chat_planner,
|
||||
)
|
||||
|
||||
__all__ = ["PlanAndExecute", "load_agent_executor", "load_chat_planner"]
|
@@ -0,0 +1,60 @@
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from langchain.callbacks.manager import CallbackManagerForChainRun
|
||||
from langchain.chains.base import Chain
|
||||
from pydantic import Field
|
||||
|
||||
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
|
||||
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
|
||||
from langchain_experimental.plan_and_execute.schema import (
|
||||
BaseStepContainer,
|
||||
ListStepContainer,
|
||||
)
|
||||
|
||||
|
||||
class PlanAndExecute(Chain):
    """Chain that first plans a sequence of steps, then executes them one by one.

    The planner turns the chain inputs into a plan; each step of that plan is
    handed to the executor along with the steps completed so far, and the
    final step's response becomes the chain output.
    """

    # Produces the plan (a sequence of steps) from the chain inputs.
    planner: BasePlanner
    # Executes one step of the plan at a time.
    executor: BaseExecutor
    # Accumulates (step, response) pairs and yields the final answer.
    step_container: BaseStepContainer = Field(default_factory=ListStepContainer)
    input_key: str = "input"
    output_key: str = "output"

    @property
    def input_keys(self) -> List[str]:
        """Key(s) expected in the input dict."""
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Key(s) produced in the output dict."""
        return [self.output_key]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        """Plan the steps, execute each in order, and return the final response."""
        plan = self.planner.plan(
            inputs,
            callbacks=run_manager.get_child() if run_manager else None,
        )
        if run_manager:
            run_manager.on_text(str(plan), verbose=self.verbose)
        for current_step in plan.steps:
            step_inputs = {
                "previous_steps": self.step_container,
                "current_step": current_step,
                "objective": inputs[self.input_key],
            }
            # Caller-supplied inputs take precedence on key collisions.
            step_inputs.update(inputs)
            response = self.executor.step(
                step_inputs,
                callbacks=run_manager.get_child() if run_manager else None,
            )
            if run_manager:
                run_manager.on_text(
                    f"*****\n\nStep: {current_step.value}", verbose=self.verbose
                )
                run_manager.on_text(
                    f"\n\nResponse: {response.response}", verbose=self.verbose
                )
            self.step_container.add_step(current_step, response)
        return {self.output_key: self.step_container.get_final_response()}
|
@@ -0,0 +1,55 @@
|
||||
from typing import List
|
||||
|
||||
from langchain.agents.agent import AgentExecutor
|
||||
from langchain.agents.structured_chat.base import StructuredChatAgent
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from langchain.tools import BaseTool
|
||||
|
||||
from langchain_experimental.plan_and_execute.executors.base import ChainExecutor
|
||||
|
||||
# Human-turn template for the executor agent: the steps completed so far,
# the step currently being executed, and the agent scratchpad.
HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps}

Current objective: {current_step}

{agent_scratchpad}"""

# Optional prefix that injects the overall objective into the prompt;
# prepended when include_task_in_prompt=True in load_agent_executor.
TASK_PREFIX = """{objective}

"""
|
||||
|
||||
|
||||
def load_agent_executor(
    llm: BaseLanguageModel,
    tools: List[BaseTool],
    verbose: bool = False,
    include_task_in_prompt: bool = False,
) -> ChainExecutor:
    """
    Load an agent executor.

    Args:
        llm: Language model backing the structured-chat agent.
        tools: Tools the agent may call.
        verbose: Whether the underlying AgentExecutor runs verbosely.
            Defaults to False.
        include_task_in_prompt: If True, prepend the overall objective to
            the human message template. Defaults to False.

    Returns:
        ChainExecutor wrapping the agent executor.
    """
    prompt_inputs = ["previous_steps", "current_step", "agent_scratchpad"]
    if include_task_in_prompt:
        prompt_inputs.append("objective")
        human_template = TASK_PREFIX + HUMAN_MESSAGE_TEMPLATE
    else:
        human_template = HUMAN_MESSAGE_TEMPLATE

    agent = StructuredChatAgent.from_llm_and_tools(
        llm,
        tools,
        human_message_template=human_template,
        input_variables=prompt_inputs,
    )
    executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, verbose=verbose
    )
    return ChainExecutor(chain=executor)
|
@@ -0,0 +1,40 @@
|
||||
from abc import abstractmethod
|
||||
from typing import Any
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.base import Chain
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain_experimental.plan_and_execute.schema import StepResponse
|
||||
|
||||
|
||||
class BaseExecutor(BaseModel):
    """Interface for executing a single step of a plan."""

    @abstractmethod
    def step(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Take step.

        Args:
            inputs: Inputs for this step.
            callbacks: Callbacks to pass through to the underlying run.

        Returns:
            StepResponse wrapping the step's string response.
        """

    @abstractmethod
    async def astep(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Take step asynchronously; see ``step``."""
|
||||
|
||||
|
||||
class ChainExecutor(BaseExecutor):
    """Executor that delegates each step to a wrapped chain."""

    # Chain invoked once per plan step; inputs are splatted as kwargs.
    chain: Chain

    def step(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Run the chain on the step inputs and wrap its string output."""
        return StepResponse(response=self.chain.run(**inputs, callbacks=callbacks))

    async def astep(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        """Async variant of ``step``."""
        result = await self.chain.arun(**inputs, callbacks=callbacks)
        return StepResponse(response=result)
|
@@ -0,0 +1,40 @@
|
||||
from abc import abstractmethod
|
||||
from typing import Any, List, Optional
|
||||
|
||||
from langchain.callbacks.manager import Callbacks
|
||||
from langchain.chains.llm import LLMChain
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
|
||||
|
||||
|
||||
class BasePlanner(BaseModel):
    """Interface for producing a plan from chain inputs."""

    @abstractmethod
    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
        """Given input, decide what to do.

        Args:
            inputs: Inputs to plan from.
            callbacks: Callbacks to pass through to the underlying run.

        Returns:
            The resulting Plan.
        """

    @abstractmethod
    async def aplan(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> Plan:
        """Given input, decide what to do (async); see ``plan``."""
|
||||
|
||||
|
||||
class LLMPlanner(BasePlanner):
    """Planner that prompts an LLM and parses its text output into a Plan."""

    # Chain that produces the raw plan text.
    llm_chain: LLMChain
    # Turns the raw LLM text into a structured Plan.
    output_parser: PlanOutputParser
    # Optional stop sequences forwarded to the LLM call.
    stop: Optional[List] = None

    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
        """Given input, decide what to do."""
        raw = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
        return self.output_parser.parse(raw)

    async def aplan(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> Plan:
        """Given input, decide what to do (async)."""
        raw = await self.llm_chain.arun(
            **inputs, stop=self.stop, callbacks=callbacks
        )
        return self.output_parser.parse(raw)
|
@@ -0,0 +1,56 @@
|
||||
import re
|
||||
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.schema.language_model import BaseLanguageModel
|
||||
from langchain.schema.messages import SystemMessage
|
||||
|
||||
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
|
||||
from langchain_experimental.plan_and_execute.schema import (
|
||||
Plan,
|
||||
PlanOutputParser,
|
||||
Step,
|
||||
)
|
||||
|
||||
# Default system prompt: instructs the model to emit a plan as a numbered
# list starting with the "Plan:" header and terminated by "<END_OF_PLAN>"
# (which load_chat_planner uses as a stop sequence).
SYSTEM_PROMPT = (
    "Let's first understand the problem and devise a plan to solve the problem."
    " Please output the plan starting with the header 'Plan:' "
    "and then followed by a numbered list of steps. "
    "Please make the plan the minimum number of steps required "
    "to accurately complete the task. If the task is a question, "
    "the final step should almost always be 'Given the above steps taken, "
    "please respond to the users original question'. "
    "At the end of your plan, say '<END_OF_PLAN>'"
)
|
||||
|
||||
|
||||
class PlanningOutputParser(PlanOutputParser):
    r"""Parses a numbered-list plan ("Plan:\n1. ...\n2. ...") into a Plan."""

    def parse(self, text: str) -> Plan:
        r"""Split *text* on numbered-list markers and wrap each item in a Step.

        The pattern must be a raw string: in a plain literal, ``\s`` and
        ``\d`` are invalid escape sequences (SyntaxWarning on Python 3.12+).
        The ``[1:]`` slice drops everything before the first numbered item
        (i.e. the "Plan:" header line).
        """
        steps = [Step(value=v) for v in re.split(r"\n\s*\d+\. ", text)[1:]]
        return Plan(steps=steps)
|
||||
|
||||
|
||||
def load_chat_planner(
    llm: BaseLanguageModel, system_prompt: str = SYSTEM_PROMPT
) -> LLMPlanner:
    """
    Load a chat planner.

    Args:
        llm: Language model used to generate the plan.
        system_prompt: System prompt describing how the plan must be
            formatted.

    Returns:
        LLMPlanner that stops at the end-of-plan marker and parses the
        numbered steps.
    """
    messages = [
        SystemMessage(content=system_prompt),
        HumanMessagePromptTemplate.from_template("{input}"),
    ]
    prompt = ChatPromptTemplate.from_messages(messages)
    planner_chain = LLMChain(llm=llm, prompt=prompt)
    return LLMPlanner(
        llm_chain=planner_chain,
        output_parser=PlanningOutputParser(),
        stop=["<END_OF_PLAN>"],
    )
|
@@ -0,0 +1,46 @@
|
||||
from abc import abstractmethod
|
||||
from typing import List, Tuple
|
||||
|
||||
from langchain.schema import BaseOutputParser
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class Step(BaseModel):
    """A single step of a plan."""

    # Free-form text describing the step.
    value: str
|
||||
|
||||
|
||||
class Plan(BaseModel):
    """An ordered sequence of steps to carry out."""

    # Steps in execution order.
    steps: List[Step]
|
||||
|
||||
|
||||
class StepResponse(BaseModel):
    """The response produced by executing a single step."""

    # Raw string response from the executor.
    response: str
|
||||
|
||||
|
||||
class BaseStepContainer(BaseModel):
    """Interface for accumulating executed steps and their responses."""

    @abstractmethod
    def add_step(self, step: Step, step_response: StepResponse) -> None:
        """Add step and step response to the container."""

    @abstractmethod
    def get_final_response(self) -> str:
        """Return the final response based on steps taken."""
|
||||
|
||||
|
||||
class ListStepContainer(BaseStepContainer):
    """Step container backed by a plain list of (step, response) pairs."""

    # (step, response) pairs in the order they were executed.
    steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)

    def add_step(self, step: Step, step_response: StepResponse) -> None:
        """Append the executed step and its response."""
        self.steps.append((step, step_response))

    def get_steps(self) -> List[Tuple[Step, StepResponse]]:
        """Return all recorded (step, response) pairs."""
        return self.steps

    def get_final_response(self) -> str:
        """Return the response of the last step taken.

        NOTE(review): raises IndexError if no step was ever added —
        presumably callers always execute at least one step; verify.
        """
        _, last_response = self.steps[-1]
        return last_response.response
|
||||
|
||||
|
||||
class PlanOutputParser(BaseOutputParser):
    """Output parser that turns raw LLM text into a ``Plan``."""

    @abstractmethod
    def parse(self, text: str) -> Plan:
        """Parse into a plan."""
|
Reference in New Issue
Block a user