From dbe1d029ecac99ccc8874f1759a56b2af380c6f8 Mon Sep 17 00:00:00 2001 From: northern-64bit <75195383+northern-64bit@users.noreply.github.com> Date: Fri, 23 Jun 2023 23:47:10 +0200 Subject: [PATCH] Fix grammar mistake in base.py in planners (#6611) Fix a typo in `langchain/experimental/plan_and_execute/planners/base.py` by changing "Given input, decided what to do." to "Given input, decide what to do." This is in the docstring for functions running LLM chains that create a plan; "decided" does not make sense in this context. --- langchain/experimental/plan_and_execute/planners/base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/langchain/experimental/plan_and_execute/planners/base.py b/langchain/experimental/plan_and_execute/planners/base.py index 63e8a8cd039..b91109bab2b 100644 --- a/langchain/experimental/plan_and_execute/planners/base.py +++ b/langchain/experimental/plan_and_execute/planners/base.py @@ -11,13 +11,13 @@ from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParse class BasePlanner(BaseModel): @abstractmethod def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan: - """Given input, decided what to do.""" + """Given input, decide what to do.""" @abstractmethod async def aplan( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> Plan: - """Given input, decided what to do.""" + """Given input, decide what to do.""" class LLMPlanner(BasePlanner): @@ -26,14 +26,14 @@ class LLMPlanner(BasePlanner): stop: Optional[List] = None def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan: - """Given input, decided what to do.""" + """Given input, decide what to do.""" llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks) return self.output_parser.parse(llm_response) async def aplan( self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any ) -> Plan: - """Given input, decided what to do.""" + """Given 
input, decide what to do.""" llm_response = await self.llm_chain.arun( **inputs, stop=self.stop, callbacks=callbacks )