diff --git a/langchain/agents/agent.py b/langchain/agents/agent.py
index 847633ab1c8..62ea2e983b5 100644
--- a/langchain/agents/agent.py
+++ b/langchain/agents/agent.py
@@ -137,7 +137,6 @@ class Agent(BaseModel):
         llm: BaseLLM,
         tools: List[Tool],
         callback_manager: Optional[BaseCallbackManager] = None,
-        verbose: bool = False,
     ) -> Agent:
         """Construct an agent from an LLM and tools."""
         cls._validate_tools(tools)
@@ -145,7 +144,6 @@ class Agent(BaseModel):
             llm=llm,
             prompt=cls.create_prompt(tools),
             callback_manager=callback_manager,
-            verbose=verbose,
         )
         return cls(llm_chain=llm_chain)
diff --git a/langchain/chains/base.py b/langchain/chains/base.py
index cc32f2eca76..af2e6cc1253 100644
--- a/langchain/chains/base.py
+++ b/langchain/chains/base.py
@@ -64,6 +64,17 @@ class Chain(BaseModel, ABC):
         """
         return callback_manager or get_callback_manager()
 
+    @validator("verbose", pre=True, always=True)
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+        """
+        if verbose is None:
+            return _get_verbosity()
+        else:
+            return verbose
+
     @property
     @abstractmethod
     def input_keys(self) -> List[str]:
diff --git a/langchain/chains/llm.py b/langchain/chains/llm.py
index 1cd191b7132..9a02a8149da 100644
--- a/langchain/chains/llm.py
+++ b/langchain/chains/llm.py
@@ -53,7 +53,6 @@ class LLMChain(Chain, BaseModel):
 
     def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
         """Generate LLM result from inputs."""
-        self.llm.verbose = self.verbose
         stop = None
         if "stop" in input_list[0]:
             stop = input_list[0]["stop"]
diff --git a/langchain/chains/qa_with_sources/__init__.py b/langchain/chains/qa_with_sources/__init__.py
index 88b60aecfea..56bb9bcd2e2 100644
--- a/langchain/chains/qa_with_sources/__init__.py
+++ b/langchain/chains/qa_with_sources/__init__.py
@@ -26,7 +26,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "summaries",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -49,7 +49,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -97,7 +97,7 @@ def _load_refine_chain(
     document_variable_name: str = "context_str",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
     initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -115,7 +115,10 @@
 
 
 def load_qa_with_sources_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load question answering with sources chain.
diff --git a/langchain/chains/question_answering/__init__.py b/langchain/chains/question_answering/__init__.py
index 7685d7ab184..1e9bc7cdb80 100644
--- a/langchain/chains/question_answering/__init__.py
+++ b/langchain/chains/question_answering/__init__.py
@@ -26,7 +26,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "context",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -48,7 +48,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -94,7 +94,7 @@ def _load_refine_chain(
     document_variable_name: str = "context_str",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
     initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -111,7 +111,10 @@
 
 
 def load_qa_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load question answering chain.
diff --git a/langchain/chains/summarize/__init__.py b/langchain/chains/summarize/__init__.py
index 8605ed2c2be..d0c9f35b313 100644
--- a/langchain/chains/summarize/__init__.py
+++ b/langchain/chains/summarize/__init__.py
@@ -22,7 +22,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "text",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -44,7 +44,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=map_prompt, verbose=verbose)
@@ -90,7 +90,7 @@ def _load_refine_chain(
     document_variable_name: str = "text",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
@@ -108,7 +108,10 @@
 
 
 def load_summarize_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load summarizing chain.
diff --git a/langchain/llms/base.py b/langchain/llms/base.py
index fbd77546b76..22e63b49084 100644
--- a/langchain/llms/base.py
+++ b/langchain/llms/base.py
@@ -41,6 +41,17 @@ class BaseLLM(BaseModel, ABC):
         """
         return callback_manager or get_callback_manager()
 
+    @validator("verbose", pre=True, always=True)
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+ """ + if verbose is None: + return _get_verbosity() + else: + return verbose + @abstractmethod def _generate( self, prompts: List[str], stop: Optional[List[str]] = None