Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-28 17:38:36 +00:00)
fix verbosity (#496)
1. Remove verbose from a place it didn't really belong.
2. Everywhere else, make verbose an Optional[bool] defaulting to None.
3. Make the base classes accept None and, in that case, look up the global verbosity setting.
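In outline: every chain and LLM keeps verbose: Optional[bool] = None, and a pydantic validator resolves None against the global flag when the object is constructed. A minimal self-contained sketch of that pattern, assuming pydantic v1-style validators (GLOBAL_VERBOSE here is a hypothetical stand-in for langchain's real module-level setting, not the library's API):

from typing import Optional

from pydantic import BaseModel, validator

GLOBAL_VERBOSE = False  # hypothetical stand-in for the library's global flag


def _get_verbosity() -> bool:
    """Look up the module-level verbosity flag."""
    return GLOBAL_VERBOSE


class MyChain(BaseModel):
    # None means "defer to the global setting"; the validator resolves it
    # to a concrete bool when the model is constructed.
    verbose: Optional[bool] = None

    @validator("verbose", pre=True, always=True)
    def set_verbose(cls, verbose: Optional[bool]) -> bool:
        """If verbose is None, fall back to the global setting."""
        if verbose is None:
            return _get_verbosity()
        return verbose


assert MyChain().verbose is False             # inherits the global flag
assert MyChain(verbose=True).verbose is True  # an explicit value wins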
This commit is contained in:
parent 164806a844
commit b902bddb8a
@@ -137,7 +137,6 @@ class Agent(BaseModel):
         llm: BaseLLM,
         tools: List[Tool],
         callback_manager: Optional[BaseCallbackManager] = None,
-        verbose: bool = False,
     ) -> Agent:
         """Construct an agent from an LLM and tools."""
         cls._validate_tools(tools)
@@ -145,7 +144,6 @@ class Agent(BaseModel):
             llm=llm,
             prompt=cls.create_prompt(tools),
             callback_manager=callback_manager,
-            verbose=verbose,
         )
         return cls(llm_chain=llm_chain)
@@ -64,6 +64,17 @@ class Chain(BaseModel, ABC):
         """
         return callback_manager or get_callback_manager()

+    @validator("verbose", pre=True, always=True)
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+        """
+        if verbose is None:
+            return _get_verbosity()
+        else:
+            return verbose
+
     @property
     @abstractmethod
     def input_keys(self) -> List[str]:
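One consequence worth noting, demonstrated with the same stand-in pattern as the sketch above (not the real langchain classes): the validator reads the global flag once, at construction time, so flipping the flag later does not change already-built objects.

from typing import Optional

from pydantic import BaseModel, validator

GLOBAL_VERBOSE = False  # illustrative module-level flag


class MyChain(BaseModel):
    verbose: Optional[bool] = None

    @validator("verbose", pre=True, always=True)
    def set_verbose(cls, verbose: Optional[bool]) -> bool:
        # Resolve None against the flag's value at construction time.
        return GLOBAL_VERBOSE if verbose is None else verbose


quiet = MyChain()              # flag is False when this is constructed
GLOBAL_VERBOSE = True
loud = MyChain()               # flag is True when this is constructed
assert quiet.verbose is False  # earlier instance keeps its resolved value
assert loud.verbose is True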
@@ -53,7 +53,6 @@ class LLMChain(Chain, BaseModel):

     def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
         """Generate LLM result from inputs."""
-        self.llm.verbose = self.verbose
         stop = None
         if "stop" in input_list[0]:
             stop = input_list[0]["stop"]
@@ -26,7 +26,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "summaries",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -49,7 +49,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -97,7 +97,7 @@ def _load_refine_chain(
     document_variable_name: str = "context_str",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
     initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -115,7 +115,10 @@ def _load_refine_chain(


 def load_qa_with_sources_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load question answering with sources chain.
@@ -26,7 +26,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "context",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -48,7 +48,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -94,7 +94,7 @@ def _load_refine_chain(
     document_variable_name: str = "context_str",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
     initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -111,7 +111,10 @@ def _load_refine_chain(


 def load_qa_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load question answering chain.
@@ -22,7 +22,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "text",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -44,7 +44,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=map_prompt, verbose=verbose)
@@ -90,7 +90,7 @@ def _load_refine_chain(
     document_variable_name: str = "text",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
@@ -108,7 +108,10 @@ def _load_refine_chain(


 def load_summarize_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load summarizing chain.
@@ -41,6 +41,17 @@ class BaseLLM(BaseModel, ABC):
         """
         return callback_manager or get_callback_manager()

+    @validator("verbose", pre=True, always=True)
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+        """
+        if verbose is None:
+            return _get_verbosity()
+        else:
+            return verbose
+
     @abstractmethod
     def _generate(
         self, prompts: List[str], stop: Optional[List[str]] = None