Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-29 09:58:44 +00:00)
fix verbosity (#496)

1. Remove verbose from a place it didn't really belong.
2. Everywhere else, make verbose Optional[bool] with a default of None.
3. Make base classes accept None, and look up the global verbosity setting in that case.
parent 164806a844 · commit b902bddb8a
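The core of points 2 and 3 is a pydantic validator that resolves verbose=None against a module-level flag at construction time. A minimal, self-contained sketch of that pattern (assuming pydantic v1 semantics; _GLOBAL_VERBOSE and this toy Chain are hypothetical stand-ins, not langchain's actual module layout):

from typing import Optional

from pydantic import BaseModel, validator

# Hypothetical stand-in for the library's module-level verbosity flag.
_GLOBAL_VERBOSE = False


def _get_verbosity() -> bool:
    """Look up the global verbosity flag."""
    return _GLOBAL_VERBOSE


class Chain(BaseModel):
    """Toy base class: verbose may be True, False, or None ("use the global")."""

    verbose: Optional[bool] = None

    @validator("verbose", pre=True, always=True)
    def set_verbose(cls, verbose: Optional[bool]) -> bool:
        # None means "defer to the global setting"; an explicit bool wins.
        if verbose is None:
            return _get_verbosity()
        return verbose


print(Chain().verbose)              # False -- falls back to the global flag
print(Chain(verbose=True).verbose)  # True  -- explicit value takes precedence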
@ -137,7 +137,6 @@ class Agent(BaseModel):
|
|||||||
llm: BaseLLM,
|
llm: BaseLLM,
|
||||||
tools: List[Tool],
|
tools: List[Tool],
|
||||||
callback_manager: Optional[BaseCallbackManager] = None,
|
callback_manager: Optional[BaseCallbackManager] = None,
|
||||||
verbose: bool = False,
|
|
||||||
) -> Agent:
|
) -> Agent:
|
||||||
"""Construct an agent from an LLM and tools."""
|
"""Construct an agent from an LLM and tools."""
|
||||||
cls._validate_tools(tools)
|
cls._validate_tools(tools)
|
||||||
@@ -145,7 +144,6 @@ class Agent(BaseModel):
             llm=llm,
             prompt=cls.create_prompt(tools),
             callback_manager=callback_manager,
-            verbose=verbose,
         )
         return cls(llm_chain=llm_chain)

@@ -64,6 +64,17 @@ class Chain(BaseModel, ABC):
         """
         return callback_manager or get_callback_manager()

+    @validator("verbose", pre=True, always=True)
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+        """
+        if verbose is None:
+            return _get_verbosity()
+        else:
+            return verbose
+
     @property
     @abstractmethod
     def input_keys(self) -> List[str]:
@@ -53,7 +53,6 @@ class LLMChain(Chain, BaseModel):

     def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
         """Generate LLM result from inputs."""
-        self.llm.verbose = self.verbose
         stop = None
         if "stop" in input_list[0]:
             stop = input_list[0]["stop"]
@@ -26,7 +26,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "summaries",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -49,7 +49,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -97,7 +97,7 @@ def _load_refine_chain(
     document_variable_name: str = "context_str",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
     initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -115,7 +115,10 @@ def _load_refine_chain(


 def load_qa_with_sources_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load question answering with sources chain.

@@ -26,7 +26,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "context",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -48,7 +48,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -94,7 +94,7 @@ def _load_refine_chain(
     document_variable_name: str = "context_str",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:
     initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
@@ -111,7 +111,10 @@ def _load_refine_chain(


 def load_qa_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load question answering chain.

@@ -22,7 +22,7 @@ def _load_stuff_chain(
     llm: BaseLLM,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_variable_name: str = "text",
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> StuffDocumentsChain:
     llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
@@ -44,7 +44,7 @@ def _load_map_reduce_chain(
     collapse_prompt: Optional[BasePromptTemplate] = None,
     reduce_llm: Optional[BaseLLM] = None,
     collapse_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> MapReduceDocumentsChain:
     map_chain = LLMChain(llm=llm, prompt=map_prompt, verbose=verbose)
@@ -90,7 +90,7 @@ def _load_refine_chain(
     document_variable_name: str = "text",
     initial_response_name: str = "existing_answer",
     refine_llm: Optional[BaseLLM] = None,
-    verbose: bool = False,
+    verbose: Optional[bool] = None,
     **kwargs: Any,
 ) -> RefineDocumentsChain:

@@ -108,7 +108,10 @@ def _load_refine_chain(


 def load_summarize_chain(
-    llm: BaseLLM, chain_type: str = "stuff", verbose: bool = False, **kwargs: Any
+    llm: BaseLLM,
+    chain_type: str = "stuff",
+    verbose: Optional[bool] = None,
+    **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load summarizing chain.

@@ -41,6 +41,17 @@ class BaseLLM(BaseModel, ABC):
         """
         return callback_manager or get_callback_manager()

+    @validator("verbose", pre=True, always=True)
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+        """If verbose is None, set it.
+
+        This allows users to pass in None as verbose to access the global setting.
+        """
+        if verbose is None:
+            return _get_verbosity()
+        else:
+            return verbose
+
     @abstractmethod
     def _generate(
         self, prompts: List[str], stop: Optional[List[str]] = None
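Net effect at the call sites, as a hedged usage sketch (it assumes the module-level langchain.verbose flag that _get_verbosity reads, the loader API shown in the diff above, and a configured OpenAI key to actually run):

import langchain
from langchain.chains.summarize import load_summarize_chain
from langchain.llms import OpenAI

langchain.verbose = True  # flip chain/LLM logging on library-wide

# verbose is left at its None default, so the constructed chain resolves
# it against the global flag instead of a hard-coded False.
chain = load_summarize_chain(OpenAI(temperature=0), chain_type="map_reduce")

# An explicit argument still overrides the global setting.
quiet = load_summarize_chain(OpenAI(temperature=0), chain_type="stuff", verbose=False)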