From c9a0f2464612a89cca137c7837b9e854a6ae9b89 Mon Sep 17 00:00:00 2001
From: Austin <77757836+teleprint-me@users.noreply.github.com>
Date: Fri, 7 Jul 2023 15:08:25 -0400
Subject: [PATCH] Add verbose parameter for llamacpp (#7253)

**Title:** Add verbose parameter for llamacpp

**Description:** This pull request adds a `verbose` parameter to the llamacpp
module. When set to True, the parameter enables detailed llama.cpp logging on
stderr during model loading and execution, which can aid in debugging and in
understanding the module's internal behavior. It defaults to True and can be
set to False when less output is desired.

The new parameter is added to the `validate_environment` method of the
`LlamaCpp` class, which initializes the `llama_cpp.Llama` API:

```python
class LlamaCpp(LLM):
    ...
    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        ...
        model_param_names = [
            ...
            "verbose",  # New verbose parameter added
        ]
        ...
        values["client"] = Llama(model_path, **model_params)
        ...
```

---------

Signed-off-by: teleprint-me <77757836+teleprint-me@users.noreply.github.com>
---
 langchain/llms/llamacpp.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/langchain/llms/llamacpp.py b/langchain/llms/llamacpp.py
index d1a6bc0c88c..a48ec6b58e1 100644
--- a/langchain/llms/llamacpp.py
+++ b/langchain/llms/llamacpp.py
@@ -103,6 +103,9 @@ class LlamaCpp(LLM):
     streaming: bool = True
     """Whether to stream the results, token by token."""
 
+    verbose: bool = True
+    """Print verbose output to stderr."""
+
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that llama-cpp-python library is installed."""
@@ -121,6 +124,7 @@ class LlamaCpp(LLM):
             "n_batch",
             "use_mmap",
             "last_n_tokens_size",
+            "verbose",
         ]
         model_params = {k: values[k] for k in model_param_names}
         # For backwards compatibility, only include if non-null.
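
For reference, a minimal usage sketch assuming this patch is applied; the model path shown is a placeholder, and the prompt is illustrative only:

```python
# Minimal usage sketch (not part of the patch itself).
from langchain.llms import LlamaCpp

# verbose defaults to True; passing verbose=False suppresses llama.cpp's
# stderr logging during model loading and generation.
llm = LlamaCpp(
    model_path="./models/ggml-model-q4_0.bin",  # placeholder path
    verbose=False,
)

print(llm("Q: Name the planets in the solar system. A:"))
```

Because `verbose` is included in `model_param_names`, it is forwarded directly to the underlying `llama_cpp.Llama` constructor rather than being handled separately by the wrapper.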