From 0fe2e19691ffb98499a9b54a0f0c27b5188ec37e Mon Sep 17 00:00:00 2001
From: cebtenzzre
Date: Thu, 19 Oct 2023 13:46:33 -0400
Subject: [PATCH] llamamodel: re-enable error messages by default (#1537)

---
 gpt4all-backend/llamamodel.cpp   | 17 ++++++++---------
 gpt4all-bindings/python/setup.py |  2 +-
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/gpt4all-backend/llamamodel.cpp b/gpt4all-backend/llamamodel.cpp
index d5b35e47..23766b62 100644
--- a/gpt4all-backend/llamamodel.cpp
+++ b/gpt4all-backend/llamamodel.cpp
@@ -36,17 +36,18 @@ namespace {
 const char *modelType_ = "LLaMA";
 }
 
-static void null_log_callback(enum ggml_log_level level, const char* text, void* userdata) {
-    (void)level;
-    (void)text;
-    (void)userdata;
-}
-
 static bool llama_verbose() {
     const char* var = getenv("GPT4ALL_VERBOSE_LLAMACPP");
     return var && *var;
 }
 
+static void llama_log_callback(enum ggml_log_level level, const char *text, void *userdata) {
+    (void)userdata;
+    if (llama_verbose() || level <= GGML_LOG_LEVEL_ERROR) {
+        fputs(text, stderr);
+    }
+}
+
 struct gpt_params {
     int32_t seed = -1; // RNG seed
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
@@ -403,9 +404,7 @@ DLL_EXPORT bool magic_match(const char * fname) {
 }
 
 DLL_EXPORT LLModel *construct() {
-    if (!llama_verbose()) {
-        llama_log_set(null_log_callback, nullptr);
-    }
+    llama_log_set(llama_log_callback, nullptr);
     return new LLamaModel;
 }
 }
diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py
index 4d5d7e6d..e339e1db 100644
--- a/gpt4all-bindings/python/setup.py
+++ b/gpt4all-bindings/python/setup.py
@@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,
 
 setup(
     name=package_name,
-    version="2.0.0rc1",
+    version="2.0.0rc2",
     description="Python bindings for GPT4All",
     author="Nomic and the Open Source Community",
     author_email="support@nomic.ai",
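
Usage note: with this change, error messages from llama.cpp always reach stderr, and setting GPT4ALL_VERBOSE_LLAMACPP to any non-empty value restores the full log. The filtering logic can be exercised in isolation with a minimal standalone C++ sketch like the one below; the ggml_log_level values are stubbed here for illustration only, as the real enum is defined in ggml's headers.

    #include <cstdio>
    #include <cstdlib>

    // Stub of ggml's log-level enum, for illustration only; the real
    // definition lives in ggml.h.
    enum ggml_log_level {
        GGML_LOG_LEVEL_ERROR = 2,
        GGML_LOG_LEVEL_WARN  = 3,
        GGML_LOG_LEVEL_INFO  = 4,
    };

    // Same check as the patch: verbose only when GPT4ALL_VERBOSE_LLAMACPP
    // is set to a non-empty value.
    static bool llama_verbose() {
        const char *var = getenv("GPT4ALL_VERBOSE_LLAMACPP");
        return var && *var;
    }

    // Same filter as the new callback: errors always print; lower-severity
    // messages print only in verbose mode.
    static void llama_log_callback(enum ggml_log_level level, const char *text, void *userdata) {
        (void)userdata;
        if (llama_verbose() || level <= GGML_LOG_LEVEL_ERROR) {
            fputs(text, stderr);
        }
    }

    int main() {
        llama_log_callback(GGML_LOG_LEVEL_ERROR, "error: always printed\n", nullptr);
        llama_log_callback(GGML_LOG_LEVEL_INFO,  "info: printed only when verbose\n", nullptr);
    }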