Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-08-01 15:51:00 +00:00.
llamamodel: re-enable error messages by default (#1537)
Commit 0fe2e19691 (parent: f505619c84).
@ -36,17 +36,18 @@ namespace {
|
||||
const char *modelType_ = "LLaMA";
|
||||
}
|
||||
|
||||
// Log sink that silently drops every llama.cpp log message.
// Installed via llama_log_set() when verbose output is not wanted.
static void null_log_callback(enum ggml_log_level level, const char *text, void *userdata) {
    // All three parameters are deliberately unused.
    (void)level;
    (void)text;
    (void)userdata;
}
|
||||
|
||||
// Whether verbose llama.cpp logging was requested.
// Opt-in: returns true only when GPT4ALL_VERBOSE_LLAMACPP is set to a
// non-empty string in the environment.
static bool llama_verbose() {
    const char *value = getenv("GPT4ALL_VERBOSE_LLAMACPP");
    if (value == NULL) {
        return false;
    }
    return *value != '\0';
}
|
||||
|
||||
static void llama_log_callback(enum ggml_log_level level, const char *text, void *userdata) {
|
||||
(void)userdata;
|
||||
if (llama_verbose() || level <= GGML_LOG_LEVEL_ERROR) {
|
||||
fputs(text, stderr);
|
||||
}
|
||||
}
|
||||
|
||||
struct gpt_params {
|
||||
int32_t seed = -1; // RNG seed
|
||||
int32_t n_keep = 0; // number of tokens to keep from initial prompt
|
||||
@ -403,9 +404,7 @@ DLL_EXPORT bool magic_match(const char * fname) {
|
||||
}
|
||||
|
||||
DLL_EXPORT LLModel *construct() {
    // Route llama.cpp logging through our filter: errors are always shown,
    // full output only when GPT4ALL_VERBOSE_LLAMACPP is set (see llama_verbose()).
    // The previous conditional install of null_log_callback was dead code --
    // it was immediately overwritten by this unconditional llama_log_set call,
    // so it has been removed.
    llama_log_set(llama_log_callback, nullptr);
    return new LLamaModel;
}
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,
|
||||
|
||||
setup(
|
||||
name=package_name,
|
||||
version="2.0.0rc1",
|
||||
version="2.0.0rc2",
|
||||
description="Python bindings for GPT4All",
|
||||
author="Nomic and the Open Source Community",
|
||||
author_email="support@nomic.ai",
|
||||
|
Loading…
Reference in New Issue
Block a user