Mirror of https://github.com/nomic-ai/gpt4all.git
Turn off the debugging messages by default.
commit 618895f0a1
parent c6df4645c8
@@ -421,15 +421,15 @@ bool ChatLLM::prompt(const QString &prompt, const QString &prompt_template, int3
     m_ctx.repeat_penalty = repeat_penalty;
     m_ctx.repeat_last_n = repeat_penalty_tokens;
     m_modelInfo.model->setThreadCount(n_threads);
-//#if defined(DEBUG)
+#if defined(DEBUG)
     printf("%s", qPrintable(instructPrompt));
     fflush(stdout);
-//#endif
+#endif
     m_modelInfo.model->prompt(instructPrompt.toStdString(), promptFunc, responseFunc, recalcFunc, m_ctx);
-//#if defined(DEBUG)
+#if defined(DEBUG)
     printf("\n");
     fflush(stdout);
-//#endif
+#endif
     m_responseLogits += m_ctx.logits.size() - logitsBefore;
     std::string trimmed = trim_whitespace(m_response);
     if (trimmed != m_response) {
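This first hunk restores the #if defined(DEBUG) / #endif guards around the prompt-logging printf calls in ChatLLM::prompt, so that output is compiled in only when DEBUG is defined. Below is a minimal standalone sketch of that pattern; the run_prompt helper is hypothetical and stands in for the project's actual code.

// Minimal sketch of the DEBUG-guard pattern (hypothetical helper, not the project's code).
#include <cstdio>
#include <string>

//#define DEBUG   // left commented out: debug output is off by default

static void run_prompt(const std::string &instructPrompt) {
#if defined(DEBUG)
    // Compiled in only when DEBUG is defined.
    printf("%s", instructPrompt.c_str());
    fflush(stdout);
#endif
    // ... normal prompt handling continues here ...
}

int main() {
    run_prompt("hello world\n");   // prints nothing unless DEBUG is defined
    return 0;
}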
@@ -4,7 +4,7 @@
 #include <QTimer>
 #include <QPdfDocument>

-#define DEBUG
+//#define DEBUG
 //#define DEBUG_EXAMPLE

 #define LOCALDOCS_VERSION 0
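The second hunk comments out #define DEBUG in the other file (the one that also defines LOCALDOCS_VERSION), so code gated on defined(DEBUG) evaluates to false and the debugging messages stay silent in default builds. Presumably a developer who wants them back can either uncomment that define or define DEBUG through the compiler command line (for example -DDEBUG) for a one-off debug build.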