chat: implement display of model loading warnings (#2034)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -46,6 +46,7 @@ void Chat::connectLLM()
     connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::modelLoadingError, this, &Chat::handleModelLoadingError, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::modelLoadingWarning, this, &Chat::modelLoadingWarning, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::recalcChanged, this, &Chat::handleRecalculating, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);
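The added connection forwards ChatLLM::modelLoadingWarning straight to Chat's own modelLoadingWarning signal; Qt allows a signal to be connected to another signal, so the chat object can re-emit the warning (e.g. for the UI) without an intermediate slot. A minimal standalone sketch of that forwarding pattern, using placeholder classes Worker and Forwarder rather than the gpt4all types:

// Sketch only: Worker and Forwarder are hypothetical stand-ins for
// ChatLLM and Chat; they are not part of the gpt4all sources.
#include <QCoreApplication>
#include <QDebug>
#include <QObject>
#include <QString>

class Worker : public QObject {
    Q_OBJECT
signals:
    void warning(const QString &msg);
};

class Forwarder : public QObject {
    Q_OBJECT
public:
    explicit Forwarder(Worker *w, QObject *parent = nullptr) : QObject(parent) {
        // Signal-to-signal connection: emitting Worker::warning makes
        // Forwarder re-emit its own warning signal with the same argument.
        connect(w, &Worker::warning, this, &Forwarder::warning, Qt::QueuedConnection);
    }
signals:
    void warning(const QString &msg);
};

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    Worker worker;
    Forwarder forwarder(&worker);
    QObject::connect(&forwarder, &Forwarder::warning,
                     [](const QString &msg) { qWarning() << "forwarded:" << msg; });

    emit worker.warning(QStringLiteral("example model loading warning"));
    // The queued connection delivers the forwarded signal on the next event
    // loop pass, so pump the loop once instead of entering exec().
    QCoreApplication::processEvents();
    return 0;
}

#include "main.moc" // Q_OBJECT classes defined in a .cpp need the generated moc output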
@@ -333,7 +334,8 @@ void Chat::handleRecalculating()
 
 void Chat::handleModelLoadingError(const QString &error)
 {
-    qWarning() << "ERROR:" << qPrintable(error) << "id" << id();
+    auto stream = qWarning().noquote() << "ERROR:" << error << "id";
+    stream.quote() << id();
     m_modelLoadingError = error;
     emit modelLoadingErrorChanged();
 }
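The rewritten logging relies on QDebug's quoting toggle: the old line passed the error through qPrintable() so it printed without quotes, while the new code keeps it as a QString, uses noquote() to print it bare, and then quote() to restore the default quoting before the chat id is streamed. A small standalone sketch of that behaviour, with example strings standing in for the real error and id():

// Sketch only: the error/id strings are examples, not gpt4all values.
#include <QDebug>
#include <QString>

int main()
{
    const QString error = QStringLiteral("unable to load model");
    const QString chatId = QStringLiteral("chat-42");

    // Default QDebug behaviour wraps QString arguments in quotes:
    qWarning() << "ERROR:" << error << "id" << chatId;
    // -> ERROR: "unable to load model" id "chat-42"

    // noquote() prints the error text bare; quote() re-enables quoting
    // before the id is streamed, mirroring the patched handler above:
    auto stream = qWarning().noquote() << "ERROR:" << error << "id";
    stream.quote() << chatId;
    // -> ERROR: unable to load model id "chat-42"
    return 0;
}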