Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-06 02:50:36 +00:00)
Don't store db results in ChatLLM.
@@ -58,6 +58,7 @@ void Chat::connectLLM()
     connect(m_llmodel, &ChatLLM::recalcChanged, this, &Chat::handleRecalculating, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::databaseResultsChanged, this, &Chat::handleDatabaseResultsChanged, Qt::QueuedConnection);

     connect(this, &Chat::promptRequested, m_llmodel, &ChatLLM::prompt, Qt::QueuedConnection);
     connect(this, &Chat::modelNameChangeRequested, m_llmodel, &ChatLLM::modelNameChangeRequested, Qt::QueuedConnection);
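The new connect() call assumes ChatLLM now forwards its retrieval results through a signal instead of keeping them; a minimal sketch of the declaration that connection implies (only the signal name and parameter type come from this diff, its placement in chatllm.h is an assumption):

    // chatllm.h (sketch): ChatLLM emits the results it no longer stores
    signals:
        void databaseResultsChanged(const QList<ResultInfo> &results);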
@@ -177,11 +178,6 @@ void Chat::handleModelLoadedChanged()
         deleteLater();
 }

-QList<ResultInfo> Chat::databaseResults() const
-{
-    return m_llmodel->databaseResults();
-}
-
 void Chat::promptProcessing()
 {
     m_responseState = !databaseResults().isEmpty() ? Chat::LocalDocsProcessing : Chat::PromptProcessing;
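Note that promptProcessing() still calls databaseResults(), so dropping the .cpp definition only works if the accessor now lives elsewhere and reads Chat's own cached copy; a plausible sketch of that accessor (an assumption, not shown in this diff):

    // chat.h (sketch): the accessor now returns the locally cached results
    QList<ResultInfo> databaseResults() const { return m_databaseResults; }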
@@ -348,6 +344,11 @@ void Chat::handleTokenSpeedChanged(const QString &tokenSpeed)
     emit tokenSpeedChanged();
 }

+void Chat::handleDatabaseResultsChanged(const QList<ResultInfo> &results)
+{
+    m_databaseResults = results;
+}
+
 bool Chat::serialize(QDataStream &stream, int version) const
 {
     stream << m_creationDate;
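The new handler writes to m_databaseResults, which implies Chat also gains a member for the cached results and a declaration for the handler itself; a hedged sketch of those header-side declarations (slot visibility and ordering are assumptions):

    // chat.h (sketch): declarations implied by the new handler
    private Q_SLOTS:
        void handleDatabaseResultsChanged(const QList<ResultInfo> &results);
    private:
        QList<ResultInfo> m_databaseResults;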