Mirror of https://github.com/nomic-ai/gpt4all.git
chat: generate follow-up questions after response (#2634)
* user can configure the prompt and when they appear
* also make the name generation prompt configurable

Signed-off-by: Adam Treat <treat.adam@gmail.com>
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
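The .cpp hunks below show only the implementation side of the change. For orientation, here is a minimal sketch of the header-side declarations they imply; only the identifiers that actually appear in the hunks (signal, slot, and member names) are confirmed, while the property name, accessor, access specifiers, and includes are assumptions rather than quotes from chat.h.

#include <QObject>
#include <QList>
#include <QString>

// Hypothetical excerpt of chat.h consistent with this diff.
class Chat : public QObject
{
    Q_OBJECT
    // Assumed property exposing the follow-up questions to the UI.
    Q_PROPERTY(QList<QString> generatedQuestions READ generatedQuestions NOTIFY generatedQuestionsChanged)

public:
    // Assumed accessor backing the property above.
    QList<QString> generatedQuestions() const { return m_generatedQuestions; }

Q_SIGNALS:
    void generatedQuestionsChanged(); // emitted in resetResponseState() and generatedQuestionFinished()
    void responseStateChanged();      // emitted whenever m_responseState changes

public Q_SLOTS:
    void generatingQuestions();                               // connected to ChatLLM::generatingQuestions
    void generatedQuestionFinished(const QString &question);  // connected to ChatLLM::generatedQuestionFinished

private:
    QList<QString> m_generatedQuestions; // cleared per response, appended to as questions arrive
};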
@@ -58,11 +58,13 @@ void Chat::connectLLM()
     connect(m_llmodel, &ChatLLM::modelLoadingPercentageChanged, this, &Chat::handleModelLoadingPercentageChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::responseChanged, this, &Chat::handleResponseChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::promptProcessing, this, &Chat::promptProcessing, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::generatingQuestions, this, &Chat::generatingQuestions, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::responseStopped, this, &Chat::responseStopped, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::modelLoadingError, this, &Chat::handleModelLoadingError, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::modelLoadingWarning, this, &Chat::modelLoadingWarning, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::recalcChanged, this, &Chat::handleRecalculating, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::generatedNameChanged, this, &Chat::generatedNameChanged, Qt::QueuedConnection);
+    connect(m_llmodel, &ChatLLM::generatedQuestionFinished, this, &Chat::generatedQuestionFinished, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::reportSpeed, this, &Chat::handleTokenSpeedChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::loadedModelInfoChanged, this, &Chat::loadedModelInfoChanged, Qt::QueuedConnection);
     connect(m_llmodel, &ChatLLM::databaseResultsChanged, this, &Chat::handleDatabaseResultsChanged, Qt::QueuedConnection);
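Every connection in this hunk is queued: ChatLLM presumably lives on a worker thread, so Qt::QueuedConnection posts each slot call to Chat's (GUI-thread) event loop instead of running it on the emitting thread. Below is a minimal, self-contained illustration of that pattern under those assumptions; Worker and Receiver are illustrative names, not gpt4all classes.

#include <QCoreApplication>
#include <QObject>
#include <QThread>
#include <QDebug>

class Worker : public QObject {
    Q_OBJECT
public Q_SLOTS:
    // Runs on the worker thread once the thread starts.
    void run() { emit questionReady(QStringLiteral("What else can I ask?")); }
Q_SIGNALS:
    void questionReady(const QString &question);
};

class Receiver : public QObject {
    Q_OBJECT
public Q_SLOTS:
    void onQuestion(const QString &q)
    {
        qDebug() << "follow-up question:" << q; // delivered on the main thread
        QCoreApplication::quit();
    }
};

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    QThread thread;
    Worker worker;
    Receiver receiver;
    worker.moveToThread(&thread);

    // QueuedConnection marshals the call into the receiver's event loop rather
    // than invoking the slot on the emitting (worker) thread.
    QObject::connect(&worker, &Worker::questionReady,
                     &receiver, &Receiver::onQuestion, Qt::QueuedConnection);
    QObject::connect(&thread, &QThread::started, &worker, &Worker::run);

    thread.start();
    int rc = app.exec();
    thread.quit();
    thread.wait();
    return rc;
}

#include "main.moc" // required because Q_OBJECT classes are defined in this .cpp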
@@ -113,6 +115,8 @@ void Chat::resetResponseState()
     if (m_responseInProgress && m_responseState == Chat::LocalDocsRetrieval)
         return;

+    m_generatedQuestions = QList<QString>();
+    emit generatedQuestionsChanged();
     m_tokenSpeed = QString();
     emit tokenSpeedChanged();
     m_responseInProgress = true;
@@ -186,6 +190,12 @@ void Chat::promptProcessing()
 void Chat::promptProcessing()
 {
     m_responseState = !databaseResults().isEmpty() ? Chat::LocalDocsProcessing : Chat::PromptProcessing;
     emit responseStateChanged();
 }

+void Chat::generatingQuestions()
+{
+    m_responseState = Chat::GeneratingQuestions;
+    emit responseStateChanged();
+}
+
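The Chat::GeneratingQuestions value used above suggests the response-state enum gained a new entry for the phase in which follow-up questions stream in. A hypothetical sketch follows; only the enumerators that actually appear in this diff are confirmed, the others are assumptions.

// Assumed shape of the response-state enum referenced by m_responseState.
enum ResponseState {
    ResponseStopped,      // assumed idle state
    LocalDocsRetrieval,   // confirmed by resetResponseState()
    LocalDocsProcessing,  // confirmed by promptProcessing()
    PromptProcessing,     // confirmed by promptProcessing()
    GeneratingQuestions,  // new: follow-up questions are being generated
};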
@@ -304,6 +314,12 @@ void Chat::generatedNameChanged(const QString &name)
     emit nameChanged();
 }

+void Chat::generatedQuestionFinished(const QString &question)
+{
+    m_generatedQuestions << question;
+    emit generatedQuestionsChanged();
+}
+
 void Chat::handleRecalculating()
 {
     Network::globalInstance()->trackChatEvent("recalc_context", { {"length", m_chatModel->count()} });