Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-08-20 08:53:37 +00:00
Don't start recalculating context immediately upon switching to a new chat
but rather wait until the first prompt. This allows users to switch between chats quickly and to delete chats more easily. Fixes issue #1545.
This commit is contained in:
parent 7bcd9e8089
commit dc2e7d6e9b
@@ -503,6 +503,11 @@ bool ChatLLM::handleRecalculate(bool isRecalc)
 }
 bool ChatLLM::prompt(const QList<QString> &collectionList, const QString &prompt)
 {
+    if (m_restoreStateFromText) {
+        Q_ASSERT(m_state.isEmpty());
+        processRestoreStateFromText();
+    }
+
     if (!m_processedSystemPrompt)
         processSystemPrompt();
     const QString promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);

@@ -906,11 +911,6 @@ void ChatLLM::restoreState()
         return;
     }
 
-    if (m_restoreStateFromText) {
-        Q_ASSERT(m_state.isEmpty());
-        processRestoreStateFromText();
-    }
-
 #if defined(DEBUG)
     qDebug() << "restoreState" << m_llmThread.objectName() << "size:" << m_state.size();
 #endif
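Taken together, the two hunks move the m_restoreStateFromText replay out of restoreState(), which runs as soon as a chat is switched to, and into prompt(), which runs only when the user actually sends a message. As a rough illustration of that lazy-restore pattern, here is a minimal, self-contained C++ sketch; the class name, member names, and simplified signatures below are hypothetical stand-ins for illustration, not the real ChatLLM API.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Hypothetical simplified stand-in for ChatLLM, for illustration only.
class LazyChat {
public:
    // Called when the user switches to a chat that was serialized as text.
    // Only a flag and the saved turns are recorded; nothing is recalculated yet.
    void markRestorePending(std::vector<std::string> savedTurns) {
        m_restoreStateFromText = true;
        m_textToRestore = std::move(savedTurns);
    }

    // Called on the first prompt after the switch: the expensive context
    // rebuild is deferred until the model is actually needed.
    std::string prompt(const std::string &userPrompt) {
        if (m_restoreStateFromText) {
            assert(m_state.empty());
            processRestoreStateFromText();
        }
        // ... generate a response against the (now restored) context ...
        return "response to: " + userPrompt;
    }

private:
    // Replays the saved conversation turns to rebuild the in-memory state.
    void processRestoreStateFromText() {
        for (auto &turn : m_textToRestore)
            m_state.push_back(std::move(turn));
        m_textToRestore.clear();
        m_restoreStateFromText = false;
    }

    bool m_restoreStateFromText = false;
    std::vector<std::string> m_textToRestore;
    std::vector<std::string> m_state;
};

Deferring the replay in this way keeps chat switching and chat deletion cheap, because the expensive context recalculation only happens if and when the restored chat receives its first prompt.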