Fix loaded chats forgetting context with non-empty system prompt (#3015)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
parent 3025f9deff
commit 88b95950c5
CHANGELOG.md

@@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 - Fix a crash when attempting to continue a chat loaded from disk ([#2995](https://github.com/nomic-ai/gpt4all/pull/2995))
 - Fix the local server rejecting min\_p/top\_p less than 1 ([#2996](https://github.com/nomic-ai/gpt4all/pull/2996))
 - Fix "regenerate" always forgetting the most recent message ([#3011](https://github.com/nomic-ai/gpt4all/pull/3011))
+- Fix loaded chats forgetting context when there is a system prompt ([#3015](https://github.com/nomic-ai/gpt4all/pull/3015))
 
 ## [3.3.1] - 2024-09-27 ([v3.3.y](https://github.com/nomic-ai/gpt4all/tree/v3.3.y))
chatllm.cpp

@@ -1230,19 +1230,16 @@ void ChatLLM::restoreState()
 void ChatLLM::processSystemPrompt()
 {
     Q_ASSERT(isModelLoaded());
-    if (!isModelLoaded() || m_processedSystemPrompt || m_restoreStateFromText)
+    if (!isModelLoaded() || m_processedSystemPrompt)
         return;
 
     const std::string systemPrompt = MySettings::globalInstance()->modelSystemPrompt(m_modelInfo).toStdString();
-    if (QString::fromStdString(systemPrompt).trimmed().isEmpty()) {
-        m_processedSystemPrompt = true;
-        return;
-    }
 
     // Start with a whole new context
     m_stopGenerating = false;
     m_ctx = LLModel::PromptContext();
 
+    if (!QString::fromStdString(systemPrompt).trimmed().isEmpty()) {
         auto promptFunc = std::bind(&ChatLLM::handleSystemPrompt, this, std::placeholders::_1);
 
         const int32_t n_predict = MySettings::globalInstance()->modelMaxLength(m_modelInfo);
@@ -1275,6 +1272,7 @@ void ChatLLM::processSystemPrompt()
         printf("\n");
         fflush(stdout);
 #endif
+    }
 
     m_processedSystemPrompt = m_stopGenerating == false;
     m_pristineLoadedState = false;
@@ -1286,11 +1284,12 @@ void ChatLLM::processRestoreStateFromText()
     if (!isModelLoaded() || !m_restoreStateFromText || m_isServer)
         return;
 
+    processSystemPrompt();
+
     m_restoringFromText = true;
     emit restoringFromTextChanged();
 
     m_stopGenerating = false;
-    m_ctx = LLModel::PromptContext();
 
     auto promptFunc = std::bind(&ChatLLM::handleRestoreStateFromTextPrompt, this, std::placeholders::_1);
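Note on the change, as the hunks above suggest: previously `processSystemPrompt()` was skipped while a chat was being restored from text, so the first prompt after loading re-ran it, reset the context, and dropped the restored conversation whenever the system prompt was non-empty. The fix has `processRestoreStateFromText()` process the system prompt first and then replay the saved messages on top of it, without resetting the context again. Below is a minimal, self-contained sketch of that ordering; it is a toy model, not the gpt4all implementation, and `ToyChat`, `Context`, and the sample messages are invented stand-ins for `ChatLLM`, `LLModel::PromptContext`, and real chat data.

```cpp
// Toy model of the corrected call order (not the actual gpt4all code).
#include <iostream>
#include <string>
#include <vector>

struct Context {
    std::vector<std::string> tokens;  // stand-in for LLModel::PromptContext
};

struct ToyChat {
    Context ctx;
    std::string systemPrompt = "You are a helpful assistant.";
    bool processedSystemPrompt = false;

    // Start a fresh context, feed the system prompt if there is one, and
    // remember that it has been handled (runs at most once per load).
    void processSystemPrompt() {
        if (processedSystemPrompt)
            return;
        ctx = Context{};  // start with a whole new context
        if (!systemPrompt.empty())
            ctx.tokens.push_back("[system] " + systemPrompt);
        processedSystemPrompt = true;
    }

    // New order: process the system prompt first, then replay the saved
    // conversation on top of it without resetting the context again.
    void restoreFromText(const std::vector<std::string> &savedMessages) {
        processSystemPrompt();
        for (const auto &m : savedMessages)
            ctx.tokens.push_back(m);
    }

    // A new user turn after loading: the flag makes processSystemPrompt()
    // a no-op here, so the restored context survives.
    void prompt(const std::string &msg) {
        processSystemPrompt();
        ctx.tokens.push_back("[user] " + msg);
    }
};

int main() {
    ToyChat chat;
    chat.restoreFromText({"[user] hi", "[assistant] hello!"});
    chat.prompt("what did I just say?");

    // With the old order, the system prompt was processed only after the
    // restore, wiping the replayed messages when it was non-empty.
    for (const auto &t : chat.ctx.tokens)
        std::cout << t << '\n';
}
```

The invariant in this sketch is that the context is reset in exactly one place, when the system prompt is processed, and the processed-flag guarantees that happens only once per loaded chat.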