From 17dee0228780758c24b21136f74e6cc28d6849b4 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Wed, 6 Mar 2024 12:59:34 -0500
Subject: [PATCH] Fix for issue #2080 where the GUI appears to hang when a
 chat with a large model is deleted.

There is no reason to save the context for a chat that is being deleted.

Signed-off-by: Adam Treat
---
 gpt4all-chat/chat.cpp        | 5 +++++
 gpt4all-chat/chat.h          | 1 +
 gpt4all-chat/chatlistmodel.h | 2 ++
 gpt4all-chat/chatllm.cpp     | 5 ++++-
 gpt4all-chat/chatllm.h       | 2 ++
 5 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp
index 86ea1a94..fe762648 100644
--- a/gpt4all-chat/chat.cpp
+++ b/gpt4all-chat/chat.cpp
@@ -286,6 +286,11 @@ void Chat::unloadAndDeleteLater()
     unloadModel();
 }

+void Chat::markForDeletion()
+{
+    m_llmodel->setMarkedForDeletion(true);
+}
+
 void Chat::unloadModel()
 {
     stopGenerating();
diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h
index 0f8b537a..423964f4 100644
--- a/gpt4all-chat/chat.h
+++ b/gpt4all-chat/chat.h
@@ -84,6 +84,7 @@ public:
     Q_INVOKABLE void forceReloadModel();
     Q_INVOKABLE void trySwitchContextOfLoadedModel();
     void unloadAndDeleteLater();
+    void markForDeletion();

     qint64 creationDate() const { return m_creationDate; }
     bool serialize(QDataStream &stream, int version) const;
diff --git a/gpt4all-chat/chatlistmodel.h b/gpt4all-chat/chatlistmodel.h
index fbbb99a3..7460bc80 100644
--- a/gpt4all-chat/chatlistmodel.h
+++ b/gpt4all-chat/chatlistmodel.h
@@ -143,6 +143,8 @@ public:
             m_newChat = nullptr;
         }

+        chat->markForDeletion();
+
         const int index = m_chats.indexOf(chat);
         if (m_chats.count() < 3 /*m_serverChat included*/) {
             addChat();
diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 5650b43d..3d2ecb18 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -64,6 +64,7 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
     , m_isRecalc(false)
     , m_shouldBeLoaded(false)
     , m_forceUnloadModel(false)
+    , m_markedForDeletion(false)
     , m_shouldTrySwitchContext(false)
     , m_stopGenerating(false)
     , m_timer(nullptr)
@@ -690,7 +691,9 @@ void ChatLLM::unloadModel()
     else
         emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value

-    saveState();
+    if (!m_markedForDeletion)
+        saveState();
+
 #if defined(DEBUG_MODEL_LOADING)
     qDebug() << "unloadModel" << m_llmThread.objectName() << m_llModelInfo.model;
 #endif
diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h
index 03e6edc6..3c22f998 100644
--- a/gpt4all-chat/chatllm.h
+++ b/gpt4all-chat/chatllm.h
@@ -84,6 +84,7 @@ public:
     void setShouldBeLoaded(bool b);
     void setShouldTrySwitchContext(bool b);
     void setForceUnloadModel(bool b) { m_forceUnloadModel = b; }
+    void setMarkedForDeletion(bool b) { m_markedForDeletion = b; }

     QString response() const;

@@ -177,6 +178,7 @@ private:
     std::atomic<bool> m_shouldTrySwitchContext;
     std::atomic<bool> m_isRecalc;
     std::atomic<bool> m_forceUnloadModel;
+    std::atomic<bool> m_markedForDeletion;
     bool m_isServer;
     bool m_forceMetal;
     bool m_reloadingToChangeVariant;
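
Note: the pattern this patch applies (mark an object for deletion so its
teardown can skip work whose results would be discarded anyway) can be shown
outside Qt. The following is a minimal standalone C++ sketch; the Session
class, its members, and the sleep standing in for context serialization are
hypothetical illustrations, not gpt4all code.

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

class Session {
public:
    // Called by the owner right before the session is destroyed, so that
    // teardown can skip the expensive save below.
    void markForDeletion() { m_markedForDeletion.store(true); }

    void unload() {
        // Persisting a large context is the slow step that made the GUI
        // appear to hang; it is pointless for a session being deleted.
        if (!m_markedForDeletion.load())
            saveState();
        std::cout << "unloaded\n";
    }

private:
    void saveState() {
        // Stand-in for serializing a multi-gigabyte model context to disk.
        std::this_thread::sleep_for(std::chrono::seconds(2));
        std::cout << "state saved\n";
    }

    // Atomic because, as in the patch, the flag is set from one thread (the
    // GUI) and read from another (the worker that owns the model).
    std::atomic<bool> m_markedForDeletion{false};
};

int main() {
    Session doomed;
    doomed.markForDeletion(); // delete path: skip the slow save
    doomed.unload();          // returns immediately

    Session kept;
    kept.unload();            // normal path: pays the cost of saveState()
}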