Repository: https://github.com/nomic-ai/gpt4all.git
Fix for issue #2080 where the GUI appears to hang when a chat with a large
model is deleted. There is no reason to save the context for a chat that is
being deleted.

Signed-off-by: Adam Treat <treat.adam@gmail.com>
parent 44717682a7
commit 17dee02287
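The change in a nutshell: when a chat is about to be deleted, ChatListModel::removeChat() now calls the new Chat::markForDeletion(), which sets an atomic flag on the ChatLLM worker; ChatLLM::unloadModel() then skips the potentially very slow saveState() serialization of the model context whenever that flag is set, so deleting a chat with a large loaded model no longer stalls the GUI. The following standalone sketch (illustrative class names, not the real GPT4All API) shows the pattern the hunks below implement:

// Minimal standalone sketch of the pattern this commit introduces.
// Names here are illustrative, not part of the repository.
#include <atomic>
#include <iostream>

class WorkerSketch {
public:
    // Called from the GUI thread just before the owning chat is deleted.
    void setMarkedForDeletion(bool b) { m_markedForDeletion = b; }

    // Runs on the worker thread that owns the model.
    void unloadModel() {
        // Serializing a large model's context can take a long time and
        // made the GUI appear to hang (#2080); skip it when the chat is
        // being deleted anyway.
        if (!m_markedForDeletion)
            saveState();
        std::cout << "model unloaded\n";
    }

private:
    void saveState() { std::cout << "saving context to disk...\n"; }

    // Atomic because it is written by the GUI thread and read by the
    // worker thread.
    std::atomic<bool> m_markedForDeletion{false};
};

int main() {
    WorkerSketch worker;
    worker.setMarkedForDeletion(true); // deletion path
    worker.unloadModel();              // skips the save, unloads directly
    return 0;
}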
gpt4all-chat/chat.cpp
@@ -286,6 +286,11 @@ void Chat::unloadAndDeleteLater()
     unloadModel();
 }
 
+void Chat::markForDeletion()
+{
+    m_llmodel->setMarkedForDeletion(true);
+}
+
 void Chat::unloadModel()
 {
     stopGenerating();
gpt4all-chat/chat.h
@@ -84,6 +84,7 @@ public:
     Q_INVOKABLE void forceReloadModel();
     Q_INVOKABLE void trySwitchContextOfLoadedModel();
     void unloadAndDeleteLater();
+    void markForDeletion();
 
     qint64 creationDate() const { return m_creationDate; }
     bool serialize(QDataStream &stream, int version) const;
gpt4all-chat/chatlistmodel.h
@@ -143,6 +143,8 @@ public:
             m_newChat = nullptr;
         }
 
+        chat->markForDeletion();
+
         const int index = m_chats.indexOf(chat);
         if (m_chats.count() < 3 /*m_serverChat included*/) {
             addChat();
gpt4all-chat/chatllm.cpp
@@ -64,6 +64,7 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
     , m_isRecalc(false)
     , m_shouldBeLoaded(false)
     , m_forceUnloadModel(false)
+    , m_markedForDeletion(false)
     , m_shouldTrySwitchContext(false)
     , m_stopGenerating(false)
     , m_timer(nullptr)
@@ -690,7 +691,9 @@ void ChatLLM::unloadModel()
     else
         emit modelLoadingPercentageChanged(std::numeric_limits<float>::min()); // small non-zero positive value
 
-    saveState();
+    if (!m_markedForDeletion)
+        saveState();
+
 #if defined(DEBUG_MODEL_LOADING)
     qDebug() << "unloadModel" << m_llmThread.objectName() << m_llModelInfo.model;
 #endif
gpt4all-chat/chatllm.h
@@ -84,6 +84,7 @@ public:
     void setShouldBeLoaded(bool b);
     void setShouldTrySwitchContext(bool b);
     void setForceUnloadModel(bool b) { m_forceUnloadModel = b; }
+    void setMarkedForDeletion(bool b) { m_markedForDeletion = b; }
 
     QString response() const;
 
@@ -177,6 +178,7 @@ private:
     std::atomic<bool> m_shouldTrySwitchContext;
     std::atomic<bool> m_isRecalc;
     std::atomic<bool> m_forceUnloadModel;
+    std::atomic<bool> m_markedForDeletion;
     bool m_isServer;
     bool m_forceMetal;
     bool m_reloadingToChangeVariant;