Mirror of https://github.com/nomic-ai/gpt4all.git
Get rid of blocking behavior for regenerate response.
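The hunks below remove an emit of sendResetContext and an extra hop through resetContextProtected(). This excerpt does not show how that signal was connected, but ChatLLM lives on its own worker thread, and in that kind of Qt design the usual source of blocking is a cross-thread call made with Qt::BlockingQueuedConnection, which stalls the calling thread until the receiving thread has run the slot. The standalone sketch below only illustrates that difference; it is not taken from the gpt4all sources, and the worker object and lambdas are hypothetical stand-ins.

// Illustration of blocking vs. queued cross-thread calls in Qt.
// Hypothetical example; not from the gpt4all code base.
#include <QCoreApplication>
#include <QMetaObject>
#include <QObject>
#include <QThread>
#include <QDebug>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QThread workerThread;   // stands in for the thread ChatLLM runs on
    QObject worker;         // hypothetical worker object
    worker.moveToThread(&workerThread);
    workerThread.start();   // spins up an event loop in the worker thread

    // Blocking: the main thread stalls here until the worker thread has
    // executed the functor (the behavior a responsive UI wants to avoid).
    QMetaObject::invokeMethod(&worker, [] {
        qDebug() << "blocking reset ran in" << QThread::currentThread();
    }, Qt::BlockingQueuedConnection);
    qDebug() << "blocking call has returned";

    // Non-blocking: the call is queued on the worker thread's event loop and
    // the caller continues immediately.
    QMetaObject::invokeMethod(&worker, [] {
        qDebug() << "queued reset ran in" << QThread::currentThread();
    }, Qt::QueuedConnection);
    qDebug() << "queued call posted; the caller was not blocked";

    QThread::msleep(100);   // give the queued call time to run
    workerThread.quit();
    workerThread.wait();
    return 0;
}
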
@@ -146,7 +146,7 @@ bool ChatLLM::loadModel(const QString &modelName)
     // We have a live model, but it isn't the one we want
     bool alreadyAcquired = isModelLoaded();
     if (alreadyAcquired) {
-        resetContextProtected();
+        resetContext();
 #if defined(DEBUG_MODEL_LOADING)
         qDebug() << "already acquired model deleted" << m_chat->id() << m_modelInfo.model;
 #endif
@@ -301,12 +301,6 @@ void ChatLLM::resetResponse()
 }
 
 void ChatLLM::resetContext()
-{
-    resetContextProtected();
-    emit sendResetContext();
-}
-
-void ChatLLM::resetContextProtected()
 {
     regenerateResponse();
     m_ctx = LLModel::PromptContext();
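Read together, the deletions in the second hunk fold the old resetContextProtected() body into resetContext() itself, with no signal hop in between. Assembled purely from the context lines visible above (anything past the m_ctx assignment lies outside this excerpt), the merged function reads roughly as follows:

// Rough shape of resetContext() after this commit, assembled from the
// context lines of the hunk above; lines beyond m_ctx are not in the diff.
void ChatLLM::resetContext()
{
    regenerateResponse();              // same first step the old protected helper took
    m_ctx = LLModel::PromptContext();  // then start from a fresh prompt context
    // ...
}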