Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-01 08:38:35 +00:00)
Maxwell/Pascal GPU support and crash fix (#1895)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
@@ -196,6 +196,7 @@ public:
        m_newChat = nullptr;
        m_serverChat = nullptr;
        m_currentChat = nullptr;
        for (auto * chat: m_chats) { delete chat; }
        m_chats.clear();
    }
@@ -63,9 +63,11 @@ int main(int argc, char *argv[])
     }
 #endif
 
+    int res = app.exec();
+
+    // Make sure ChatLLM threads are joined before global destructors run.
+    // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
+    ChatListModel::globalInstance()->clearChats();
+
-    return app.exec();
+    return res;
 }
|
Reference in New Issue
Block a user