diff --git a/gpt4all-backend/llama.cpp-mainline b/gpt4all-backend/llama.cpp-mainline
index d2e3b5fd..b0df9e6f 160000
--- a/gpt4all-backend/llama.cpp-mainline
+++ b/gpt4all-backend/llama.cpp-mainline
@@ -1 +1 @@
-Subproject commit d2e3b5fd464aa95d44b3aeb6d532ad7374f7478b
+Subproject commit b0df9e6fad734a4cff48e80b120b72f1f2c39094
diff --git a/gpt4all-chat/chatlistmodel.h b/gpt4all-chat/chatlistmodel.h
index 24a95c3d..3f99c622 100644
--- a/gpt4all-chat/chatlistmodel.h
+++ b/gpt4all-chat/chatlistmodel.h
@@ -196,6 +196,7 @@ public:
         m_newChat = nullptr;
         m_serverChat = nullptr;
         m_currentChat = nullptr;
+        for (auto * chat: m_chats) { delete chat; }
         m_chats.clear();
     }
 
diff --git a/gpt4all-chat/main.cpp b/gpt4all-chat/main.cpp
index d015581a..7debe8be 100644
--- a/gpt4all-chat/main.cpp
+++ b/gpt4all-chat/main.cpp
@@ -63,9 +63,11 @@ int main(int argc, char *argv[])
     }
 #endif
 
+    int res = app.exec();
+
     // Make sure ChatLLM threads are joined before global destructors run.
     // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
     ChatListModel::globalInstance()->clearChats();
-    return app.exec();
+    return res;
 }
 
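
For context, here is a standalone sketch of the shutdown-ordering pattern the main.cpp hunk applies. This is illustrative code, not part of the patch: WorkerPool, runEventLoop, and GlobalBackend are made-up stand-ins for the ChatLLM threads, app.exec(), and llama.cpp's global state.

// Minimal sketch (not GPT4All code) of the teardown pattern above:
// run the event loop first, capture its result, then explicitly join
// worker threads before main() returns, so no thread is still running
// when destructors of objects with static storage duration fire.
#include <atomic>
#include <thread>
#include <vector>

namespace {
// Stand-in for state with static storage duration (e.g. llama.cpp globals).
// Its destructor runs after main() returns; a thread that touches it after
// that point is the kind of heap-use-after-free this patch fixes.
struct GlobalBackend {
    std::atomic<bool> alive{true};
    ~GlobalBackend() { alive = false; }
} g_backend;
} // namespace

struct WorkerPool {
    std::vector<std::thread> threads;
    std::atomic<bool> stop{false};

    void start(int n) {
        for (int i = 0; i < n; ++i)
            threads.emplace_back([this] {
                // Pretend to use the global backend, as ChatLLM uses llama.cpp.
                while (!stop)
                    (void)g_backend.alive.load();
            });
    }

    // Explicit teardown; must run before main() returns.
    void clear() {
        stop = true;
        for (auto &t : threads)
            if (t.joinable()) t.join();
        threads.clear();
    }
};

int runEventLoop() { return 0; } // stand-in for app.exec()

int main() {
    WorkerPool pool;
    pool.start(2);

    int res = runEventLoop(); // capture the result first...
    pool.clear();             // ...then join workers while globals are alive
    return res;               // returning runEventLoop() directly would skip the join
}

The ordering is the whole fix: with the old `return app.exec();`, clearChats() ran before the event loop ever started (or not usefully at exit), so ChatLLM threads could still be inside llama.cpp when its global destructors ran after main() returned. The chatlistmodel.h hunk complements this: m_chats.clear() alone only dropped the Chat pointers, so the Chat objects (and presumably the ChatLLM threads they own) were never destroyed and joined; deleting each chat first fixes that. As a side note, Qt's qDeleteAll(m_chats) is the idiomatic equivalent of that explicit delete loop.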