Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-09-28 16:30:56 +00:00
Maxwell/Pascal GPU support and crash fix (#1895)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Submodule gpt4all-backend/llama.cpp-mainline updated: d2e3b5fd46...b0df9e6fad
@@ -196,6 +196,7 @@ public:
         m_newChat = nullptr;
         m_serverChat = nullptr;
         m_currentChat = nullptr;
+        for (auto * chat: m_chats) { delete chat; }
         m_chats.clear();
     }
 
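The added loop matters because m_chats stores raw pointers: QList::clear() only discards the pointers, so without an explicit delete the Chat objects (and any worker threads they own) would leak. A minimal sketch of the pattern, with a hypothetical Chat stand-in rather than gpt4all's real class:

// Minimal sketch of the cleanup pattern above; this Chat struct is a
// stand-in for gpt4all's real Chat class, which owns a ChatLLM worker.
#include <QList>

struct Chat {
    ~Chat() { /* the real destructor tears down the chat's ChatLLM */ }
};

void clearChats(QList<Chat *> &chats)
{
    // QList::clear() alone would drop the pointers and leak the objects,
    // so delete each Chat before clearing the list.
    for (auto *chat : chats)
        delete chat;
    chats.clear();
}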
@@ -63,9 +63,11 @@ int main(int argc, char *argv[])
     }
 #endif
 
+    int res = app.exec();
+
     // Make sure ChatLLM threads are joined before global destructors run.
     // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
     ChatListModel::globalInstance()->clearChats();
 
-    return app.exec();
+    return res;
 }
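The crash fix is an ordering change: before this hunk, clearChats() ran ahead of app.exec(), i.e. before the event loop had created any chats, so ChatLLM threads could still be running when main() returned and global destructors, including llama.cpp's, executed, producing the heap-use-after-free. The new code stores the event loop's result, tears the chats down after the loop exits, and only then returns. A minimal sketch of that shutdown order, with a plain QThread standing in for a ChatLLM worker:

// Sketch of the shutdown order the hunk above establishes; the QThread
// is an assumed stand-in for a ChatLLM worker thread.
#include <QCoreApplication>
#include <QThread>
#include <QTimer>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QThread worker;  // stand-in for a ChatLLM thread
    worker.start();

    // Exit the event loop immediately so this sketch terminates.
    QTimer::singleShot(0, &app, &QCoreApplication::quit);

    int res = app.exec();  // run the event loop to completion first

    // Join the worker before main() returns; a thread still running here
    // may touch state that global destructors have already freed.
    worker.quit();
    worker.wait();

    return res;
}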