Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-08-10 04:17:22 +00:00
Maxwell/Pascal GPU support and crash fix (#1895)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
parent b11c3f679e
commit 0a40e71652
@@ -1 +1 @@
-Subproject commit d2e3b5fd464aa95d44b3aeb6d532ad7374f7478b
+Subproject commit b0df9e6fad734a4cff48e80b120b72f1f2c39094
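This first hunk bumps a git submodule pointer to a new upstream revision. The file is not named in what survives of the page, but given the commit title and the llama.cpp comment later in this diff, it is most likely the bundled llama.cpp backend, with the new subproject commit carrying the Maxwell/Pascal GPU support the title advertises. As a general git note, a checkout only picks up a bumped subproject hash after running git submodule update --init --recursive.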
@@ -196,6 +196,7 @@ public:
         m_newChat = nullptr;
         m_serverChat = nullptr;
         m_currentChat = nullptr;
+        for (auto * chat: m_chats) { delete chat; }
         m_chats.clear();
     }

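The added loop matters because m_chats evidently holds raw owning pointers: clear() only empties the list of pointers, it never runs the pointees' destructors, so without the loop each Chat object (and whatever threads or buffers it owns) would outlive the call. A minimal sketch of the idiom, using std::vector and a hypothetical Worker type as a stand-in for the real Qt classes:

    #include <vector>

    struct Worker {
        ~Worker() { /* join threads, free buffers, etc. */ }
    };

    int main() {
        std::vector<Worker *> workers{new Worker, new Worker};

        // workers.clear() alone would drop the pointers and leak both
        // objects, silently skipping their destructors.
        for (auto * w : workers) { delete w; }  // runs ~Worker for each element
        workers.clear();                        // then empty the pointer list
        return 0;
    }

The commit follows the same two steps: delete every chat, then clear the now-dangling list.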
@@ -63,9 +63,11 @@ int main(int argc, char *argv[])
     }
 #endif
 
+    int res = app.exec();
+
     // Make sure ChatLLM threads are joined before global destructors run.
     // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
     ChatListModel::globalInstance()->clearChats();
 
-    return app.exec();
+    return res;
 }
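This change is purely about ordering. As the surrounding context lines show, clearChats() previously ran before app.exec() was ever reached, so the teardown happened at startup rather than at exit, and the ChatLLM threads were still alive when C++ destroyed globals after main() returned; hence the heap-use-after-free inside llama.cpp. Capturing the event loop's result in res lets the cleanup run after the loop exits but before main() returns. A minimal sketch of the hazard, using std::thread and illustrative names (stop, Worker-style lambda) rather than the real Qt/ChatLLM machinery:

    #include <atomic>
    #include <chrono>
    #include <thread>

    std::atomic<bool> stop{false};   // global: destroyed only after main() returns

    int main() {
        std::thread worker([] {      // stands in for a ChatLLM background thread
            while (!stop)
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
        });

        int res = 0;                 // in the real app: int res = app.exec();

        // Join the worker before main() returns: once main() is done, global
        // destructors run, and any thread still touching globals (or heap
        // objects they own) is a use-after-free waiting to happen.
        stop = true;
        worker.join();

        return res;
    }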