Mirror of https://github.com/nomic-ai/gpt4all.git (last synced 2025-09-10 04:49:07 +00:00).
llmodel: skip attempting Metal if model+kvcache > 53% of system ram
This commit modifies the hunk below (llmodel/llamamodel.cpp, near line 178):
@@ -178,7 +178,9 @@ int32_t LLamaModel::threadCount() const {
|
||||
|
||||
// Destructor: release the llama context owned (via d_ptr) by this model.
// NOTE(review): the scraped diff showed both the old unguarded free and the
// new guarded one; this is the intended post-commit form. Guard against a
// null ctx (e.g. when the model was never loaded or loading failed) so we
// never hand llama_free a null context — TODO confirm d_ptr itself is
// always non-null here (it appears to be an owned pimpl member).
LLamaModel::~LLamaModel()
{
    if (d_ptr->ctx) {
        llama_free(d_ptr->ctx);
    }
}
|
||||
|
||||
bool LLamaModel::isModelLoaded() const
|
||||
|
Reference in New Issue
Block a user