Mirror of https://github.com/nomic-ai/gpt4all.git (last synced 2025-09-06 19:10:15 +00:00)
llamamodel: fix semantic typo in nomic client dynamic mode (#2216)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
@@ -302,8 +302,8 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
     if (llama_verbose()) {
         std::cerr << "llama.cpp: using Metal" << std::endl;
-        d_ptr->backend_name = "metal";
     }
+    d_ptr->backend_name = "metal";

     // always fully offload on Metal
     // TODO(cebtenzzre): use this parameter to allow using more than 53% of system RAM to load a model
Reference in New Issue
Block a user