llamamodel: fix semantic typo in nomic client dynamic mode (#2216)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
parent 46818e466e
commit 3f8257c563
@@ -302,8 +302,8 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
 
     if (llama_verbose()) {
         std::cerr << "llama.cpp: using Metal" << std::endl;
-        d_ptr->backend_name = "metal";
     }
+    d_ptr->backend_name = "metal";
 
     // always fully offload on Metal
     // TODO(cebtenzzre): use this parameter to allow using more than 53% of system RAM to load a model
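The substance of the fix: d_ptr->backend_name was only assigned when llama_verbose() returned true, so in non-verbose runs the recorded backend stayed at its default even though Metal was in use. The hunk above moves the assignment outside the conditional so only the log message remains gated on verbosity. Below is a minimal standalone sketch of the before/after behavior; Impl, load_metal_before, and load_metal_after are hypothetical stand-in names for illustration, not the real gpt4all internals.

#include <iostream>
#include <string>

// Hypothetical stand-in for the model's private state (not gpt4all's real type).
struct Impl {
    std::string backend_name = "cpu";
};

// Before the fix: the backend name was recorded only when verbose logging
// was enabled, so non-verbose runs reported the wrong backend.
void load_metal_before(Impl &d, bool verbose) {
    if (verbose) {
        std::cerr << "llama.cpp: using Metal" << std::endl;
        d.backend_name = "metal"; // skipped entirely when verbose == false
    }
}

// After the fix: only the log message stays conditional; the assignment
// happens on every Metal load.
void load_metal_after(Impl &d, bool verbose) {
    if (verbose) {
        std::cerr << "llama.cpp: using Metal" << std::endl;
    }
    d.backend_name = "metal"; // always recorded on the Metal path
}

int main() {
    Impl before, after;
    load_metal_before(before, /*verbose=*/false);
    load_metal_after(after, /*verbose=*/false);
    // Prints "cpu vs metal": the old code left the default in place.
    std::cout << before.backend_name << " vs " << after.backend_name << std::endl;
    return 0;
}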
@@ -68,7 +68,7 @@ def get_long_description():
 
 setup(
     name=package_name,
-    version="2.5.0",
+    version="2.5.1",
     description="Python bindings for GPT4All",
     long_description=get_long_description(),
     long_description_content_type="text/markdown",