mirror of https://github.com/nomic-ai/gpt4all.git
fix/macm1ts (#1746)
* make runtime library backend universally searchable
* corepack enable
* fix
* pass tests
* simpler
* add more jsdoc
* fix tests
* fix up circle ci
* bump version
* remove false positive warning
* add disclaimer
* update readme
* revert
* update ts docs
---------
Co-authored-by: Matthew Nguyen <matthewpnguyen@Matthews-MacBook-Pro-7.local>
@@ -81,7 +81,7 @@ Napi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo& info)
 Napi::Value NodeModelWrapper::InitGpuByString(const Napi::CallbackInfo& info)
 {
     auto env = info.Env();
-    uint32_t memory_required = info[0].As<Napi::Number>();
+    size_t memory_required = static_cast<size_t>(info[0].As<Napi::Number>().Uint32Value());

     std::string gpu_device_identifier = info[1].As<Napi::String>();

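The old line relied on Napi::Number's implicit integer conversion and stored the result in a 32-bit type; the replacement converts explicitly via Uint32Value() and widens into size_t, the type the llmodel memory calls consume. Below is a minimal sketch of the same pattern, assuming node-addon-api (napi.h); ReadMemoryArg is a hypothetical helper, not part of the bindings:

    #include <napi.h>
    #include <cstddef>

    // Hypothetical helper: extract a JS number argument as size_t.
    static size_t ReadMemoryArg(const Napi::CallbackInfo& info)
    {
        // Validate the argument instead of relying on implicit coercion of info[0].
        if (info.Length() < 1 || !info[0].IsNumber()) {
            Napi::TypeError::New(info.Env(), "expected a number").ThrowAsJavaScriptException();
            return 0;
        }
        // Uint32Value() performs the JS-to-integer conversion; the cast widens to size_t.
        return static_cast<size_t>(info[0].As<Napi::Number>().Uint32Value());
    }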
@@ -149,16 +149,14 @@ Napi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo& info)
     }
     if(device != "cpu") {
         size_t mem = llmodel_required_mem(GetInference(), full_weight_path.c_str());
         if(mem == 0) {
             std::cout << "WARNING: no memory needed. does this model support gpu?\n";
         }
         std::cout << "Initiating GPU\n";
         std::cout << "Memory required estimation: " << mem << "\n";

         auto success = llmodel_gpu_init_gpu_device_by_string(GetInference(), mem, device.c_str());
         if(success) {
             std::cout << "GPU init successfully\n";
         } else {
             //https://github.com/nomic-ai/gpt4all/blob/3acbef14b7c2436fe033cae9036e695d77461a16/gpt4all-bindings/python/gpt4all/pyllmodel.py#L215
             //Haven't implemented this but it is still open to contribution
             std::cout << "WARNING: Failed to init GPU\n";
         }
     }
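Taken together, this hunk is the bindings' GPU bring-up path: estimate the model's memory footprint with llmodel_required_mem, request a device by identifier with llmodel_gpu_init_gpu_device_by_string, and fall back to CPU inference when that fails. A hedged sketch of the same sequence against the llmodel C API, using only the two calls and signatures that appear in the diff; TryInitGpu and the fallback policy are illustrative assumptions, not the bindings' actual API:

    #include <cstddef>
    #include <iostream>
    #include "llmodel_c.h"

    // Illustrative helper mirroring the diff's GPU bring-up sequence.
    static bool TryInitGpu(llmodel_model model, const char* weight_path, const char* device)
    {
        // A zero estimate is a hint that the backend cannot size this model for a GPU.
        size_t mem = llmodel_required_mem(model, weight_path);
        if (mem == 0) {
            std::cout << "WARNING: no memory needed. does this model support gpu?\n";
        }
        std::cout << "Memory required estimation: " << mem << "\n";
        // Request a device by identifier string (a backend or vendor name).
        if (!llmodel_gpu_init_gpu_device_by_string(model, mem, device)) {
            // Caller should fall back to CPU inference.
            std::cout << "WARNING: Failed to init GPU\n";
            return false;
        }
        return true;
    }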