Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-09-05 10:30:29 +00:00
Modellist temp
@@ -1,6 +1,8 @@
#include "llm.h"
#include "config.h"
#include "download.h"
#include "sysinfo.h"
#include "chatlistmodel.h"
#include "../gpt4all-backend/llmodel.h"
#include "network.h"

#include <QCoreApplication>
@@ -20,7 +22,6 @@ LLM *LLM::globalInstance()
 
 LLM::LLM()
     : QObject{nullptr}
-    , m_chatListModel(new ChatListModel(this))
     , m_threadCount(std::min(4, (int32_t) std::thread::hardware_concurrency()))
     , m_serverEnabled(false)
     , m_compatHardware(true)
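A note on the thread-count default kept in this initializer list: std::min(4, (int32_t) std::thread::hardware_concurrency()) caps the worker count at four, but hardware_concurrency() is allowed to return 0 when the core count cannot be determined, in which case the expression also yields 0. A minimal standalone sketch of the same expression (the program below is illustrative only and not part of the commit):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <thread>

int main()
{
    // Same clamp as the m_threadCount initializer above; prints 0 if
    // hardware_concurrency() cannot detect the core count.
    int32_t defaultThreads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    std::cout << "default thread count: " << defaultThreads << '\n';
    return 0;
}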
@@ -39,7 +40,7 @@ LLM::LLM()
 #endif
     LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
     connect(this, &LLM::serverEnabledChanged,
-        m_chatListModel, &ChatListModel::handleServerEnabledChanged);
+        ChatListModel::globalInstance(), &ChatListModel::handleServerEnabledChanged);
 
 #if defined(__x86_64__)
 #ifndef _MSC_VER
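The one-line change above swaps the receiver of the serverEnabledChanged connection from the m_chatListModel member to a globally shared instance obtained through ChatListModel::globalInstance(). The sketch below shows the general shape of such a Qt singleton accessor; it uses a hypothetical class name and is an illustration only, since ChatListModel's actual definition lives in chatlistmodel.h and is not part of this diff.

#include <QObject>

// Hypothetical stand-in for a globally shared model object; the real
// ChatListModel is declared in chatlistmodel.h and may be implemented differently.
class GlobalModelSketch : public QObject
{
public:
    // Function-local static: constructed on first use, one instance per process.
    static GlobalModelSketch *globalInstance()
    {
        static GlobalModelSketch instance;
        return &instance;
    }

    void handleServerEnabledChanged()
    {
        // React to LLM::serverEnabledChanged here.
    }

private:
    GlobalModelSketch() : QObject(nullptr) {}
};

With this pattern, connect(this, &LLM::serverEnabledChanged, GlobalModelSketch::globalInstance(), &GlobalModelSketch::handleServerEnabledChanged) works even when the LLM object no longer owns the receiver.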
@@ -95,6 +96,16 @@ bool LLM::fileExists(const QString &path) const
     return info.exists() && info.isFile();
 }
 
+qint64 LLM::systemTotalRAMInGB() const
+{
+    return getSystemTotalRAMInGB();
+}
+
+QString LLM::systemTotalRAMInGBString() const
+{
+    return QString::fromStdString(getSystemTotalRAMInGBString());
+}
+
 int32_t LLM::threadCount() const
 {
     return m_threadCount;
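The added systemTotalRAMInGB() and systemTotalRAMInGBString() methods are thin wrappers over helpers pulled in from sysinfo.h, which this diff does not show. Purely to illustrate the shape those helpers need (a qint64-compatible value and a std::string), here is a minimal, Linux-only sketch using sysconf(); the project's real implementation is not shown here and presumably covers other platforms and formatting details as well.

#include <cstdint>
#include <sstream>
#include <string>
#include <unistd.h>

// Illustrative Linux-only helpers matching the signatures the wrappers above call.
static int64_t getSystemTotalRAMInGB()
{
    const int64_t pages = sysconf(_SC_PHYS_PAGES);
    const int64_t pageSize = sysconf(_SC_PAGE_SIZE);
    return (pages * pageSize) / (1024LL * 1024 * 1024);
}

static std::string getSystemTotalRAMInGBString()
{
    std::ostringstream out;
    out << getSystemTotalRAMInGB() << " GB";
    return out.str();
}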