Modellist temp

Author: Adam Treat
Date: 2023-06-22 15:44:49 -04:00
parent c1794597a7
commit 7f01b153b3
25 changed files with 1784 additions and 1108 deletions

gpt4all-chat/llm.cpp

@@ -1,6 +1,8 @@
#include "llm.h"
#include "config.h"
#include "download.h"
#include "sysinfo.h"
#include "chatlistmodel.h"
#include "../gpt4all-backend/llmodel.h"
#include "network.h"
#include <QCoreApplication>
@@ -20,7 +22,6 @@ LLM *LLM::globalInstance()
LLM::LLM()
    : QObject{nullptr}
-   , m_chatListModel(new ChatListModel(this))
    , m_threadCount(std::min(4, (int32_t) std::thread::hardware_concurrency()))
    , m_serverEnabled(false)
    , m_compatHardware(true)
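
Note on the initializer above: the default thread count is clamped to at most four worker threads. As a standalone reference only (not part of this commit), the same expression can be exercised like this; std::thread::hardware_concurrency() is permitted to return 0 when the value cannot be determined, in which case the clamp also yields 0.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <thread>

int main()
{
    // Same clamp as in the constructor initializer: never default to more
    // than 4 threads, even on machines reporting many hardware threads.
    int32_t threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    std::cout << "default thread count: " << threads << '\n';
    return 0;
}
```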
@@ -39,7 +40,7 @@ LLM::LLM()
#endif
    LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
    connect(this, &LLM::serverEnabledChanged,
-       m_chatListModel, &ChatListModel::handleServerEnabledChanged);
+       ChatListModel::globalInstance(), &ChatListModel::handleServerEnabledChanged);
#if defined(__x86_64__)
#ifndef _MSC_VER
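
The connect() change above rewires the serverEnabledChanged signal from a per-LLM child object to a process-wide ChatListModel obtained through ChatListModel::globalInstance(). How that accessor is implemented is not visible in this hunk; the following is only a rough sketch, with a hypothetical class name, of one common way to expose such a global instance in Qt code.

```cpp
#include <QObject>

// Hypothetical stand-in for a model class exposing a global instance;
// this is not the ChatListModel implementation from this commit.
class SomeModel : public QObject
{
public:
    static SomeModel *globalInstance()
    {
        // One common approach: a function-local static created on first use.
        static SomeModel instance;
        return &instance;
    }

private:
    // No QObject parent: the object's lifetime is tied to the static above,
    // not to another QObject.
    SomeModel() : QObject{nullptr} {}
};
```

With such an accessor available, a connect() call can pass the global object as the receiver, which is what the new line above does with ChatListModel::globalInstance().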
@@ -95,6 +96,16 @@ bool LLM::fileExists(const QString &path) const
    return info.exists() && info.isFile();
}

+qint64 LLM::systemTotalRAMInGB() const
+{
+    return getSystemTotalRAMInGB();
+}
+
+QString LLM::systemTotalRAMInGBString() const
+{
+    return QString::fromStdString(getSystemTotalRAMInGBString());
+}
+
int32_t LLM::threadCount() const
{
    return m_threadCount;
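
The two new methods above are thin wrappers around getSystemTotalRAMInGB() and getSystemTotalRAMInGBString(), declared in sysinfo.h (seen in the include block at the top of this file); their implementations are not part of this hunk. Purely as an illustration, a Linux-only sketch of helpers with these names could look like the following; the real helpers are presumably platform-specific and may format the string differently.

```cpp
// Hypothetical, Linux-only sketch of sysinfo-style helpers; not the
// implementation shipped in this commit.
#include <sys/sysinfo.h>   // Linux sysinfo(2)
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

static int64_t systemTotalRAMInBytes()
{
    struct sysinfo info {};
    if (sysinfo(&info) != 0)
        return 0;                                    // query failed
    return (int64_t) info.totalram * info.mem_unit;  // totalram is counted in mem_unit-sized blocks
}

static int64_t getSystemTotalRAMInGB()
{
    return systemTotalRAMInBytes() / (1024LL * 1024 * 1024);
}

static std::string getSystemTotalRAMInGBString()
{
    std::ostringstream os;
    os << getSystemTotalRAMInGB() << " GB";
    return os.str();
}

int main()
{
    std::cout << getSystemTotalRAMInGBString() << '\n';
    return 0;
}
```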