Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-29 08:46:10 +00:00).
Commit: "Add thread count setting". This commit is contained in:
llm.cpp (22 lines changed)
@@ -62,6 +62,7 @@ bool LLMObject::loadModelPrivate(const QString &modelName)
|
||||
auto fin = std::ifstream(filePath.toStdString(), std::ios::binary);
|
||||
m_llmodel->loadModel(modelName.toStdString(), fin);
|
||||
emit isModelLoadedChanged();
|
||||
emit threadCountChanged();
|
||||
}
|
||||
|
||||
if (m_llmodel)
|
||||
@@ -70,6 +71,15 @@ bool LLMObject::loadModelPrivate(const QString &modelName)
|
||||
return m_llmodel;
|
||||
}
|
||||
|
||||
// Sets the number of inference threads on the underlying model and
// notifies listeners via threadCountChanged().
// Fix: guard the m_llmodel dereference — the pointer is null until a
// model is loaded, matching the null checks used elsewhere in this
// class (e.g. `if (m_llmodel)` and `m_llmodel && m_llmodel->isModelLoaded()`).
void LLMObject::setThreadCount(int32_t n_threads) {
    if (m_llmodel)
        m_llmodel->setThreadCount(n_threads);
    emit threadCountChanged();
}
|
||||
|
||||
int32_t LLMObject::threadCount() {
|
||||
return m_llmodel->threadCount();
|
||||
}
|
||||
|
||||
bool LLMObject::isModelLoaded() const
|
||||
{
|
||||
return m_llmodel && m_llmodel->isModelLoaded();
|
||||
@@ -225,6 +235,9 @@ LLM::LLM()
|
||||
connect(m_llmodel, &LLMObject::responseStopped, this, &LLM::responseStopped, Qt::QueuedConnection);
|
||||
connect(m_llmodel, &LLMObject::modelNameChanged, this, &LLM::modelNameChanged, Qt::QueuedConnection);
|
||||
connect(m_llmodel, &LLMObject::modelListChanged, this, &LLM::modelListChanged, Qt::QueuedConnection);
|
||||
connect(m_llmodel, &LLMObject::threadCountChanged, this, &LLM::threadCountChanged, Qt::QueuedConnection);
|
||||
|
||||
|
||||
connect(this, &LLM::promptRequested, m_llmodel, &LLMObject::prompt, Qt::QueuedConnection);
|
||||
connect(this, &LLM::modelNameChangeRequested, m_llmodel, &LLMObject::modelNameChangeRequested, Qt::QueuedConnection);
|
||||
|
||||
@@ -233,6 +246,7 @@ LLM::LLM()
|
||||
connect(this, &LLM::regenerateResponseRequested, m_llmodel, &LLMObject::regenerateResponse, Qt::BlockingQueuedConnection);
|
||||
connect(this, &LLM::resetResponseRequested, m_llmodel, &LLMObject::resetResponse, Qt::BlockingQueuedConnection);
|
||||
connect(this, &LLM::resetContextRequested, m_llmodel, &LLMObject::resetContext, Qt::BlockingQueuedConnection);
|
||||
connect(this, &LLM::setThreadCountRequested, m_llmodel, &LLMObject::setThreadCount, Qt::QueuedConnection);
|
||||
}
|
||||
|
||||
bool LLM::isModelLoaded() const
|
||||
@@ -300,6 +314,14 @@ QList<QString> LLM::modelList() const
|
||||
return m_llmodel->modelList();
|
||||
}
|
||||
|
||||
// Requests a new inference thread count. The request is forwarded to the
// worker-side LLMObject through the setThreadCountRequested signal (wired
// with Qt::QueuedConnection in the constructor) rather than touching the
// model directly from this thread.
void LLM::setThreadCount(int32_t n_threads) {
    emit setThreadCountRequested(n_threads);
}
|
||||
|
||||
int32_t LLM::threadCount() {
|
||||
return m_llmodel->threadCount();
|
||||
}
|
||||
|
||||
bool LLM::checkForUpdates() const
|
||||
{
|
||||
#if defined(Q_OS_LINUX)
|
||||
|
Reference in New Issue
Block a user