Clean up backend code a bit and hide impl. details.

Author: Adam Treat
Date: 2023-07-08 10:04:38 -04:00
Committed by: AT
Parent: 33557b1f39
Commit: 1f749d7633
6 changed files with 72 additions and 71 deletions


@@ -240,11 +240,11 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
 #if defined(Q_OS_MAC) && defined(__arm__)
         if (m_forceMetal)
-            m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "metal");
+            m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "metal");
         else
-            m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "auto");
+            m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "auto");
 #else
-        m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "auto");
+        m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "auto");
 #endif
         if (m_llModelInfo.model) {
@@ -258,7 +258,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
             m_llModelInfo = LLModelInfo();
             emit modelLoadingError(QString("Could not load model due to invalid model file for %1").arg(modelInfo.filename()));
         } else {
-            switch (m_llModelInfo.model->implementation().modelType[0]) {
+            switch (m_llModelInfo.model->implementation().modelType()[0]) {
             case 'L': m_llModelType = LLModelType::LLAMA_; break;
             case 'G': m_llModelType = LLModelType::GPTJ_; break;
             case 'M': m_llModelType = LLModelType::MPT_; break;
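The switch above dispatches on the first character of the implementation's model type string, which this commit moves behind an accessor. A minimal sketch of that idea, assuming the backend keeps the type string as a private member; the class shape and member names here are illustrative, not taken from the actual headers:

// Illustrative sketch only: hides the former public modelType member behind
// an accessor, so callers write implementation().modelType()[0] as in the diff.
#include <string>

class Implementation {
public:
    explicit Implementation(std::string modelType)
        : m_modelType(std::move(modelType)) {}

    // Read-only accessor; the data member itself is no longer reachable.
    const std::string &modelType() const { return m_modelType; }

private:
    std::string m_modelType; // implementation detail kept private
};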


@@ -34,7 +34,7 @@ LLM::LLM()
     if (directoryExists(frameworksDir))
         llmodelSearchPaths += ";" + frameworksDir;
 #endif
-    LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
+    LLMImplementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString());
 #if defined(__x86_64__)
 #ifndef _MSC_VER
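
Putting the two renamed entry points together, a hedged usage sketch based only on the calls visible in this diff; the header name, return type, and helper function are assumptions rather than confirmed API:

// Assumed header exposing LLMImplementation and LLModel; the actual include
// path in the repository may differ.
#include <string>
#include "llmodel.h"

// Hypothetical helper, not part of the codebase, showing call order.
LLModel *loadWithSearchPath(const std::string &searchPaths, const std::string &modelPath)
{
    // Register the directories to scan for implementation libraries first,
    // mirroring the order used in LLM::LLM() above.
    LLMImplementation::setImplementationsSearchPath(searchPaths);

    // "auto" lets the backend pick a suitable build variant, as in
    // ChatLLM::loadModel(); "metal" forces the Metal build on Apple Silicon.
    return LLMImplementation::construct(modelPath, "auto");
}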