Latest rebase on llama.cpp with gguf support.

This commit is contained in:
Adam Treat
2023-09-21 12:41:48 -04:00
parent 5f3d739205
commit d90d003a1d
12 changed files with 245 additions and 85 deletions

View File

@@ -356,10 +356,10 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
emit modelLoadingError(QString("Could not find file for model %1").arg(modelInfo.filename()));
}
-    if (m_llModelInfo.model)
+    if (m_llModelInfo.model) {
         setModelInfo(modelInfo);
-    processSystemPrompt();
+        processSystemPrompt();
+    }
return m_llModelInfo.model;
}

View File

@@ -189,7 +189,7 @@ Window {
+ "causes include a bad file format, an incomplete or corrupted download, the wrong file "
+ "type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:"
+ "<br><ul>"
-                + "<li>Ensure the model file has a compatible ggml format and type"
+                + "<li>Ensure the model file has a compatible format and type"
+ "<li>Check the model file is complete in the download folder"
+ "<li>You can find the download folder in the settings dialog"
+ "<li>If you've sideloaded the model ensure the file is not corrupt by checking md5sum"

View File

@@ -796,7 +796,7 @@ void ModelList::updateModelsFromDirectory()
QString filename = it.fileName();
// All files that end with .bin and have 'ggml' somewhere in the name
-        if ((filename.endsWith(".bin") && filename.contains("ggml") && !filename.startsWith("incomplete"))
+        if (((filename.endsWith(".bin") || filename.endsWith(".gguf")) && (/*filename.contains("ggml") ||*/ filename.contains("gguf")) && !filename.startsWith("incomplete"))
|| (filename.endsWith(".txt") && filename.startsWith("chatgpt-"))) {
QString filePath = it.filePath();