diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index 088083d5..0f5da45f 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -788,13 +788,18 @@ void ChatLLM::processSystemPrompt()
if (!isModelLoaded() || m_processedSystemPrompt || m_isServer)
return;
+ const std::string systemPrompt = MySettings::globalInstance()->modelSystemPrompt(m_modelInfo).toStdString();
+ if (systemPrompt.empty()) {
+ m_processedSystemPrompt = true;
+ return;
+ }
+
m_stopGenerating = false;
auto promptFunc = std::bind(&ChatLLM::handleSystemPrompt, this, std::placeholders::_1);
auto responseFunc = std::bind(&ChatLLM::handleSystemResponse, this, std::placeholders::_1,
std::placeholders::_2);
auto recalcFunc = std::bind(&ChatLLM::handleSystemRecalculate, this, std::placeholders::_1);
- const std::string systemPrompt = MySettings::globalInstance()->modelSystemPrompt(m_modelInfo).toStdString();
const int32_t n_predict = MySettings::globalInstance()->modelMaxLength(m_modelInfo);
const int32_t top_k = MySettings::globalInstance()->modelTopK(m_modelInfo);
const float top_p = MySettings::globalInstance()->modelTopP(m_modelInfo);
diff --git a/gpt4all-chat/metadata/models.json b/gpt4all-chat/metadata/models.json
index f0578f71..13928a62 100644
--- a/gpt4all-chat/metadata/models.json
+++ b/gpt4all-chat/metadata/models.json
@@ -9,6 +9,7 @@
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Best overall model
- Instruction based
- Gives very long responses
- Finetuned with only 1k of high-quality data
- Trained by Microsoft and Peking University
- Cannot be used commercially
Best overall smaller model
- Fast responses
- Instruction based
- Trained by TII
- Finetuned by Nomic AI
- Licensed for commercial use
",
"url": "https://huggingface.co/nomic-ai/gpt4all-falcon-ggml/resolve/main/ggml-model-gpt4all-falcon-q4_0.bin",
"promptTemplate": "### Instruction:\n%1\n### Response:\n"
@@ -37,6 +39,7 @@
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Extremely good model
- Instruction based
- Gives long responses
- Curated with 300,000 uncensored instructions
- Trained by Nous Research
- Cannot be used commercially
",
"url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin",
"promptTemplate": "### Instruction:\n%1\n### Response:\n"
@@ -51,6 +54,7 @@
"parameters": "7 billion",
"quant": "q4_0",
"type": "GPT-J",
+ "systemPrompt": " ",
"description": "Creative model can be used for commercial purposes
- Fast responses
- Creative responses
- Instruction based
- Trained by Nomic AI
- Licensed for commercial use
"
},
{
@@ -64,6 +68,7 @@
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Very good overall model
- Instruction based
- Based on the same dataset as Groovy
- Slower than Groovy, with higher quality responses
- Trained by Nomic AI
- Cannot be used commercially
",
"url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"
},
@@ -140,6 +145,7 @@
"parameters": "7 billion",
"quant": "q4_2",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
- Instruction based
- Cannot be used commercially
"
},
{
@@ -152,6 +158,7 @@
"parameters": "13 billion",
"quant": "q4_2",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
- Instruction based
- Cannot be used commercially
"
},
{
@@ -164,6 +171,7 @@
"parameters": "7 billion",
"quant": "q4_2",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Good small model - trained by Microsoft and Peking University
- Instruction based
- Cannot be used commercially
"
},
{
@@ -190,6 +198,7 @@
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
+ "systemPrompt": " ",
"description": "Mosaic's instruction model
- Instruction based
- Trained by Mosaic ML
- Licensed for commercial use
"
},
{
@@ -203,6 +212,7 @@
"parameters": "7 billion",
"quant": "q4_0",
"type": "MPT",
+ "systemPrompt": " ",
"description": "Trained for text completion with no assistant finetuning
- Completion based
- Trained by Mosaic ML
- Licensed for commercial use
"
},
{
@@ -215,6 +225,7 @@
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Trained on ~180,000 instructions
- Instruction based
- Trained by Nous Research
- Cannot be used commercially
"
},
{
@@ -228,6 +239,7 @@
"parameters": "13 billion",
"quant": "q4_0",
"type": "LLaMA",
+ "systemPrompt": " ",
"description": "Trained on uncensored assistant data and instruction data
- Instruction based
- Cannot be used commercially
",
"url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin"
},
@@ -243,6 +255,7 @@
"parameters": "3 billion",
"quant": "f16",
"type": "Replit",
+ "systemPrompt": " ",
"description": "Trained on subset of the Stack
- Code completion based
- Licensed for commercial use
",
"url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin"
}