diff --git a/gpt4all-backend/llmodel_shared.cpp b/gpt4all-backend/llmodel_shared.cpp
index 665da9c9..15696033 100644
--- a/gpt4all-backend/llmodel_shared.cpp
+++ b/gpt4all-backend/llmodel_shared.cpp
@@ -136,14 +136,17 @@ void LLModel::prompt(const std::string &prompt,
     }
 
     // decode the rest of the prompt template
+    // template: end of assistant prompt
+    std::string asstSuffix;
     if (placeholders.size() >= 2) {
-        // template: end of assistant prompt
         size_t start = placeholders[1].position() + placeholders[1].length();
-        auto asstSuffix = promptTemplate.substr(start);
-        if (!asstSuffix.empty()) {
-            embd_inp = tokenize(promptCtx, asstSuffix, true);
-            decodePrompt(promptCallback, responseCallback, recalculateCallback, promptCtx, embd_inp);
-        }
+        asstSuffix = promptTemplate.substr(start);
+    } else {
+ asstSuffix = "\n\n"; // default to a blank link, good for e.g. Alpaca
+    }
+    if (!asstSuffix.empty()) {
+        embd_inp = tokenize(promptCtx, asstSuffix, true);
+        decodePrompt(promptCallback, responseCallback, recalculateCallback, promptCtx, embd_inp);
     }
 }
 
diff --git a/gpt4all-bindings/python/docs/gpt4all_python.md b/gpt4all-bindings/python/docs/gpt4all_python.md
index 7e56fabe..bdbe7702 100644
--- a/gpt4all-bindings/python/docs/gpt4all_python.md
+++ b/gpt4all-bindings/python/docs/gpt4all_python.md
@@ -263,7 +263,7 @@ logging infrastructure offers [many more customization options][py-logging-cookb
 logging.basicConfig(level=logging.INFO)
 model = GPT4All('nous-hermes-llama2-13b.Q4_0.gguf')
 with model.chat_session('You are a geography expert.\nBe terse.',
-                        '### Instruction:\n{0}\n### Response:\n'):
+                        '### Instruction:\n{0}\n\n### Response:\n'):
     response = model.generate('who are you?', temp=0)
     print(response)
     response = model.generate('what are your favorite 3 mountains?', temp=0)
@@ -277,6 +277,7 @@ logging infrastructure offers [many more customization options][py-logging-cookb
 ### Instruction:
 who are you?
+
 ### Response:
 ===/LLModel.prompt_model -- prompt/===
@@ -284,6 +285,7 @@ logging infrastructure offers [many more customization options][py-logging-cookb
 INFO:gpt4all.pyllmodel:LLModel.prompt_model -- prompt:
 ### Instruction:
 what are your favorite 3 mountains?
+
 ### Response:
 ===/LLModel.prompt_model -- prompt/===
@@ -315,10 +317,10 @@ are used instead of model-specific system and prompt templates:
=== "Output"
```
default system template: ''
- default prompt template: '### Human: \n{0}\n### Assistant:\n'
+ default prompt template: '### Human: \n{0}\n\n### Assistant:\n'
session system template: ''
- session prompt template: '### Human: \n{0}\n### Assistant:\n'
+ session prompt template: '### Human: \n{0}\n\n### Assistant:\n'
```
diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py
index 4510ec32..54cf0d33 100644
--- a/gpt4all-bindings/python/gpt4all/gpt4all.py
+++ b/gpt4all-bindings/python/gpt4all/gpt4all.py
@@ -24,7 +24,7 @@ DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").re
 
 DEFAULT_MODEL_CONFIG = {
     "systemPrompt": "",
-    "promptTemplate": "### Human: \n{0}\n### Assistant:\n",
+    "promptTemplate": "### Human: \n{0}\n\n### Assistant:\n",
 }
 
 ConfigType = Dict[str, str]
diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json
index 03a60de3..28833d05 100644
--- a/gpt4all-chat/metadata/models3.json
+++ b/gpt4all-chat/metadata/models3.json
@@ -45,7 +45,7 @@
"systemPrompt": " ",
"description": "Very fast model with good quality