llmodel: default to a blank line between reply and next prompt (#1996)
Also make some related adjustments to the provided Alpaca-style prompt templates and system prompts.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
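With the Alpaca format, for example, a template along the lines of "### Instruction:\n%1\n\n### Response:\n" supplies no text after the model's reply, so the new "\n\n" default leaves a blank line before the next "### Instruction:" header instead of running consecutive turns together. (That template is the conventional Alpaca layout, shown for illustration; it is not necessarily the exact string shipped with the app.)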
@@ -136,14 +136,17 @@ void LLModel::prompt(const std::string &prompt,
     }
 
     // decode the rest of the prompt template
-    // template: end of assistant prompt
+    std::string asstSuffix;
     if (placeholders.size() >= 2) {
+        // template: end of assistant prompt
         size_t start = placeholders[1].position() + placeholders[1].length();
-        auto asstSuffix = promptTemplate.substr(start);
-        if (!asstSuffix.empty()) {
-            embd_inp = tokenize(promptCtx, asstSuffix, true);
-            decodePrompt(promptCallback, responseCallback, recalculateCallback, promptCtx, embd_inp);
-        }
+        asstSuffix = promptTemplate.substr(start);
+    } else {
+        asstSuffix = "\n\n"; // default to a blank line, good for e.g. Alpaca
+    }
+    if (!asstSuffix.empty()) {
+        embd_inp = tokenize(promptCtx, asstSuffix, true);
+        decodePrompt(promptCallback, responseCallback, recalculateCallback, promptCtx, embd_inp);
+    }
 }
 
 
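For readers who want to try the behavior in isolation, below is a minimal standalone sketch of the suffix selection this hunk introduces. It is not the gpt4all API: assistantSuffix() is a hypothetical helper, and the "%1"/"%2" placeholder regex is an assumption made for illustration only.

// Minimal standalone sketch of the suffix selection shown in the diff above.
// NOT the gpt4all API: assistantSuffix() and the placeholder regex are
// assumptions made for illustration.
#include <iostream>
#include <regex>
#include <string>
#include <vector>

std::string assistantSuffix(const std::string &promptTemplate) {
    static const std::regex placeholder("%[12]");
    std::vector<std::smatch> placeholders;
    for (auto it = std::sregex_iterator(promptTemplate.begin(), promptTemplate.end(), placeholder);
         it != std::sregex_iterator(); ++it)
        placeholders.push_back(*it);

    if (placeholders.size() >= 2) {
        // template: end of assistant prompt
        auto start = placeholders[1].position() + placeholders[1].length();
        return promptTemplate.substr(start);
    }
    return "\n\n"; // default to a blank line, good for e.g. Alpaca
}

int main() {
    // Template with an explicit suffix after %2: the suffix is used verbatim.
    std::cout << '[' << assistantSuffix("### Instruction:\n%1\n\n### Response:\n%2\n") << "]\n";
    // Template with no %2 placeholder: falls back to the blank-line default.
    std::cout << '[' << assistantSuffix("### Instruction:\n%1\n\n### Response:\n") << "]\n";
}

Run as written, the first call prints the template's own suffix ("\n") and the second falls back to the two-newline default, mirroring the if/else added in the commit.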