llmodel: default to a blank line between reply and next prompt (#1996)

Also make some related adjustments to the provided Alpaca-style prompt templates
and system prompts.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
Jared Van Bortel
2024-02-26 13:11:15 -05:00
committed by GitHub
parent fc1a281381
commit f500bcf6e5
5 changed files with 20 additions and 15 deletions

View File

@@ -136,14 +136,17 @@ void LLModel::prompt(const std::string &prompt,
}
// decode the rest of the prompt template
// template: end of assistant prompt
std::string asstSuffix;
if (placeholders.size() >= 2) {
// template: end of assistant prompt
size_t start = placeholders[1].position() + placeholders[1].length();
auto asstSuffix = promptTemplate.substr(start);
if (!asstSuffix.empty()) {
embd_inp = tokenize(promptCtx, asstSuffix, true);
decodePrompt(promptCallback, responseCallback, recalculateCallback, promptCtx, embd_inp);
}
asstSuffix = promptTemplate.substr(start);
} else {
asstSuffix = "\n\n"; // default to a blank line, good for e.g. Alpaca
}
if (!asstSuffix.empty()) {
embd_inp = tokenize(promptCtx, asstSuffix, true);
decodePrompt(promptCallback, responseCallback, recalculateCallback, promptCtx, embd_inp);
}
}