Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-07-04 11:08:01 +00:00
chatllm: use a better prompt for the generated chat name (#2322)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
parent f26e8d0d87
commit 5fb9d17c00
@@ -782,13 +782,13 @@ void ChatLLM::generateName()
     if (!isModelLoaded())
         return;
 
-    std::string instructPrompt("### Instruction:\n%1\n### Response:\n"); // standard Alpaca
+    auto promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);
     auto promptFunc = std::bind(&ChatLLM::handleNamePrompt, this, std::placeholders::_1);
     auto responseFunc = std::bind(&ChatLLM::handleNameResponse, this, std::placeholders::_1, std::placeholders::_2);
     auto recalcFunc = std::bind(&ChatLLM::handleNameRecalculate, this, std::placeholders::_1);
     LLModel::PromptContext ctx = m_ctx;
-    m_llModelInfo.model->prompt("Describe response above in three words.", instructPrompt, promptFunc, responseFunc,
-        recalcFunc, ctx);
+    m_llModelInfo.model->prompt("Describe the above conversation in three words or less.",
+        promptTemplate.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
     std::string trimmed = trim_whitespace(m_nameResponse);
     if (trimmed != m_nameResponse) {
         m_nameResponse = trimmed;