Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-06-20 20:53:23 +00:00

fixups for GPT4All v3.5.0-rc2 (#3239)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

parent 7628106d55
commit 70cca3fdcf
@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 ### Changed
 - Update Italian translation ([#3236](https://github.com/nomic-ai/gpt4all/pull/3236))
 
+### Fixed
+- Fix a few more problems with the Jinja changes ([#3239](https://github.com/nomic-ai/gpt4all/pull/3239))
+
 ## [3.5.0-rc2] - 2024-12-06
 
 ### Changed
@@ -599,7 +599,14 @@ GridLayout {
         Layout.fillWidth: false
         name: editingDisabledReason ?? qsTr("Redo")
         source: "qrc:/gpt4all/icons/regenerate.svg"
-        onClicked: redoResponseDialog.open()
+        onClicked: {
+            if (index == chatModel.count - 1) {
+                // regenerate last message without confirmation
+                currentChat.regenerateResponse(index);
+                return;
+            }
+            redoResponseDialog.open();
+        }
     }
 
     ChatMessageButton {
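(Behavioral note on the hunk above: redoing the newest response now regenerates it immediately, while redoing an earlier message still asks for confirmation via redoResponseDialog, presumably because that path discards the conversation that follows the regenerated message.)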
@@ -13,6 +13,7 @@
 #include <jinja2cpp/error_info.h>
 #include <jinja2cpp/template.h>
 #include <jinja2cpp/template_env.h>
 #include <jinja2cpp/user_callable.h>
 #include <jinja2cpp/value.h>
 
 #include <QDataStream>
@@ -70,19 +71,23 @@ static jinja2::TemplateEnv *jinjaEnv()
     settings.lstripBlocks = true;
     env.AddGlobal("raise_exception", jinja2::UserCallable(
         /*callable*/ [](auto &params) -> jinja2::Value {
-            auto &message = params.args.at("message").asString();
-            throw std::runtime_error(fmt::format("Jinja template error: {}", message));
+            auto messageArg = params.args.find("message");
+            if (messageArg == params.args.end() || !messageArg->second.isString())
+                throw std::runtime_error("'message' argument to raise_exception() must be a string");
+            throw std::runtime_error(fmt::format("Jinja template error: {}", messageArg->second.asString()));
         },
         /*argsInfo*/ { jinja2::ArgInfo("message", /*isMandatory*/ true) }
     ));
     env.AddGlobal("strftime_now", jinja2::UserCallable(
         /*callable*/ [](auto &params) -> jinja2::Value {
             using Clock = std::chrono::system_clock;
-            auto &format = params.args.at("format").asString();
+            auto formatArg = params.args.find("format");
+            if (formatArg == params.args.end() || !formatArg->second.isString())
+                throw std::runtime_error("'format' argument to strftime_now() must be a string");
             time_t nowUnix = Clock::to_time_t(Clock::now());
             auto localDate = *std::localtime(&nowUnix);
             std::ostringstream ss;
-            ss << std::put_time(&localDate, format.c_str());
+            ss << std::put_time(&localDate, formatArg->second.asString().c_str());
             return ss.str();
         },
         /*argsInfo*/ { jinja2::ArgInfo("format", /*isMandatory*/ true) }
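Both callables above follow the same pattern: look the argument up with find() instead of params.args.at(), and type-check it before use, so a template that passes a missing or non-string argument gets a readable error instead of a bare std::out_of_range from the map lookup. A minimal standalone sketch of that pattern (assuming jinja2cpp is available; the shout() callable and its template are made up for illustration):

#include <jinja2cpp/template.h>
#include <jinja2cpp/template_env.h>
#include <jinja2cpp/user_callable.h>
#include <jinja2cpp/value.h>
#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>

int main()
{
    jinja2::TemplateEnv env;
    env.AddGlobal("shout", jinja2::UserCallable(
        /*callable*/ [](auto &params) -> jinja2::Value {
            // Validate instead of params.args.at("text").asString(): a missing
            // or non-string argument would otherwise surface as an unhelpful
            // out-of-range error rather than a readable template error.
            auto it = params.args.find("text");
            if (it == params.args.end() || !it->second.isString())
                throw std::runtime_error("'text' argument to shout() must be a string");
            std::string s = it->second.asString();
            for (auto &c : s)
                c = std::toupper(static_cast<unsigned char>(c));
            return s;
        },
        /*argsInfo*/ { jinja2::ArgInfo("text", /*isMandatory*/ true) }
    ));

    jinja2::Template tpl(&env);
    tpl.Load("{{ shout('hello') }}");
    std::cout << tpl.RenderAsString({}).value() << '\n'; // prints HELLO
}

Note that the commit adds the find()/end() check even though ArgInfo already marks the argument mandatory, which suggests that the mandatory flag alone does not guarantee a string value reaches the callable; the explicit isString() check is what actually guards the asString() call.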
@@ -923,7 +928,7 @@ auto ChatLLM::promptInternal(
     if (auto limit = nCtx - 4; lastMessageLength > limit) {
         throw std::invalid_argument(
             tr("Your message was too long and could not be processed (%1 > %2). "
-               "Please try again with something shorter.").arg(lastMessageLength, limit).toUtf8().constData()
+               "Please try again with something shorter.").arg(lastMessageLength).arg(limit).toUtf8().constData()
         );
     }
 }
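The .arg() change above is more than cosmetic. QString::arg(a, b) with two integers is the single-substitution overload whose second parameter is a field width, so the old code padded %1 to a width of `limit` and never substituted %2; chaining .arg(lastMessageLength).arg(limit) fills both placeholders. A small illustration with made-up values:

#include <QDebug>
#include <QString>

int main()
{
    int lastMessageLength = 12, limit = 5; // hypothetical values

    // Overload trap: this resolves to arg(int a, int fieldWidth), so "%1"
    // becomes "12" right-padded to width 5 and "%2" is left untouched.
    qDebug() << QString("(%1 > %2)").arg(lastMessageLength, limit);
    // output: "(   12 > %2)"

    // Chained calls substitute both placeholders as intended.
    qDebug() << QString("(%1 > %2)").arg(lastMessageLength).arg(limit);
    // output: "(12 > 5)"
}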
@@ -47,8 +47,33 @@ using namespace Qt::Literals::StringLiterals;
 
 #define MODELS_JSON_VERSION "3"
 
+
 static const QStringList FILENAME_BLACKLIST { u"gpt4all-nomic-embed-text-v1.rmodel"_s };
 
+static const QString RMODEL_CHAT_TEMPLATE = uR"(<chat>
+{%- set loop_messages = messages %}
+{%- for message in loop_messages %}
+    {{- raise_exception('Unknown role: ' + messages['role']) }}
+    {{- '<' + message['role'] + '>' }}
+    {%- if message['role'] == 'user' %}
+        {%- for source in message.sources %}
+            {%- if loop.first %}
+                {{- '### Context:\n' }}
+            {%- endif %}
+            {{- 'Collection: ' + source.collection + '\n' +
+                'Path: ' + source.path + '\n' +
+                'Excerpt: ' + source.text + '\n\n' }}
+        {%- endfor %}
+    {%- endif %}
+    {%- for attachment in message.prompt_attachments %}
+        {{- attachment.processed_content + '\n\n' }}
+    {%- endfor %}
+    {{- message.content }}
+    {{- '</' + message['role'] + '>' }}
+{%- endfor %}
+</chat>)"_s;
+
+
 QString ModelInfo::id() const
 {
     return m_id;
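RMODEL_CHAT_TEMPLATE is a Jinja chat template that the hunks below attach to remote-model (.rmodel) entries via ChatTemplateRole; like any chat template, it is rendered against the `messages` list (plus per-message sources and attachments). A rough standalone sketch of that kind of rendering with jinja2cpp, using a simplified two-tag template and made-up messages rather than GPT4All's actual pipeline:

#include <jinja2cpp/template.h>
#include <jinja2cpp/value.h>
#include <iostream>

int main()
{
    // Simplified stand-in for RMODEL_CHAT_TEMPLATE: wrap each message in
    // <role>...</role> tags inside a <chat> element.
    jinja2::Template tpl;
    tpl.Load(
        "<chat>\n"
        "{% for message in messages %}"
        "<{{ message.role }}>{{ message.content }}</{{ message.role }}>\n"
        "{% endfor %}"
        "</chat>");

    // Made-up conversation; GPT4All builds this list from its chat model.
    jinja2::ValuesList messages {
        jinja2::ValuesMap{ { "role", "user"      }, { "content", "Hello!"    } },
        jinja2::ValuesMap{ { "role", "assistant" }, { "content", "Hi there." } },
    };

    std::cout << tpl.RenderAsString({ { "messages", messages } }).value() << '\n';
    // <chat>
    // <user>Hello!</user>
    // <assistant>Hi there.</assistant>
    // </chat>
}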
@@ -1367,6 +1392,7 @@ void ModelList::processModelDirectory(const QString &path)
                 // The description is hard-coded into "GPT4All.ini" due to performance issue.
                 // If the description goes to be dynamic from its .rmodel file, it will get high I/O usage while using the ModelList.
                 data.append({ DescriptionRole, description });
+                data.append({ ChatTemplateRole, RMODEL_CHAT_TEMPLATE });
             }
             updateData(id, data);
         }
@@ -1655,7 +1681,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
                 { ModelList::ParametersRole, "?" },
                 { ModelList::QuantRole, "NA" },
                 { ModelList::TypeRole, "GPT" },
-                { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions"},
+                { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions" },
+                { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
             };
             updateData(id, data);
         }
@@ -1683,7 +1710,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
                 { ModelList::ParametersRole, "?" },
                 { ModelList::QuantRole, "NA" },
                 { ModelList::TypeRole, "GPT" },
-                { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions"},
+                { ModelList::UrlRole, "https://api.openai.com/v1/chat/completions" },
+                { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
             };
             updateData(id, data);
         }
@@ -1714,7 +1742,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
                 { ModelList::ParametersRole, "?" },
                 { ModelList::QuantRole, "NA" },
                 { ModelList::TypeRole, "Mistral" },
-                { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+                { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+                { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
             };
             updateData(id, data);
         }
@@ -1739,7 +1768,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
                 { ModelList::ParametersRole, "?" },
                 { ModelList::QuantRole, "NA" },
                 { ModelList::TypeRole, "Mistral" },
-                { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+                { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+                { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
             };
             updateData(id, data);
         }
@@ -1765,7 +1795,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
                 { ModelList::ParametersRole, "?" },
                 { ModelList::QuantRole, "NA" },
                 { ModelList::TypeRole, "Mistral" },
-                { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions"},
+                { ModelList::UrlRole, "https://api.mistral.ai/v1/chat/completions" },
+                { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
             };
             updateData(id, data);
         }
@@ -1794,6 +1825,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save)
                 { ModelList::ParametersRole, "?" },
                 { ModelList::QuantRole, "NA" },
                 { ModelList::TypeRole, "NA" },
+                { ModelList::ChatTemplateRole, RMODEL_CHAT_TEMPLATE },
             };
             updateData(id, data);
         }