Fixups for Jinja PR (#3215)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -147,11 +147,10 @@ void Chat::newPromptResponsePair(const QString &prompt, const QList<QUrl> &attac
         promptPlusAttached = attachedContexts.join("\n\n") + "\n\n" + prompt;

     resetResponseState();
-    qsizetype prevMsgIndex = m_chatModel->count() - 1;
-    if (prevMsgIndex >= 0)
-        m_chatModel->updateCurrentResponse(prevMsgIndex, false);
+    if (int count = m_chatModel->count())
+        m_chatModel->updateCurrentResponse(count - 1, false);
     m_chatModel->appendPrompt(prompt, attachments);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();

     emit promptRequested(m_collections);
     m_needsSave = true;
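The chat.cpp hunk replaces an index variable that stayed in scope past its bounds check with C++'s declaration-in-condition form: count exists only inside the if, and the branch runs only when it is nonzero. A minimal standalone sketch of the idiom, using a hypothetical container rather than GPT4All code:

    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<int> items { 1, 2, 3 };
        // 'n' is declared in the condition; the body runs only when n != 0,
        // so 'n - 1' is always a valid index inside the branch.
        if (int n = static_cast<int>(items.size()))
            std::cout << "last item: " << items[n - 1] << '\n';
        // 'n' is out of scope here, so it cannot be reused by mistake,
        // unlike the old prevMsgIndex, which leaked into the lines below it.
        return 0;
    }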
@@ -352,7 +352,7 @@ public:
         emit countChanged();
     }

-    void appendResponse(int promptIndex)
+    void appendResponse()
     {
         qsizetype count;
         {
@@ -362,17 +362,13 @@ public:
             count = m_chatItems.count();
         }

+        int promptIndex = 0;
         beginInsertRows(QModelIndex(), count, count);
         {
             QMutexLocker locker(&m_mutex);
-            if (promptIndex >= 0) {
-                if (promptIndex >= m_chatItems.size())
-                    throw std::out_of_range(fmt::format("index {} is out of range", promptIndex));
-                auto &promptItem = m_chatItems[promptIndex];
-                if (promptItem.type() != ChatItem::Type::Prompt)
-                    throw std::invalid_argument(fmt::format("item at index {} is not a prompt", promptIndex));
-            }
             m_chatItems.emplace_back(ChatItem::response_tag, promptIndex);
+            if (auto pi = getPeerUnlocked(m_chatItems.size() - 1))
+                promptIndex = *pi;
         }
         endInsertRows();
         emit countChanged();
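appendResponse() no longer takes a prompt index: instead of validating a caller-supplied value with the deleted range and type checks, the model asks getPeerUnlocked for the response's peer after appending it. The diff does not show getPeerUnlocked itself, so peerOf below is an assumed stand-in (nearest preceding prompt), and the types are simplified placeholders:

    #include <cstddef>
    #include <optional>
    #include <vector>

    enum class Type { Prompt, Response };
    struct Item { Type type; };

    // Assumed behavior of getPeerUnlocked for a response item:
    // walk backwards to the nearest preceding prompt, if any.
    static std::optional<std::size_t> peerOf(const std::vector<Item> &items, std::size_t i)
    {
        while (i-- > 0)
            if (items[i].type == Type::Prompt)
                return i;
        return std::nullopt;
    }

    int main()
    {
        std::vector<Item> items { {Type::Prompt} };
        items.push_back({Type::Response});          // appendResponse() analogue
        auto pi = peerOf(items, items.size() - 1);  // derived, not caller-supplied
        return (pi && *pi == 0) ? 0 : 1;            // peer is the prompt at index 0
    }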
@@ -394,7 +390,6 @@ public:
         qsizetype endIndex = startIndex + nNewItems;
         beginInsertRows(QModelIndex(), startIndex, endIndex - 1 /*inclusive*/);
         bool hadError;
-        int promptIndex;
         {
             QMutexLocker locker(&m_mutex);
             hadError = hasErrorUnlocked();
@@ -408,8 +403,6 @@ public:
         // Server can add messages when there is an error because each call is a new conversation
         if (hadError)
             emit hasErrorChanged(false);
-        if (promptIndex >= 0)
-            emit dataChanged(createIndex(promptIndex, 0), createIndex(promptIndex, 0), {PeerRole});
     }

     void truncate(qsizetype size)
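With promptIndex gone from this code path, the PeerRole change notification that depended on it is dropped as well. For reference, a minimal sketch of the Qt mechanism the deleted emit used, announcing that one custom role changed on one row; MiniModel is a stand-in, not the real ChatModel:

    #include <QAbstractListModel>

    class MiniModel : public QAbstractListModel
    {
    public:
        enum Roles { PeerRole = Qt::UserRole };

        int rowCount(const QModelIndex & = {}) const override { return 2; }
        QVariant data(const QModelIndex &, int) const override { return {}; }

        void notifyPeerChanged(int row)
        {
            // Same shape as the removed emit: a single-cell range, one role.
            emit dataChanged(createIndex(row, 0), createIndex(row, 0), {PeerRole});
        }
    };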
@@ -318,7 +318,7 @@ void ModelInfo::setRepeatPenaltyTokens(int t)

 QVariant ModelInfo::defaultChatTemplate() const
 {
-    auto res = m_chatTemplate.or_else([this] -> std::optional<QString> {
+    auto res = m_chatTemplate.or_else([this]() -> std::optional<QString> {
         if (!installed || isOnline)
             return std::nullopt;
         if (!m_modelChatTemplate) {
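The modellist.cpp change is a one-token syntax fix: before C++23, a lambda may use a trailing return type only if it declares a parameter list, so [this] -> std::optional<QString> {...} is ill-formed while [this]() -> std::optional<QString> {...} compiles. A standalone illustration:

    #include <optional>
    #include <string>

    int main()
    {
        // Ill-formed before C++23: a trailing return type on a lambda
        // requires a parameter list, even an empty one:
        //   auto bad = [] -> std::optional<std::string> { return std::nullopt; };

        // OK: the empty '()' makes the '->' legal.
        auto good = []() -> std::optional<std::string> { return std::nullopt; };
        return good().has_value() ? 1 : 0;
    }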
@@ -668,7 +668,7 @@ auto Server::handleCompletionRequest(const CompletionRequest &request)

     // add prompt/response items to GUI
     m_chatModel->appendPrompt(request.prompt);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();

     // FIXME(jared): taking parameters from the UI inhibits reproducibility of results
     LLModel::PromptContext promptCtx {
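Both call sites that previously passed an index (Chat::newPromptResponsePair above and this server path) now use the zero-argument appendResponse(), leaving prompt/response pairing entirely to ChatModel.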