Fixups for Jinja PR (#3215)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

Author:    Jared Van Bortel
Date:      2024-12-03 19:36:53 -05:00
Committer: GitHub
Parent:    225bf6be93
Commit:    92acc7b3ac

10 changed files with 31 additions and 23 deletions

@@ -495,7 +495,7 @@ jobs:
           mkdir dotnet
           cd dotnet
           $dotnet_url="https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip"
-          wget "$dotnet_url"
+          wget.exe "$dotnet_url"
           Expand-Archive -LiteralPath .\dotnet-sdk-8.0.302-win-x64.zip
           $Env:DOTNET_ROOT="$($(Get-Location).Path)\dotnet-sdk-8.0.302-win-x64"
           $Env:PATH="$Env:DOTNET_ROOT;$Env:PATH"

.gitmodules

@@ -20,3 +20,6 @@
 [submodule "gpt4all-chat/deps/Jinja2Cpp"]
 	path = gpt4all-chat/deps/Jinja2Cpp
 	url = https://github.com/nomic-ai/jinja2cpp.git
+[submodule "gpt4all-chat/deps/rapidjson"]
+	path = gpt4all-chat/deps/rapidjson
+	url = https://github.com/nomic-ai/rapidjson.git

gpt4all-chat/CMakeLists.txt

@@ -33,6 +33,10 @@ option(GPT4ALL_SIGN_INSTALL "Sign installed binaries and installers (requires si
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_STANDARD 23)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
+if (MSVC)
+    # Enable accurate __cplusplus macro to fix errors in Jinja2Cpp
+    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)
+endif()

 # conftests
@@ -98,6 +102,10 @@ message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

+if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+    set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
+endif()
+
 add_subdirectory(deps)
 add_subdirectory(../gpt4all-backend llmodel)
@@ -398,10 +406,6 @@ endif()
 set(COMPONENT_NAME_MAIN ${PROJECT_NAME})

-if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
-    set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
-endif()
-
 install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})

 install(
@@ -514,6 +518,7 @@ elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
     set(CPACK_BUNDLE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
 endif()
+set(CPACK_COMPONENTS_ALL gpt4all) # exclude development components
 set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
 set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
 set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
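
Note: /Zc:__cplusplus matters because MSVC otherwise reports __cplusplus as 199711L regardless of the /std: flag, so version-gated code in Jinja2Cpp takes the wrong branch. A minimal standalone sketch (not code from this repo) of the kind of check that breaks:

    #include <cstdio>

    int main() {
        // Without /Zc:__cplusplus, MSVC evaluates this as 199711L even when
        // compiling with /std:c++23, so the legacy branch is taken.
    #if __cplusplus >= 202002L
        std::puts("modern branch (accurate __cplusplus)");
    #else
        std::puts("legacy branch (MSVC default without /Zc:__cplusplus)");
    #endif
    }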

gpt4all-chat/deps/CMakeLists.txt

@@ -12,4 +12,11 @@ add_subdirectory(DuckX)
 set(QT_VERSION_MAJOR 6)
 add_subdirectory(QXlsx/QXlsx)
+
+# forked dependency of Jinja2Cpp
+set(RAPIDJSON_BUILD_DOC OFF)
+set(RAPIDJSON_BUILD_EXAMPLES OFF)
+set(RAPIDJSON_BUILD_TESTS OFF)
+set(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT OFF)
+add_subdirectory(rapidjson)
 add_subdirectory(Jinja2Cpp)
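
Note: as the comment above says, RapidJSON is a dependency of Jinja2Cpp (here the nomic-ai fork, vendored as a submodule). For orientation, a minimal sketch of the core RapidJSON parse API, assuming the vendored headers are on the include path:

    #include <rapidjson/document.h>
    #include <cstdio>

    int main() {
        rapidjson::Document doc;
        doc.Parse(R"({"name": "gpt4all", "files": 10})");  // parse into a DOM
        if (doc.HasParseError())
            return 1;
        std::printf("%s: %d\n", doc["name"].GetString(), doc["files"].GetInt());
        return 0;
    }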

gpt4all-chat/src/chat.cpp

@@ -147,11 +147,10 @@ void Chat::newPromptResponsePair(const QString &prompt, const QList<QUrl> &attac
         promptPlusAttached = attachedContexts.join("\n\n") + "\n\n" + prompt;

     resetResponseState();
-    qsizetype prevMsgIndex = m_chatModel->count() - 1;
-    if (prevMsgIndex >= 0)
-        m_chatModel->updateCurrentResponse(prevMsgIndex, false);
+    if (int count = m_chatModel->count())
+        m_chatModel->updateCurrentResponse(count - 1, false);
     m_chatModel->appendPrompt(prompt, attachments);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();
     emit promptRequested(m_collections);
     m_needsSave = true;
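
Note: the rewrite uses a declaration as the if condition, which both scopes count to the branch and skips the update when the model is empty (zero is falsy), replacing the explicit prevMsgIndex >= 0 check. A standalone sketch of the idiom, with itemCount() as a hypothetical stand-in for m_chatModel->count():

    #include <cstdio>

    int itemCount() { return 3; }  // hypothetical stand-in for m_chatModel->count()

    int main() {
        // Declaration-as-condition: the branch runs only when the value is
        // nonzero, and `count` is not visible outside the if/else.
        if (int count = itemCount())
            std::printf("last index: %d\n", count - 1);
        else
            std::puts("model empty; nothing to update");
    }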

gpt4all-chat/src/chatmodel.h

@@ -352,7 +352,7 @@ public:
         emit countChanged();
     }

-    void appendResponse(int promptIndex)
+    void appendResponse()
     {
         qsizetype count;
         {
@@ -362,17 +362,13 @@ public:
             count = m_chatItems.count();
         }

+        int promptIndex = 0;
         beginInsertRows(QModelIndex(), count, count);
         {
             QMutexLocker locker(&m_mutex);
-            if (promptIndex >= 0) {
-                if (promptIndex >= m_chatItems.size())
-                    throw std::out_of_range(fmt::format("index {} is out of range", promptIndex));
-                auto &promptItem = m_chatItems[promptIndex];
-                if (promptItem.type() != ChatItem::Type::Prompt)
-                    throw std::invalid_argument(fmt::format("item at index {} is not a prompt", promptIndex));
-            }
             m_chatItems.emplace_back(ChatItem::response_tag, promptIndex);
+            if (auto pi = getPeerUnlocked(m_chatItems.size() - 1))
+                promptIndex = *pi;
         }
         endInsertRows();
         emit countChanged();
@@ -394,7 +390,6 @@ public:
         qsizetype endIndex = startIndex + nNewItems;
         beginInsertRows(QModelIndex(), startIndex, endIndex - 1 /*inclusive*/);
         bool hadError;
-        int promptIndex;
         {
             QMutexLocker locker(&m_mutex);
             hadError = hasErrorUnlocked();
@@ -408,8 +403,6 @@ public:
         // Server can add messages when there is an error because each call is a new conversation
         if (hadError)
             emit hasErrorChanged(false);
-        if (promptIndex >= 0)
-            emit dataChanged(createIndex(promptIndex, 0), createIndex(promptIndex, 0), {PeerRole});
     }

     void truncate(qsizetype size)
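
Note: appendResponse() no longer trusts a caller-supplied promptIndex (the old code had to bounds-check and type-check it); it derives the peer prompt itself via getPeerUnlocked(). A simplified sketch of that backward scan, with stand-in types rather than the actual ChatModel internals:

    #include <optional>
    #include <vector>

    enum class Type { Prompt, Response };
    struct Item { Type type; };

    // Hypothetical analogue of getPeerUnlocked(): walk backwards from the
    // item at `index` and return the nearest preceding prompt, if any.
    std::optional<size_t> peerPrompt(const std::vector<Item> &items, size_t index)
    {
        while (index-- > 0)
            if (items[index].type == Type::Prompt)
                return index;
        return std::nullopt;
    }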

gpt4all-chat/src/modellist.cpp

@@ -318,7 +318,7 @@ void ModelInfo::setRepeatPenaltyTokens(int t)

 QVariant ModelInfo::defaultChatTemplate() const
 {
-    auto res = m_chatTemplate.or_else([this] -> std::optional<QString> {
+    auto res = m_chatTemplate.or_else([this]() -> std::optional<QString> {
         if (!installed || isOnline)
             return std::nullopt;
         if (!m_modelChatTemplate) {
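
Note: the one-character fix matters because omitting a lambda's parameter list while keeping a trailing return type ([this] -> T { ... }) is only valid as of C++23 (P1102R2), and not every supported compiler implements that yet; [this]() -> T is accepted everywhere. A self-contained sketch of std::optional::or_else with the fixed spelling, using std::string as a stand-in for QString:

    #include <cstdio>
    #include <optional>
    #include <string>

    int main() {
        std::optional<std::string> tmpl;  // unset, like m_chatTemplate
        // or_else (C++23) invokes the fallback only when the optional is empty.
        auto res = tmpl.or_else([]() -> std::optional<std::string> {
            return "fallback template";
        });
        std::puts(res ? res->c_str() : "(none)");
    }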

gpt4all-chat/src/server.cpp

@@ -668,7 +668,7 @@ auto Server::handleCompletionRequest(const CompletionRequest &request)

     // add prompt/response items to GUI
     m_chatModel->appendPrompt(request.prompt);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();

     // FIXME(jared): taking parameters from the UI inhibits reproducibility of results
     LLModel::PromptContext promptCtx {