Fixups for Jinja PR (#3215)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -495,7 +495,7 @@ jobs:
 mkdir dotnet
 cd dotnet
 $dotnet_url="https://download.visualstudio.microsoft.com/download/pr/5af098e1-e433-4fda-84af-3f54fd27c108/6bd1c6e48e64e64871957289023ca590/dotnet-sdk-8.0.302-win-x64.zip"
-wget "$dotnet_url"
+wget.exe "$dotnet_url"
 Expand-Archive -LiteralPath .\dotnet-sdk-8.0.302-win-x64.zip
 $Env:DOTNET_ROOT="$($(Get-Location).Path)\dotnet-sdk-8.0.302-win-x64"
 $Env:PATH="$Env:DOTNET_ROOT;$Env:PATH"
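(Likely motivation for the change above: in Windows PowerShell, wget is a built-in alias for Invoke-WebRequest, whose flags and output differ from GNU wget; spelling the command wget.exe bypasses the alias and runs the actual wget binary from PATH.)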
.gitmodules (vendored, 3 changes)

@@ -20,3 +20,6 @@
 [submodule "gpt4all-chat/deps/Jinja2Cpp"]
     path = gpt4all-chat/deps/Jinja2Cpp
     url = https://github.com/nomic-ai/jinja2cpp.git
+[submodule "gpt4all-chat/deps/rapidjson"]
+    path = gpt4all-chat/deps/rapidjson
+    url = https://github.com/nomic-ai/rapidjson.git
@@ -33,6 +33,10 @@ option(GPT4ALL_SIGN_INSTALL "Sign installed binaries and installers (requires si
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 set(CMAKE_CXX_STANDARD 23)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
+if (MSVC)
+    # Enable accurate __cplusplus macro to fix errors in Jinja2Cpp
+    add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/Zc:__cplusplus>)
+endif()
 
 
 # conftests
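Background on the /Zc:__cplusplus addition: by default MSVC reports __cplusplus as 199711L regardless of the selected standard, so version checks in library headers take pre-C++11 branches even in C++23 builds. A minimal sketch (not part of the patch) showing what such checks actually see:

    // Prints the value feature checks observe. Under MSVC without
    // /Zc:__cplusplus this stays 199711L even when compiling as C++23,
    // which is the class of error the flag above fixes for Jinja2Cpp.
    #include <iostream>

    int main() {
        std::cout << "__cplusplus = " << __cplusplus << '\n';
    #if __cplusplus >= 202302L
        std::cout << "C++23 branch\n";
    #else
        std::cout << "pre-C++23 branch\n";
    #endif
        return 0;
    }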
@@ -98,6 +102,10 @@ message(STATUS "Qt 6 root directory: ${Qt6_ROOT_DIR}")
 
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 
+if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+    set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
+endif()
+
 add_subdirectory(deps)
 add_subdirectory(../gpt4all-backend llmodel)
 
@@ -398,10 +406,6 @@ endif()
 
 set(COMPONENT_NAME_MAIN ${PROJECT_NAME})
 
-if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
-    set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install CACHE PATH "..." FORCE)
-endif()
-
 install(TARGETS chat DESTINATION bin COMPONENT ${COMPONENT_NAME_MAIN})
 
 install(
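(Read together with the insertion at +102 above: the CMAKE_INSTALL_PREFIX default is not removed but moved ahead of the add_subdirectory() calls, presumably so the dependency and backend subdirectories are configured with the final install prefix already in effect.)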
@@ -514,6 +518,7 @@ elseif(${CMAKE_SYSTEM_NAME} MATCHES Darwin)
     set(CPACK_BUNDLE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns")
 endif()
 
 set(CPACK_COMPONENTS_ALL gpt4all) # exclude development components
+set(CPACK_PACKAGE_INSTALL_DIRECTORY ${COMPONENT_NAME_MAIN})
 set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})
 set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR})
@@ -12,4 +12,11 @@ add_subdirectory(DuckX)
 set(QT_VERSION_MAJOR 6)
 add_subdirectory(QXlsx/QXlsx)
+
+# forked dependency of Jinja2Cpp
+set(RAPIDJSON_BUILD_DOC OFF)
+set(RAPIDJSON_BUILD_EXAMPLES OFF)
+set(RAPIDJSON_BUILD_TESTS OFF)
+set(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT OFF)
+add_subdirectory(rapidjson)
 
 add_subdirectory(Jinja2Cpp)
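RapidJSON is header-only, which is why the hunk above only switches off its docs, examples, tests, and instrumentation before adding the subdirectory. As a purely illustrative sketch of the kind of API Jinja2Cpp builds on (the field name here is hypothetical):

    // Parse a JSON document and read one field back; errors are reported
    // via HasParseError() rather than exceptions, as is idiomatic RapidJSON.
    #include <rapidjson/document.h>
    #include <iostream>

    int main() {
        rapidjson::Document doc;
        doc.Parse(R"({"template": "jinja"})");
        if (doc.HasParseError() || !doc.HasMember("template") || !doc["template"].IsString())
            return 1;
        std::cout << doc["template"].GetString() << '\n';
        return 0;
    }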
Submodule gpt4all-chat/deps/Jinja2Cpp updated: b2a716798b...bcf2f82ae1
Submodule gpt4all-chat/deps/rapidjson added at 9b547ef4bd
@@ -147,11 +147,10 @@ void Chat::newPromptResponsePair(const QString &prompt, const QList<QUrl> &attac
         promptPlusAttached = attachedContexts.join("\n\n") + "\n\n" + prompt;
 
     resetResponseState();
-    qsizetype prevMsgIndex = m_chatModel->count() - 1;
-    if (prevMsgIndex >= 0)
-        m_chatModel->updateCurrentResponse(prevMsgIndex, false);
+    if (int count = m_chatModel->count())
+        m_chatModel->updateCurrentResponse(count - 1, false);
     m_chatModel->appendPrompt(prompt, attachments);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();
 
     emit promptRequested(m_collections);
     m_needsSave = true;
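(These call-site changes track the ChatModel API change in the hunks that follow: appendResponse() no longer takes a prompt index; the model resolves the new response's peer prompt itself.)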
@@ -352,7 +352,7 @@ public:
         emit countChanged();
     }
 
-    void appendResponse(int promptIndex)
+    void appendResponse()
     {
         qsizetype count;
         {
@@ -362,17 +362,13 @@ public:
             count = m_chatItems.count();
         }
 
+        int promptIndex = 0;
         beginInsertRows(QModelIndex(), count, count);
         {
             QMutexLocker locker(&m_mutex);
-            if (promptIndex >= 0) {
-                if (promptIndex >= m_chatItems.size())
-                    throw std::out_of_range(fmt::format("index {} is out of range", promptIndex));
-                auto &promptItem = m_chatItems[promptIndex];
-                if (promptItem.type() != ChatItem::Type::Prompt)
-                    throw std::invalid_argument(fmt::format("item at index {} is not a prompt", promptIndex));
-            }
             m_chatItems.emplace_back(ChatItem::response_tag, promptIndex);
+            if (auto pi = getPeerUnlocked(m_chatItems.size() - 1))
+                promptIndex = *pi;
         }
         endInsertRows();
         emit countChanged();
@@ -394,7 +390,6 @@ public:
         qsizetype endIndex = startIndex + nNewItems;
         beginInsertRows(QModelIndex(), startIndex, endIndex - 1 /*inclusive*/);
         bool hadError;
-        int promptIndex;
         {
             QMutexLocker locker(&m_mutex);
             hadError = hasErrorUnlocked();
@@ -408,8 +403,6 @@ public:
         // Server can add messages when there is an error because each call is a new conversation
         if (hadError)
             emit hasErrorChanged(false);
-        if (promptIndex >= 0)
-            emit dataChanged(createIndex(promptIndex, 0), createIndex(promptIndex, 0), {PeerRole});
     }
 
     void truncate(qsizetype size)
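A standalone sketch (not the project's code) of the idiom the rewritten appendResponse() relies on: an optional-returning lookup unwrapped with an if-initializer, with peerOf() as a hypothetical stand-in for getPeerUnlocked():

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Hypothetical stand-in for getPeerUnlocked(): find the peer of the
    // item at `index`, or report that it has none.
    std::optional<std::size_t> peerOf(const std::vector<int> &items, std::size_t index) {
        return index > 0 && index < items.size() ? std::optional<std::size_t>(index - 1)
                                                 : std::nullopt;
    }

    int main() {
        std::vector<int> items{1, 2, 3};
        std::size_t promptIndex = 0;
        // The optional is unwrapped only in the branch where it holds a value.
        if (auto pi = peerOf(items, items.size() - 1))
            promptIndex = *pi;
        return static_cast<int>(promptIndex);
    }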
@@ -318,7 +318,7 @@ void ModelInfo::setRepeatPenaltyTokens(int t)
 
 QVariant ModelInfo::defaultChatTemplate() const
 {
-    auto res = m_chatTemplate.or_else([this] -> std::optional<QString> {
+    auto res = m_chatTemplate.or_else([this]() -> std::optional<QString> {
         if (!installed || isOnline)
             return std::nullopt;
         if (!m_modelChatTemplate) {
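The fix above is a portability matter: omitting the parameter list on a lambda that has a trailing return type is only legal as of C++23 (P1102), and not every compiler targeted here accepts it yet, so the explicit () is added. A standalone illustration of the accepted spelling, using std::string in place of QString:

    #include <optional>
    #include <string>

    int main() {
        std::optional<std::string> tmpl; // empty, like an unset m_chatTemplate
        // `[] -> std::optional<std::string> {...}` (no parens) needs full
        // C++23 lambda support; the explicit () form below compiles anywhere
        // or_else() itself is available (or_else is also a C++23 addition).
        auto res = tmpl.or_else([]() -> std::optional<std::string> {
            return "fallback template";
        });
        return res.has_value() ? 0 : 1;
    }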
@@ -668,7 +668,7 @@ auto Server::handleCompletionRequest(const CompletionRequest &request)
 
     // add prompt/response items to GUI
     m_chatModel->appendPrompt(request.prompt);
-    m_chatModel->appendResponse(prevMsgIndex + 1);
+    m_chatModel->appendResponse();
 
     // FIXME(jared): taking parameters from the UI inhibits reproducibility of results
     LLModel::PromptContext promptCtx {