WIP: use Boost::json for incremental parsing and reflection

Jared Van Bortel 2025-02-26 15:22:13 -05:00
parent 927e963076
commit 06475dd113
9 changed files with 90 additions and 32 deletions

View File

@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.29)
+cmake_minimum_required(VERSION 3.28...3.31)
 project(gpt4all-backend-test VERSION 0.1 LANGUAGES CXX)
 set(G4A_TEST_OLLAMA_URL "http://localhost:11434/" CACHE STRING "The base URL of the Ollama server to use.")

View File

@@ -23,9 +23,9 @@ static void run()
     LLMProvider provider(OLLAMA_URL);
     auto version = QCoro::waitFor(provider.getVersion());
     if (version) {
-        fmt::print("Server version: {}\n", *version);
+        fmt::print("Server version: {}\n", version->version);
     } else {
-        fmt::print("Network error: {}\n", version.error().errorString);
+        fmt::print("Error retrieving version: {}\n", version.error().errorString);
         return QCoreApplication::exit(1);
     }
     QCoreApplication::exit(0);
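Note on the call-site change: getVersion() now resolves to a VersionResponse struct rather than a bare QString, so the success path reads version->version through std::expected's operator->. For readers unfamiliar with the C++23 std::expected pattern that DataOrRespErr wraps, here is a minimal self-contained sketch (the halve/Error names are illustrative, not part of this codebase):

#include <cstdio>
#include <expected>
#include <string>

struct Error { std::string message; };

// Success-or-error without exceptions, mirroring DataOrRespErr above.
std::expected<int, Error> halve(int x)
{
    if (x % 2 != 0)
        return std::unexpected(Error{"odd input"});
    return x / 2;
}

int main()
{
    if (auto r = halve(42))
        std::printf("ok: %d\n", *r);  // operator* / operator-> reach the success value
    else
        std::printf("error: %s\n", r.error().message.c_str());
}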

View File

@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.28)
+cmake_minimum_required(VERSION 3.28...3.31)
 project(gpt4all-backend VERSION 0.1 LANGUAGES CXX)
 set(CMAKE_CXX_STANDARD 23) # make sure fmt is compiled with the same C++ version as us
@@ -7,6 +7,7 @@ include(../common/common.cmake)
 find_package(Qt6 6.8 COMPONENTS Concurrent Core Network REQUIRED)
 add_subdirectory(../deps common_deps)
+add_subdirectory(deps)
 add_subdirectory(src)
 target_sources(gpt4all-backend PUBLIC

View File

@@ -0,0 +1,14 @@
+include(FetchContent)
+set(BUILD_SHARED_LIBS OFF)
+# suppress warnings during boost build
+add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:BOOST_ALLOW_DEPRECATED_HEADERS>)
+set(GPT4ALL_BOOST_TAG 1.87.0)
+FetchContent_Declare(
+    boost
+    URL "https://github.com/boostorg/boost/releases/download/boost-${GPT4ALL_BOOST_TAG}/boost-${GPT4ALL_BOOST_TAG}-cmake.tar.xz"
+    URL_HASH "SHA256=7da75f171837577a52bbf217e17f8ea576c7c246e4594d617bfde7fafd408be5"
+)
+FetchContent_MakeAvailable(boost)

View File

@@ -1,6 +1,7 @@
 #pragma once
 #include <QCoro/QCoroTask> // IWYU pragma: keep
+#include <boost/describe/class.hpp>
 #include <QJsonParseError>
 #include <QNetworkReply>
@@ -19,7 +20,7 @@ struct ResponseError {
 private:
     using ErrorCode = std::variant<
         QNetworkReply::NetworkError,
-        QJsonParseError::ParseError
+        std::exception_ptr
     >;
 public:
@@ -33,17 +34,19 @@ public:
         assert(reply->error());
     }
-    ResponseError(const QJsonParseError &err)
-        : error(err.error)
-        , errorString(err.errorString())
+    ResponseError(const std::exception &e, std::exception_ptr err)
+        : error(std::move(err))
+        , errorString(e.what())
     {
-        assert(err.error);
+        assert(std::get<std::exception_ptr>(error));
     }
 };
 template <typename T>
 using DataOrRespErr = std::expected<T, ResponseError>;
+struct VersionResponse { QString version; };
+BOOST_DESCRIBE_STRUCT(VersionResponse, (), (version))
 class LLMProvider {
 public:
@ -55,10 +58,11 @@ public:
void getBaseUrl(QUrl value) { m_baseUrl = std::move(value); }
/// Retrieve the Ollama version, e.g. "0.5.1"
auto getVersion() const -> QCoro::Task<DataOrRespErr<QString>>;
auto getVersion() -> QCoro::Task<DataOrRespErr<VersionResponse>>;
private:
QUrl m_baseUrl;
QUrl m_baseUrl;
QNetworkAccessManager m_nam;
};
} // namespace gpt4all::backend
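The new VersionResponse struct plus BOOST_DESCRIBE_STRUCT is the "reflection" half of this commit: Boost.Describe records the member list at compile time, and Boost.JSON's value_to/value_from use that metadata to convert described structs without hand-written parsing code. A standalone sketch of the mechanism (assuming Boost 1.81 or newer, where Describe-based conversion is supported; it uses std::string because plain Boost.JSON does not know QString, which is exactly the gap json_helpers fills below):

#include <boost/describe/class.hpp>
#include <boost/json.hpp>
#include <cassert>
#include <string>

struct VersionResponse { std::string version; };
BOOST_DESCRIBE_STRUCT(VersionResponse, (), (version))

int main()
{
    // value_to() walks the described members and fills the struct.
    auto jv  = boost::json::parse(R"({"version":"0.5.1"})");
    auto res = boost::json::value_to<VersionResponse>(jv);
    assert(res.version == "0.5.1");

    // The reverse direction (serialization) comes for free as well.
    assert(boost::json::serialize(boost::json::value_from(res))
           == R"({"version":"0.5.1"})");
}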

View File

@@ -1,13 +1,27 @@
 set(TARGET gpt4all-backend)
 add_library(${TARGET} STATIC
+    json_helpers.cpp
     main.cpp
 )
 target_compile_features(${TARGET} PUBLIC cxx_std_23)
 gpt4all_add_warning_options(${TARGET})
+target_include_directories(${TARGET} PRIVATE
+    .
+    ../include/gpt4all-backend
+)
 target_link_libraries(${TARGET} PUBLIC
-    QCoro6::Coro Qt6::Core Qt6::Network
+    Boost::describe
+    QCoro6::Coro
+    Qt6::Core
+    Qt6::Network
 )
 target_link_libraries(${TARGET} PRIVATE
-    QCoro6::Network fmt::fmt
+    QCoro6::Network
+    fmt::fmt
 )
+# link Boost::json as -isystem to suppress -Wundef
+get_target_property(LIB_INCLUDE_DIRS Boost::json INTERFACE_INCLUDE_DIRECTORIES)
+target_include_directories(${TARGET} SYSTEM PRIVATE ${LIB_INCLUDE_DIRS})
+target_link_libraries(${TARGET} PRIVATE Boost::json)

View File

@@ -0,0 +1,12 @@
+#include "json_helpers.h"
+#include <boost/json.hpp>
+#include <QString>
+QString tag_invoke(const boost::json::value_to_tag<QString> &, const boost::json::value &value)
+{
+    auto &s = value.as_string();
+    return QString::fromUtf8(s.data(), s.size());
+}
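tag_invoke is Boost.JSON's customization point: value_to<QString> looks for a matching overload via argument-dependent lookup, and because QString lives in the global namespace (an associated namespace of value_to_tag<QString>), the global-namespace overload above is found. A hypothetical usage sketch, assuming the declaration from json_helpers.h is in scope at the point of conversion:

#include "json_helpers.h"  // brings the tag_invoke declaration into scope
#include <boost/json.hpp>
#include <QString>

int main()
{
    auto jv = boost::json::parse(R"({"version":"0.5.1"})");
    // value_to finds the QString tag_invoke overload via ADL, so JSON
    // strings convert directly to QString, including as struct members.
    QString s = boost::json::value_to<QString>(jv.at("version"));
    return s == QStringLiteral("0.5.1") ? 0 : 1;
}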

View File

@@ -0,0 +1,11 @@
+#pragma once
+class QString;
+namespace boost::json {
+    class value;
+    template <typename T> struct value_to_tag;
+}
+/// Allows JSON strings to be deserialized as QString.
+QString tag_invoke(const boost::json::value_to_tag<QString> &, const boost::json::value &value);

View File

@@ -1,43 +1,45 @@
-#include <gpt4all-backend/main.h>
+#include "main.h"
+#include "json_helpers.h"
+#include <QCoro/QCoroIODevice> // IWYU pragma: keep
 #include <QCoro/QCoroNetworkReply> // IWYU pragma: keep
+#include <boost/json.hpp>
 #include <QByteArray>
-#include <QJsonDocument>
-#include <QNetworkAccessManager>
 #include <QNetworkRequest>
-#include <QJsonObject>
-#include <QJsonValue>
 #include <coroutine>
 #include <expected>
 #include <memory>
 using namespace Qt::Literals::StringLiterals;
+namespace json = boost::json;
 namespace gpt4all::backend {
-auto LLMProvider::getVersion() const -> QCoro::Task<DataOrRespErr<QString>>
+auto LLMProvider::getVersion() -> QCoro::Task<DataOrRespErr<VersionResponse>>
 {
-    QNetworkAccessManager nam;
-    std::unique_ptr<QNetworkReply> reply(co_await nam.get(QNetworkRequest(m_baseUrl.resolved(u"/api/version"_s))));
+    std::unique_ptr<QNetworkReply> reply(m_nam.get(QNetworkRequest(m_baseUrl.resolved(u"/api/version"_s))));
     if (reply->error())
         co_return std::unexpected(reply.get());
-    QJsonParseError error;
-    auto doc = QJsonDocument::fromJson(reply->readAll(), &error);
-    if (doc.isNull())
-        co_return std::unexpected(error);
+    try {
+        json::parser p;
+        auto coroReply = qCoro(*reply);
+        do {
+            auto chunk = co_await coroReply.readAll();
+            if (reply->error())
+                co_return std::unexpected(reply.get());
+            p.write(chunk.data(), chunk.size());
+        } while (!reply->atEnd());
-    assert(doc.isObject());
-    auto obj = doc.object();
-    auto version = std::as_const(obj).find("version"_L1);
-    assert(version != obj.constEnd());
-    assert(version->isString());
-    co_return version->toString();
+        co_return json::value_to<VersionResponse>(p.release());
+    } catch (const std::exception &e) {
+        co_return std::unexpected(ResponseError(e, std::current_exception()));
+    }
 }
 } // namespace gpt4all::backend
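The rewritten body above is the "incremental parsing" half of the commit: boost::json::parser accepts the response in arbitrary chunks as they arrive from the network, and release() only materializes the json::value once the document is complete, throwing if the input is truncated or malformed; the surrounding try/catch turns any such exception into a ResponseError carrying std::current_exception(). A Qt-free sketch of the same parser pattern:

#include <boost/json.hpp>
#include <cstdio>
#include <cstring>

int main()
{
    boost::json::parser p;
    // Chunks may split the document anywhere, even mid-token.
    for (const char *chunk : {R"({"vers)", R"(ion":"0.5.1"})"})
        p.write(chunk, std::strlen(chunk));   // may throw on malformed input
    boost::json::value v = p.release();       // throws if the document is incomplete
    std::puts(boost::json::serialize(v).c_str());  // prints {"version":"0.5.1"}
}

Feeding the parser chunk by chunk avoids buffering the whole reply before parsing, which is what the old QJsonDocument::fromJson(reply->readAll(), &error) code did.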