Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-06-25 15:02:03 +00:00
rename the class to "OllamaClient"

parent 06475dd113
commit 4c5dcf59ea
@@ -3,7 +3,7 @@
 #include <QCoro/QCoroTask> // IWYU pragma: keep
 #include <fmt/base.h>
 #include <gpt4all-backend/formatters.h> // IWYU pragma: keep
-#include <gpt4all-backend/main.h>
+#include <gpt4all-backend/ollama_client.h>
 
 #include <QCoreApplication>
 #include <QTimer>
@@ -14,13 +14,13 @@
 #include <expected>
 #include <variant>
 
-using gpt4all::backend::LLMProvider;
+using gpt4all::backend::OllamaClient;
 
 
 static void run()
 {
     fmt::print("Connecting to server at {}\n", OLLAMA_URL);
-    LLMProvider provider(OLLAMA_URL);
+    OllamaClient provider(OLLAMA_URL);
     auto version = QCoro::waitFor(provider.getVersion());
     if (version) {
         fmt::print("Server version: {}\n", version->version);
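
The two hunks above come from the backend's console test: run() connects to OLLAMA_URL, blocks on getVersion() with QCoro::waitFor, and prints the reported server version. The test's main() is outside the lines shown; the sketch below is one plausible way such a driver is wired up, given the QCoreApplication and QTimer includes in the first hunk. The singleShot scheduling and the quit call are assumptions, not code from this commit.

    // Hypothetical driver for run(); relies on the includes and the run()
    // definition shown in the hunks above, not taken from this commit.
    int main(int argc, char *argv[])
    {
        QCoreApplication app(argc, argv);
        // Start run() once the event loop is up, then exit.
        QTimer::singleShot(0, &app, [] {
            run();
            QCoreApplication::quit();
        });
        return app.exec();
    }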
@@ -11,7 +11,7 @@ add_subdirectory(deps)
 add_subdirectory(src)
 
 target_sources(gpt4all-backend PUBLIC
-    FILE_SET public_headers TYPE HEADERS BASE_DIRS include
-        FILES include/gpt4all-backend/main.h
+    FILE_SET public_headers TYPE HEADERS BASE_DIRS include FILES
         include/gpt4all-backend/formatters.h
+        include/gpt4all-backend/ollama_client.h
 )
@@ -48,9 +48,9 @@ using DataOrRespErr = std::expected<T, ResponseError>;
 struct VersionResponse { QString version; };
 BOOST_DESCRIBE_STRUCT(VersionResponse, (), (version))
 
-class LLMProvider {
+class OllamaClient {
 public:
-    LLMProvider(QUrl baseUrl)
+    OllamaClient(QUrl baseUrl)
         : m_baseUrl(baseUrl)
     {}
 
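
The hunk above is from the public header (renamed from main.h to ollama_client.h elsewhere in this commit). As a reading aid, here is a rough reconstruction of how the surrounding class might look after the rename; the constructor, the getVersion() signature, and the m_baseUrl and m_nam members are taken from hunks in this diff, while the ordering, the private section, and the comments are assumptions (m_nam is presumably a QNetworkAccessManager, given the m_nam.get(...) call in the implementation hunk below).

    // Sketch only; not the verbatim header from this commit.
    class OllamaClient {
    public:
        OllamaClient(QUrl baseUrl)
            : m_baseUrl(baseUrl)
        {}

        // GET /api/version on the configured server, yielding either a
        // VersionResponse or a ResponseError (see DataOrRespErr above).
        auto getVersion() -> QCoro::Task<DataOrRespErr<VersionResponse>>;

    private:
        QUrl                  m_baseUrl; // base URL of the Ollama server
        QNetworkAccessManager m_nam;     // issues the HTTP requests
    };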
@@ -2,7 +2,7 @@ set(TARGET gpt4all-backend)
 
 add_library(${TARGET} STATIC
     json_helpers.cpp
-    main.cpp
+    ollama_client.cpp
 )
 target_compile_features(${TARGET} PUBLIC cxx_std_23)
 gpt4all_add_warning_options(${TARGET})
@@ -1,4 +1,4 @@
-#include "main.h"
+#include "ollama_client.h"
 
 #include "json_helpers.h"
 
@@ -20,7 +20,7 @@ namespace json = boost::json;
 
 namespace gpt4all::backend {
 
-auto LLMProvider::getVersion() -> QCoro::Task<DataOrRespErr<VersionResponse>>
+auto OllamaClient::getVersion() -> QCoro::Task<DataOrRespErr<VersionResponse>>
 {
     std::unique_ptr<QNetworkReply> reply(m_nam.get(QNetworkRequest(m_baseUrl.resolved(u"/api/version"_s))));
     if (reply->error())
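
The rest of getVersion() is cut off by the hunk; this commit only renames the class it belongs to. The pieces that are visible (the boost::json namespace alias, json_helpers.h, and the BOOST_DESCRIBE_STRUCT(VersionResponse, ...) declaration in the header) indicate that the /api/version reply is decoded through Boost.JSON's Boost.Describe support. Below is a standalone illustration of that decoding step; it uses std::string instead of QString so it compiles without the project's json_helpers, and the sample payload value is made up.

    // Standalone sketch of Describe-driven JSON decoding; not project code.
    #include <boost/describe/class.hpp>
    #include <boost/json.hpp>
    #include <iostream>
    #include <string>

    struct VersionResponse { std::string version; };
    BOOST_DESCRIBE_STRUCT(VersionResponse, (), (version))

    int main()
    {
        // Shape of Ollama's /api/version response; the value is illustrative.
        auto jv   = boost::json::parse(R"({"version":"0.5.7"})");
        auto resp = boost::json::value_to<VersionResponse>(jv); // uses the Describe metadata
        std::cout << "Server version: " << resp.version << '\n';
    }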