diff --git a/gpt4all-backend-test/src/main.cpp b/gpt4all-backend-test/src/main.cpp
index cc0b9f23..a5098dc9 100644
--- a/gpt4all-backend-test/src/main.cpp
+++ b/gpt4all-backend-test/src/main.cpp
@@ -36,7 +36,15 @@ static void run()
         for (const auto & model : modelsResponse->models)
             fmt::print("{}\n", model.model);
     } else {
-        fmt::print("Error retrieving version: {}\n", modelsResponse.error().errorString);
+        fmt::print("Error retrieving available models: {}\n", modelsResponse.error().errorString);
+        return QCoreApplication::exit(1);
+    }
+
+    auto showResponse = QCoro::waitFor(provider.showModelInfo({ .model = "DeepSeek-R1-Distill-Llama-70B-Q4_K_S" }));
+    if (showResponse) {
+        fmt::print("Model family: {}\n", showResponse->details.family);
+    } else {
+        fmt::print("Error retrieving model info: {}\n", showResponse.error().errorString);
         return QCoreApplication::exit(1);
     }
 
diff --git a/gpt4all-backend/include/gpt4all-backend/ollama-client.h b/gpt4all-backend/include/gpt4all-backend/ollama-client.h
index 2c956252..ccecffed 100644
--- a/gpt4all-backend/include/gpt4all-backend/ollama-client.h
+++ b/gpt4all-backend/include/gpt4all-backend/ollama-client.h
@@ -92,4 +92,7 @@ private:
 extern template auto OllamaClient::get(const QString &) -> QCoro::Task<DataOrRespErr<VersionResponse>>;
 extern template auto OllamaClient::get(const QString &) -> QCoro::Task<DataOrRespErr<ModelsResponse>>;
 
+extern template auto OllamaClient::post(const QString &, const ollama::ModelInfoRequest &)
+    -> QCoro::Task<DataOrRespErr<ModelInfoResponse>>;
+
 } // namespace gpt4all::backend
diff --git a/gpt4all-backend/include/gpt4all-backend/ollama-types.h b/gpt4all-backend/include/gpt4all-backend/ollama-types.h
index 0a59fe8a..1219adab 100644
--- a/gpt4all-backend/include/gpt4all-backend/ollama-types.h
+++ b/gpt4all-backend/include/gpt4all-backend/ollama-types.h
@@ -14,6 +14,10 @@
 
 namespace gpt4all::backend::ollama {
 
+//
+// basic types
+//
+
 /// Details about a model.
 struct ModelDetails {
     QString parent_model; /// The parent of the model.
@@ -39,15 +43,6 @@ struct Model {
 BOOST_DESCRIBE_STRUCT(Model, (), (model, modified_at, size, digest, details))
 #endif
 
-
-/// Request class for the show model info endpoint.
-struct ModelInfoRequest {
-    QString model; /// The model name.
-};
-#ifdef G4A_BACKEND_IMPL
-BOOST_DESCRIBE_STRUCT(ModelInfoRequest, (), (model))
-#endif
-
 enum MessageRole {
     system,
     user,
@@ -84,6 +79,21 @@ struct Message {
 BOOST_DESCRIBE_STRUCT(Message, (), (role, content, images, tool_calls))
 #endif
 
+//
+// request types
+//
+
+/// Request class for the show model info endpoint.
+struct ModelInfoRequest {
+    QString model; /// The model name.
+};
+#ifdef G4A_BACKEND_IMPL
+BOOST_DESCRIBE_STRUCT(ModelInfoRequest, (), (model))
+#endif
+
+//
+// response types
+//
 
 /// The response class for the version endpoint.
 struct VersionResponse {
diff --git a/gpt4all-backend/src/ollama-client.cpp b/gpt4all-backend/src/ollama-client.cpp
index 74029628..339c363e 100644
--- a/gpt4all-backend/src/ollama-client.cpp
+++ b/gpt4all-backend/src/ollama-client.cpp
@@ -74,6 +74,8 @@ auto OllamaClient::post(const QString &path, const Req &req) -> QCoro::Task<DataOrRespErr<Resp>>
 }
 
+template auto OllamaClient::post(const QString &, const ollama::ModelInfoRequest &)
+    -> QCoro::Task<DataOrRespErr<ModelInfoResponse>>;
 
 auto OllamaClient::getJson(const QString &path) -> QCoro::Task<DataOrRespErr<QJsonDocument>>
 {
     std::unique_ptr<QNetworkReply> reply(m_nam.get(makeRequest(path)));