Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-10 12:59:09 +00:00)
Establish basic compiler warnings, and fix a few style issues (#3039)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -1,4 +1,7 @@
 cmake_minimum_required(VERSION 3.23) # for FILE_SET
 
+include(../common/common.cmake)
+
 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
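
The new include(../common/common.cmake) pulls in the shared gpt4all_add_warning_options() helper applied to each target below, and CMAKE_EXPORT_COMPILE_COMMANDS makes CMake emit compile_commands.json for tooling such as clangd. A minimal sketch of what such a helper could look like; the flag choices here are an assumption, not the commit's actual list:

    # Hypothetical sketch of a warning-options helper like the one
    # common/common.cmake provides; the real flag set may differ.
    function(gpt4all_add_warning_options target)
        if (MSVC)
            # assumed MSVC warning level
            target_compile_options(${target} PRIVATE /W4)
        else()
            # assumed GCC/Clang baseline warnings
            target_compile_options(${target} PRIVATE -Wall -Wextra)
        endif()
    endfunction()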
@@ -94,8 +97,6 @@ if (LLMODEL_ROCM)
     list(APPEND BUILD_VARIANTS rocm rocm-avxonly)
 endif()
 
-set(CMAKE_VERBOSE_MAKEFILE ON)
-
 # Go through each build variant
 foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Determine flags
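
With set(CMAKE_VERBOSE_MAKEFILE ON) removed, builds default to quiet output again; the same verbosity is still available on demand via cmake --build <dir> --verbose (or VERBOSE=1 with Makefile generators), so nothing is lost by dropping the hard-coded setting.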
@@ -151,6 +152,7 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Add each individual implementations
     add_library(llamamodel-mainline-${BUILD_VARIANT} SHARED
         src/llamamodel.cpp src/llmodel_shared.cpp)
+    gpt4all_add_warning_options(llamamodel-mainline-${BUILD_VARIANT})
     target_compile_definitions(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
         LLAMA_VERSIONS=>=3 LLAMA_DATE=999999)
     target_include_directories(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
@@ -169,6 +171,7 @@ add_library(llmodel
     src/llmodel_c.cpp
     src/llmodel_shared.cpp
 )
+gpt4all_add_warning_options(llmodel)
 target_sources(llmodel PUBLIC
     FILE_SET public_headers TYPE HEADERS BASE_DIRS include
         FILES include/gpt4all-backend/llmodel.h
@@ -146,7 +146,7 @@ public:
     virtual bool supportsEmbedding() const = 0;
     virtual bool supportsCompletion() const = 0;
     virtual bool loadModel(const std::string &modelPath, int n_ctx, int ngl) = 0;
-    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; };
+    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isEmbeddingModel(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isModelLoaded() const = 0;
     virtual size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) = 0;
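
The only change in this hunk drops the stray semicolon after the inline body of isModelBlacklisted(). Under the newly enabled warnings, an extra ';' after a member function definition typically trips a diagnostic such as Clang/GCC's -Wextra-semi. A hypothetical minimal reproduction, not code from the repo:

    // Compile with -Wextra-semi (Clang/GCC) to see the diagnostic.
    struct Example {
        bool before() const { return false; };  // warning: extra ';' after member function definition
        bool after() const { return false; }    // fixed: no trailing semicolon
    };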
@@ -260,7 +260,7 @@ void LLModel::generateResponse(std::function<bool(int32_t, const std::string&)>
         cachedTokens.push_back(new_tok.value());
         cachedResponse += new_piece;
 
-        auto accept = [this, &promptCtx, &cachedTokens, &new_tok, allowContextShift]() -> bool {
+        auto accept = [this, &promptCtx, &new_tok, allowContextShift]() -> bool {
             // Shift context if out of space
             if (promptCtx.n_past >= promptCtx.n_ctx) {
                 (void)allowContextShift;
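
Here the unused cachedTokens capture is removed from the accept lambda, the kind of issue Clang reports via -Wunused-lambda-capture, while allowContextShift stays captured and is explicitly (void)-cast to mark it as intentionally unused on this path. A hypothetical minimal reproduction, not code from the repo:

    // Compile with clang++ -Wunused-lambda-capture to see the diagnostic.
    #include <vector>

    int main() {
        std::vector<int> tokens;
        bool allowShift = true;
        auto accept = [&tokens, allowShift] {  // warning: lambda capture 'tokens' is not used
            (void)allowShift;                  // void-cast counts as a use, so no warning here
            return true;
        };
        return accept() ? 0 : 1;
    }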