implement local Nomic Embed via llama.cpp (#2086)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Author: Jared Van Bortel
Date: 2024-03-13 18:09:24 -04:00
Committed by: GitHub
Parent: 171f4e488e
Commit: 406e88b59a

23 changed files with 799 additions and 1198 deletions


@@ -213,21 +213,26 @@ LLModel *LLModel::Implementation::constructDefaultLlama() {
 }
 
 std::vector<LLModel::GPUDevice> LLModel::Implementation::availableGPUDevices() {
-    auto * llama = constructDefaultLlama();
+    auto *llama = constructDefaultLlama();
     if (llama) { return llama->availableGPUDevices(0); }
     return {};
 }
 
 int32_t LLModel::Implementation::maxContextLength(const std::string &modelPath) {
-    auto * llama = constructDefaultLlama();
+    auto *llama = constructDefaultLlama();
     return llama ? llama->maxContextLength(modelPath) : -1;
 }
 
 int32_t LLModel::Implementation::layerCount(const std::string &modelPath) {
-    auto * llama = constructDefaultLlama();
+    auto *llama = constructDefaultLlama();
     return llama ? llama->layerCount(modelPath) : -1;
 }
 
+bool LLModel::Implementation::isEmbeddingModel(const std::string &modelPath) {
+    auto *llama = constructDefaultLlama();
+    return llama && llama->isEmbeddingModel(modelPath);
+}
+
 void LLModel::Implementation::setImplementationsSearchPath(const std::string& path) {
     s_implementations_search_path = path;
 }
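
For context: the new static helper lets a caller check whether a GGUF file is an embedding model (such as a Nomic Embed checkpoint) before deciding how to load it, and it returns false if the default llama.cpp implementation cannot be constructed. A minimal sketch of such a call site follows; the program and the "llmodel.h" include are illustrative assumptions, not part of this commit:

// check_embedding.cpp -- illustrative sketch, not part of this commit.
#include "llmodel.h" // assumed: backend header declaring LLModel::Implementation

#include <iostream>
#include <string>

int main(int argc, char *argv[])
{
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <model.gguf>\n";
        return 1;
    }
    std::string modelPath = argv[1];

    // isEmbeddingModel() constructs the default llama.cpp implementation
    // internally, so no model needs to be loaded first; it returns false
    // when construction fails or the file is not an embedding model.
    if (LLModel::Implementation::isEmbeddingModel(modelPath))
        std::cout << modelPath << ": embedding model\n";
    else
        std::cout << modelPath << ": not an embedding model\n";
    return 0;
}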