mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-09-10 04:49:07 +00:00
python: load templates from model files, and add legacy template warning
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
This commit is contained in:
@@ -312,6 +312,8 @@ int32_t llmodel_count_prompt_tokens(llmodel_model model, const char *prompt, con
|
||||
|
||||
void llmodel_model_foreach_special_token(llmodel_model model, llmodel_special_token_callback callback);
|
||||
|
||||
const char *llmodel_model_chat_template(const char *model_path, const char **error);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
@@ -34,11 +34,11 @@ llmodel_model llmodel_model_create(const char *model_path)
|
||||
return fres;
|
||||
}
|
||||
|
||||
/// Report an error message to a C caller through an optional out-pointer.
///
/// @param errptr  Where to store the error C-string; may be null, in which
///                case the message is discarded.
/// @param message Error text; taken by value so callers can move into it.
///
/// The returned pointer stays valid until the next call on the same thread:
/// the text is kept in a thread-local buffer, so concurrent callers on
/// different threads do not clobber each other's messages.
static void llmodel_set_error(const char **errptr, std::string message)
{
    thread_local static std::string last_error_message;
    if (!errptr)
        return; // caller opted out of error reporting
    last_error_message = std::move(message);
    *errptr = last_error_message.c_str();
}
|
||||
@@ -318,3 +318,15 @@ void llmodel_model_foreach_special_token(llmodel_model model, llmodel_special_to
|
||||
for (auto &[name, token] : wrapper->llModel->specialTokens())
|
||||
callback(name.c_str(), token.c_str());
|
||||
}
|
||||
|
||||
const char *llmodel_model_chat_template(const char *model_path, const char **error)
|
||||
{
|
||||
static std::string s_chatTemplate;
|
||||
auto res = LLModel::Implementation::chatTemplate(model_path);
|
||||
if (res) {
|
||||
s_chatTemplate = *res;
|
||||
return s_chatTemplate.c_str();
|
||||
}
|
||||
llmodel_set_error(error, std::move(res.error()));
|
||||
return nullptr;
|
||||
}
|
||||
|
Reference in New Issue
Block a user