Mirror of https://github.com/nomic-ai/gpt4all.git
backend: rebase llama.cpp on upstream as of Sep 26th (#2998)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
@@ -71,19 +71,19 @@ bool ChatAPI::isModelLoaded() const
 // All three of the state virtual functions are handled custom inside of chatllm save/restore
 size_t ChatAPI::stateSize() const
 {
-    return 0;
+    throw std::logic_error("not implemented");
 }
 
-size_t ChatAPI::saveState(uint8_t *dest) const
+size_t ChatAPI::saveState(std::span<uint8_t> dest) const
 {
     Q_UNUSED(dest);
-    return 0;
+    throw std::logic_error("not implemented");
 }
 
-size_t ChatAPI::restoreState(const uint8_t *src)
+size_t ChatAPI::restoreState(std::span<const uint8_t> src)
 {
     Q_UNUSED(src);
-    return 0;
+    throw std::logic_error("not implemented");
 }
 
 void ChatAPI::prompt(const std::string &prompt,
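For readers unfamiliar with the new signatures: after this change the state API passes std::span buffers instead of raw pointers, with the return value giving the byte count. Below is a minimal, hypothetical caller-side sketch of that pattern (size a buffer via stateSize(), save into a span, restore from a read-only span). DummyModel and its behavior are illustrative stand-ins, not code from this repository; ChatAPI itself now throws std::logic_error from all three methods, as shown in the hunk above.

// Sketch only: toy type mirroring the span-based signatures in this diff.
// Build with a C++20 compiler, e.g. g++ -std=c++20 state_sketch.cpp
#include <cstdint>
#include <cstring>
#include <span>
#include <stdexcept>
#include <vector>

struct DummyModel {
    std::vector<uint8_t> kv{1, 2, 3, 4};  // pretend internal state

    size_t stateSize() const { return kv.size(); }

    // Writes the state into the caller-provided span; returns bytes written.
    size_t saveState(std::span<uint8_t> dest) const {
        if (dest.size() < kv.size())
            throw std::logic_error("buffer too small");
        std::memcpy(dest.data(), kv.data(), kv.size());
        return kv.size();
    }

    // Reads the state back from a read-only span; returns bytes consumed.
    size_t restoreState(std::span<const uint8_t> src) {
        kv.assign(src.begin(), src.end());
        return src.size();
    }
};

int main() {
    DummyModel model;

    // Caller sizes the buffer up front, then hands the model a span over it.
    std::vector<uint8_t> buf(model.stateSize());
    size_t written = model.saveState(buf);

    DummyModel other;
    size_t read = other.restoreState(std::span<const uint8_t>(buf.data(), written));
    return (written == read) ? 0 : 1;
}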
||||