Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-09-01 16:47:18 +00:00)
Fix regenerate button to be deterministic and bump the llama version to the latest we have for gguf.
@@ -371,7 +371,7 @@ void ChatLLM::regenerateResponse()
     else
         m_ctx.n_past -= m_promptResponseTokens;
     m_ctx.n_past = std::max(0, m_ctx.n_past);
-    m_ctx.tokens.erase(m_ctx.tokens.end() -= m_promptResponseTokens, m_ctx.tokens.end());
+    m_ctx.tokens.erase(m_ctx.tokens.end() - m_promptResponseTokens, m_ctx.tokens.end());
     m_promptResponseTokens = 0;
     m_promptTokens = 0;
     m_response = std::string();
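For context, here is a minimal standalone sketch of the rollback logic the diff touches. The field names (n_past, tokens) and the token count mirror the diff; the Ctx struct and the rollback function are assumptions introduced purely for illustration, not the project's actual types.

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for the context state manipulated in the diff.
struct Ctx {
    int32_t n_past = 0;              // number of tokens already evaluated
    std::vector<int32_t> tokens;     // token history kept in sync with n_past
};

// Roll back the last prompt/response pair so regeneration restarts from a
// clean state, following the same steps as the diff above.
void rollback(Ctx &ctx, int32_t promptResponseTokens) {
    ctx.n_past -= promptResponseTokens;
    ctx.n_past = std::max(0, ctx.n_past);   // never let n_past go negative
    // The fix: compute the erase start with operator- instead of operator-=.
    // `tokens.end() -= n` modifies a temporary iterator; that compiles when
    // the iterator is a class type but is ill-formed when it is a raw pointer,
    // so `tokens.end() - n` is the portable and idiomatic form.
    ctx.tokens.erase(ctx.tokens.end() - promptResponseTokens, ctx.tokens.end());
}

Both expressions yield the same iterator on implementations where the old code compiled, so the change is about correctness and portability of the idiom rather than a different erase range.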