backend: fix buffer overrun in repeat penalty code
Caught with AddressSanitizer while running a basic prompt test against standalone llmodel. This fix allows ASan builds to complete a simple prompt without illegal accesses, though notably there are still several leaks.
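For context, the lines removed in the hunk below handed the sampler a window of promptCtx.n_ctx tokens starting at promptCtx.tokens.data(), but promptCtx.tokens only holds the tokens seen so far. A minimal standalone sketch of that access pattern (hypothetical names, not the llmodel API) reproduces the kind of out-of-bounds read an ASan build (-fsanitize=address) reports:

// Sketch only: standalone names, not the llmodel API.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const size_t n_ctx = 2048;                      // model context size
    std::vector<int32_t> tokens = {101, 102, 103};  // only 3 tokens accumulated so far

    // Mirrors the removed call: start = tokens.data() + n_ctx - n_ctx, length = n_ctx.
    const int32_t *window = tokens.data() + n_ctx - n_ctx;
    int64_t sum = 0;
    for (size_t i = 0; i < n_ctx; ++i)
        sum += window[i];                           // reads far past the 3-element buffer
    std::printf("%lld\n", (long long) sum);         // under ASan: heap-buffer-overflow on the read above
}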
@@ -993,9 +993,10 @@ void GPTJ::prompt(const std::string &prompt,
         gpt_vocab::id id = 0;
         {
             const int64_t t_start_sample_us = ggml_time_us();
+            const size_t n_prev_toks = std::min((size_t) promptCtx.repeat_last_n, promptCtx.tokens.size());
             id = gpt_sample_top_k_top_p(d_ptr->vocab, n_vocab,
-                promptCtx.tokens.data() + promptCtx.n_ctx - promptCtx.n_ctx,
-                promptCtx.n_ctx,
+                promptCtx.tokens.data() + promptCtx.tokens.size() - n_prev_toks,
+                n_prev_toks,
                 promptCtx.logits,
                 promptCtx.top_k, promptCtx.top_p, promptCtx.temp,
                 promptCtx.repeat_penalty,
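The added n_prev_toks clamps the repeat-penalty window to the tokens actually accumulated, so the start pointer tokens.data() + tokens.size() - n_prev_toks never points before the buffer and the window never extends past its end. A small standalone sketch of the same arithmetic (names assumed, not the llmodel API):

// Sketch of the clamped window used by the fix (standalone names, assumed).
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// Returns the window of most-recent tokens the repeat penalty should see,
// never larger than the history itself.
static std::pair<const int32_t *, size_t>
repeat_window(const std::vector<int32_t> &tokens, size_t repeat_last_n) {
    const size_t n_prev_toks = std::min(repeat_last_n, tokens.size());
    const int32_t *start = tokens.data() + tokens.size() - n_prev_toks;
    // [start, start + n_prev_toks) lies entirely inside the vector.
    assert(start >= tokens.data());
    assert(start + n_prev_toks <= tokens.data() + tokens.size());
    return {start, n_prev_toks};
}

int main() {
    std::vector<int32_t> tokens = {101, 102, 103};   // short history
    auto [start, len] = repeat_window(tokens, /*repeat_last_n=*/64);
    std::printf("window length: %zu (clamped from 64)\n", len);  // prints 3
    for (size_t i = 0; i < len; ++i)
        std::printf("  token %d\n", (int) start[i]);
}

Once the history holds at least repeat_last_n tokens, the clamp is a no-op and the window is simply the last repeat_last_n tokens.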