Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2025-10-26 19:12:46 +00:00)

Commit: add min_p sampling parameter (#2014)

Signed-off-by: Christopher Barrera <cb@arda.tx.rr.com>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>

This commit is contained in:
@@ -36,7 +36,7 @@ std::string res = "";
 void * mm;

 void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
-                   float top_p, float temp, int n_batch,float ctx_erase)
+                   float top_p, float min_p, float temp, int n_batch,float ctx_erase)
 {
     llmodel_model* model = (llmodel_model*) m;

@@ -69,6 +69,7 @@ void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n,
         .n_predict = 50,
         .top_k = 10,
         .top_p = 0.9,
+        .min_p = 0.0,
         .temp = 1.0,
         .n_batch = 1,
         .repeat_penalty = 1.2,
@@ -83,6 +84,7 @@ void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n,
     prompt_context->top_k = top_k;
     prompt_context->context_erase = ctx_erase;
     prompt_context->top_p = top_p;
+    prompt_context->min_p = min_p;
     prompt_context->temp = temp;
     prompt_context->n_batch = n_batch;

Reference in New Issue
Block a user