mirror of https://github.com/nomic-ai/gpt4all.git
add min_p sampling parameter (#2014)
Signed-off-by: Christopher Barrera <cb@arda.tx.rr.com>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
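This change threads a min_p sampling parameter through the C# bindings. Min-p filtering keeps only tokens whose probability is at least min_p times the probability of the most likely token, so the cutoff scales with the model's confidence; the default of 0.0 disables it. A minimal sketch of the filtering rule, in C# to match the bindings (the helper below is illustrative only and not part of this commit, which just plumbs the parameter through to the native context):

    using System.Linq;

    static class MinPSketch
    {
        // Keep tokens whose probability is at least minP times the top
        // token's probability; zero out the rest and renormalize.
        public static float[] ApplyMinP(float[] probs, float minP)
        {
            float threshold = probs.Max() * minP;
            float[] kept = probs.Select(p => p >= threshold ? p : 0f).ToArray();
            float sum = kept.Sum();
            return kept.Select(p => p / sum).ToArray();
        }
    }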
@@ -64,6 +64,15 @@ public unsafe class LLModelPromptContext
         set => _ctx.top_p = value;
     }
 
+    /// <summary>
+    /// min p sampling probability threshold
+    /// </summary>
+    public float MinP
+    {
+        get => _ctx.min_p;
+        set => _ctx.min_p = value;
+    }
+
     /// <summary>
     /// temperature to adjust model's output distribution
     /// </summary>
@@ -29,6 +29,8 @@ public unsafe partial struct llmodel_prompt_context
 
     public float top_p;
 
+    public float min_p;
+
     public float temp;
 
     [NativeTypeName("int32_t")]
@@ -16,6 +16,7 @@ internal static class LLPromptContextExtensions
             n_predict = {ctx.n_predict}
             top_k = {ctx.top_k}
             top_p = {ctx.top_p}
+            min_p = {ctx.min_p}
             temp = {ctx.temp}
             n_batch = {ctx.n_batch}
             repeat_penalty = {ctx.repeat_penalty}
@@ -12,6 +12,7 @@ public static class PredictRequestOptionsExtensions
         TokensSize = opts.TokensSize,
         TopK = opts.TopK,
         TopP = opts.TopP,
+        MinP = opts.MinP,
         PastNum = opts.PastConversationTokensNum,
         RepeatPenalty = opts.RepeatPenalty,
         Temperature = opts.Temperature,
@@ -16,6 +16,8 @@ public record PredictRequestOptions
 
     public float TopP { get; init; } = 0.9f;
 
+    public float MinP { get; init; } = 0.0f;
+
     public float Temperature { get; init; } = 0.1f;
 
     public int Batches { get; init; } = 8;
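With the record extended, a caller opts into min-p filtering through PredictRequestOptions. A hedged usage sketch (the values are illustrative, not recommendations from this commit):

    var opts = new PredictRequestOptions
    {
        TopP = 0.9f,
        MinP = 0.05f,       // drop tokens below 5% of the top token's probability
        Temperature = 0.7f,
    };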