Mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2025-09-23 20:28:27 +00:00)
feat: implement Top-K sampling for improved user control (#1110)
This commit adds Top-K sampling, a feature that lets users control the randomness of the generated text by specifying how many of the most probable next tokens the model considers. This gives users finer control and can improve the quality of the generated output.

Fixes: https://github.com/k8sgpt-ai/k8sgpt/issues/1105

Signed-off-by: VaibhavMalik4187 <vaibhavmalik2018@gmail.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
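For context, the sketch below is a minimal, self-contained illustration of what Top-K sampling means; it is not taken from the k8sgpt codebase. The idea: keep only the k most probable next-token candidates, renormalize their probabilities, and sample from that reduced set. The token names, probabilities, and the topKSample helper are made up purely for illustration.

// Illustrative sketch of Top-K sampling, independent of the diff below.
package main

import (
	"fmt"
	"math/rand"
	"sort"
)

type candidate struct {
	token string
	prob  float64
}

// topKSample keeps the k most probable candidates, renormalizes their
// probabilities, and samples one token proportionally to them.
func topKSample(dist []candidate, k int, rng *rand.Rand) string {
	// Sort by descending probability and truncate to the top k.
	sort.Slice(dist, func(i, j int) bool { return dist[i].prob > dist[j].prob })
	if k > len(dist) {
		k = len(dist)
	}
	top := dist[:k]

	// Renormalize: sample against the sum of the kept probabilities.
	var sum float64
	for _, c := range top {
		sum += c.prob
	}
	r := rng.Float64() * sum
	for _, c := range top {
		r -= c.prob
		if r <= 0 {
			return c.token
		}
	}
	return top[len(top)-1].token
}

func main() {
	rng := rand.New(rand.NewSource(42))
	dist := []candidate{
		{"pod", 0.40}, {"node", 0.25}, {"deployment", 0.20},
		{"service", 0.10}, {"ingress", 0.05},
	}
	// With k=2 only "pod" and "node" can ever be chosen.
	fmt.Println(topKSample(dist, 2, rng))
}

With k = 1 this degenerates to greedy decoding; larger k values admit more randomness, which is the knob this commit exposes to users of the SageMaker backend.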
@@ -33,6 +33,7 @@ type SageMakerAIClient struct {
 	temperature float32
 	endpoint    string
 	topP        float32
+	topK        int32
 	maxTokens   int
 }
 
@@ -56,6 +57,7 @@ type Message struct {
 type Parameters struct {
 	MaxNewTokens int     `json:"max_new_tokens"`
 	TopP         float64 `json:"top_p"`
+	TopK         float64 `json:"top_k"`
 	Temperature  float64 `json:"temperature"`
 }
 
@@ -74,6 +76,7 @@ func (c *SageMakerAIClient) Configure(config IAIConfig) error {
 	c.temperature = config.GetTemperature()
 	c.maxTokens = config.GetMaxTokens()
 	c.topP = config.GetTopP()
+	c.topK = config.GetTopK()
 	return nil
 }
 
@@ -90,6 +93,7 @@ func (c *SageMakerAIClient) GetCompletion(_ context.Context, prompt string) (str
 		Parameters: Parameters{
 			MaxNewTokens: int(c.maxTokens),
 			TopP:         float64(c.topP),
+			TopK:         float64(c.topK),
 			Temperature:  float64(c.temperature),
 		},
 	}
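As a rough illustration of how the new field reaches the model, the sketch below marshals the Parameters struct from the diff above into the JSON shape the SageMaker endpoint would see. The struct definition and JSON tags are taken from the diff; the concrete values, the standalone main package, and the assumption that this block is embedded in a larger request payload are illustrative only.

// Illustrative only: serialization of the Parameters block shown in the diff.
package main

import (
	"encoding/json"
	"fmt"
)

type Parameters struct {
	MaxNewTokens int     `json:"max_new_tokens"`
	TopP         float64 `json:"top_p"`
	TopK         float64 `json:"top_k"`
	Temperature  float64 `json:"temperature"`
}

func main() {
	// Example values chosen for demonstration, not defaults from k8sgpt.
	p := Parameters{
		MaxNewTokens: 2048,
		TopP:         0.9,
		TopK:         50, // the new knob added by this commit
		Temperature:  0.7,
	}
	out, _ := json.MarshalIndent(p, "", "  ")
	fmt.Println(string(out))
	// Prints:
	// {
	//   "max_new_tokens": 2048,
	//   "top_p": 0.9,
	//   "top_k": 50,
	//   "temperature": 0.7
	// }
}

Because the field carries the json:"top_k" tag, whatever value Configure reads via config.GetTopK() ends up as top_k in the request body sent to the SageMaker endpoint.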