Mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2025-09-17 15:52:50 +00:00)
fix: add default maxToken value of watsonxai backend (#1209)
Signed-off-by: yanweili <yanweili@ibm.com>
Co-authored-by: yanweili <yanweili@ibm.com>
Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
@@ -1,10 +1,10 @@
 package ai
 
 import (
-    "os"
-    "fmt"
     "context"
     "errors"
+    "fmt"
+    "os"
 
     wx "github.com/IBM/watsonx-go/pkg/models"
 )
@@ -24,18 +24,23 @@ type WatsonxAIClient struct {
 
 const (
     modelMetallama = "ibm/granite-13b-chat-v2"
+    maxTokens = 2048
 )
 
 func (c *WatsonxAIClient) Configure(config IAIConfig) error {
-    if(config.GetModel() == "") {
-        c.model = config.GetModel()
-    } else {
+    if config.GetModel() == "" {
         c.model = modelMetallama
+    } else {
+        c.model = config.GetModel()
+    }
+    if config.GetMaxTokens() == 0 {
+        c.maxNewTokens = maxTokens
+    } else {
+        c.maxNewTokens = config.GetMaxTokens()
     }
     c.temperature = config.GetTemperature()
     c.topP = config.GetTopP()
     c.topK = config.GetTopK()
-    c.maxNewTokens = config.GetMaxTokens()
 
     // WatsonxAPIKeyEnvVarName = "WATSONX_API_KEY"
     // WatsonxProjectIDEnvVarName = "WATSONX_PROJECT_ID"
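
The substance of the fix is the new fallback in Configure: a max-token value of 0 from the config now maps to the package default of 2048 instead of being passed through to watsonx as-is. Below is a minimal, self-contained Go sketch of that zero-value fallback; the fallbackMaxTokens constant, the fakeConfig stub, and resolveMaxTokens are illustrative stand-ins for the real IAIConfig plumbing, not code from the project.

package main

import "fmt"

// Default used when the caller does not set a max-token budget
// (mirrors the maxTokens = 2048 constant added in this commit).
const fallbackMaxTokens = 2048

// fakeConfig is a stand-in for the IAIConfig getter used by Configure.
type fakeConfig struct {
    maxTokens int
}

func (f fakeConfig) GetMaxTokens() int { return f.maxTokens }

// resolveMaxTokens applies the same rule as the new Configure branch:
// a zero value means "not set", so the default is used instead.
func resolveMaxTokens(cfg fakeConfig) int {
    if cfg.GetMaxTokens() == 0 {
        return fallbackMaxTokens
    }
    return cfg.GetMaxTokens()
}

func main() {
    fmt.Println(resolveMaxTokens(fakeConfig{}))               // 2048 (default)
    fmt.Println(resolveMaxTokens(fakeConfig{maxTokens: 512})) // 512 (explicitly configured)
}
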
@@ -75,7 +80,6 @@ func (c *WatsonxAIClient) GetCompletion(ctx context.Context, prompt string) (str
     if result.Text == "" {
         return "", errors.New("Expected a result, but got an empty string")
     }
-
     return result.Text, nil
 }
 
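
The last hunk only drops a stray blank line; the surrounding context shows the existing guard that turns an empty generation into an error rather than an empty string. As a rough caller-side illustration only (completeOnce below is a stand-in, not the project's GetCompletion), handling that condition might look like this:

package main

import (
    "errors"
    "fmt"
)

// completeOnce stands in for a backend call that, like the guard shown above,
// refuses to return an empty generation silently.
func completeOnce(prompt string) (string, error) {
    result := "" // pretend the model produced no text
    if result == "" {
        return "", errors.New("expected a result, but got an empty string")
    }
    return result, nil
}

func main() {
    if _, err := completeOnce("summarize this Kubernetes event"); err != nil {
        fmt.Println("completion failed:", err)
    }
}
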