Mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2026-03-19 19:42:38 +00:00)
Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | ee6f58443b |  |
|  | f1d2e306f3 |  |
|  | 83c5d67084 |  |
|  | 291e42dc4b |  |
Release version manifest:

```diff
@@ -1 +1 @@
-{".":"0.4.24"}
+{".":"0.4.26"}
```
CHANGELOG.md (14 changed lines)
```diff
@@ -1,5 +1,19 @@
 # Changelog
 
+## [0.4.26](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.25...v0.4.26) (2025-10-16)
+
+
+### Other
+
+* missing filter arg on serve ([#1583](https://github.com/k8sgpt-ai/k8sgpt/issues/1583)) ([f1d2e30](https://github.com/k8sgpt-ai/k8sgpt/commit/f1d2e306f32eb1e01a2788174084be29a7fa1282))
+
+## [0.4.25](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.24...v0.4.25) (2025-09-03)
+
+
+### Features
+
+* fix to broken inference ([#1575](https://github.com/k8sgpt-ai/k8sgpt/issues/1575)) ([291e42d](https://github.com/k8sgpt-ai/k8sgpt/commit/291e42dc4b81ffb0672c21fbb325ddebc5d531a3))
+
 ## [0.4.24](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.23...v0.4.24) (2025-08-18)
```
README.md (12 changed lines)
````diff
@@ -62,7 +62,7 @@ brew install k8sgpt
 <!---x-release-please-start-version-->
 ```
-sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.24/k8sgpt_386.rpm
+sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.26/k8sgpt_386.rpm
 ```
 <!---x-release-please-end-->
@@ -70,7 +70,7 @@ brew install k8sgpt
 <!---x-release-please-start-version-->
 ```
-sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.24/k8sgpt_amd64.rpm
+sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.26/k8sgpt_amd64.rpm
 ```
 <!---x-release-please-end-->
 </details>
@@ -83,7 +83,7 @@ brew install k8sgpt
 <!---x-release-please-start-version-->
 ```
-curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.24/k8sgpt_386.deb
+curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.26/k8sgpt_386.deb
 sudo dpkg -i k8sgpt_386.deb
 ```
@@ -94,7 +94,7 @@ sudo dpkg -i k8sgpt_386.deb
 <!---x-release-please-start-version-->
 ```
-curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.24/k8sgpt_amd64.deb
+curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.26/k8sgpt_amd64.deb
 sudo dpkg -i k8sgpt_amd64.deb
 ```
@@ -109,7 +109,7 @@ sudo dpkg -i k8sgpt_amd64.deb
 <!---x-release-please-start-version-->
 ```
-wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.24/k8sgpt_386.apk
+wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.26/k8sgpt_386.apk
 apk add --allow-untrusted k8sgpt_386.apk
 ```
 <!---x-release-please-end-->
@@ -118,7 +118,7 @@ sudo dpkg -i k8sgpt_amd64.deb
 <!---x-release-please-start-version-->
 ```
-wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.24/k8sgpt_amd64.apk
+wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.26/k8sgpt_amd64.apk
 apk add --allow-untrusted k8sgpt_amd64.apk
 ```
 <!---x-release-please-end-->
````
The serve command (`ServeCmd`):

```diff
@@ -41,6 +41,8 @@ var (
 	enableMCP bool
 	mcpPort   string
 	mcpHTTP   bool
+	// filters can be injected into the server (repeatable flag)
+	filters []string
 )
 
 var ServeCmd = &cobra.Command{
@@ -208,6 +210,7 @@ var ServeCmd = &cobra.Command{
 			EnableHttp: enableHttp,
 			Token:      aiProvider.Password,
 			Logger:     logger,
+			Filters:    filters,
 		}
 		go func() {
 			if err := server.ServeMetrics(); err != nil {
@@ -237,4 +240,6 @@ func init() {
 	ServeCmd.Flags().BoolVarP(&enableMCP, "mcp", "", false, "Enable Mission Control Protocol server")
 	ServeCmd.Flags().StringVarP(&mcpPort, "mcp-port", "", "8089", "Port to run the MCP server on")
 	ServeCmd.Flags().BoolVarP(&mcpHTTP, "mcp-http", "", false, "Enable HTTP mode for MCP server")
+	// allow injecting filters into the running server (repeatable)
+	ServeCmd.Flags().StringSliceVar(&filters, "filter", []string{}, "Filter to apply (can be specified multiple times)")
 }
```
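The flag is registered with cobra's `StringSliceVar`, so `--filter` accepts either repeated occurrences or a single comma-separated value. A minimal self-contained sketch of that behavior (the command name and filter values here are illustrative, not taken from the repository):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var filters []string

func main() {
	cmd := &cobra.Command{
		Use: "serve",
		Run: func(cmd *cobra.Command, args []string) {
			// Both `serve --filter Pod --filter Service` and
			// `serve --filter Pod,Service` accumulate into the slice.
			fmt.Println(filters) // [Pod Service]
		},
	}
	cmd.Flags().StringSliceVar(&filters, "filter", []string{}, "Filter to apply (can be specified multiple times)")
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```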
The Amazon Bedrock provider (`AmazonBedRockClient`):

```diff
@@ -458,26 +458,25 @@ func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
 		// Get the inference profile details
 		profile, err := a.getInferenceProfile(context.Background(), modelInput)
 		if err != nil {
-			// Instead of using a fallback model, throw an error
 			return fmt.Errorf("failed to get inference profile: %v", err)
-		} else {
-			// Extract the model ID from the inference profile
-			modelID, err := a.extractModelFromInferenceProfile(profile)
-			if err != nil {
-				return fmt.Errorf("failed to extract model ID from inference profile: %v", err)
-			}
-
-			// Find the model configuration for the extracted model ID
-			foundModel, err := a.getModelFromString(modelID)
-			if err != nil {
-				// Instead of using a fallback model, throw an error
-				return fmt.Errorf("failed to find model configuration for %s: %v", modelID, err)
-			}
-			a.model = foundModel
-
-			// Use the inference profile ARN as the model ID for API calls
-			a.model.Config.ModelName = modelInput
 		}
+		// Extract the model ID from the inference profile
+		modelID, err := a.extractModelFromInferenceProfile(profile)
+		if err != nil {
+			return fmt.Errorf("failed to extract model ID from inference profile: %v", err)
+		}
+		// Find the model configuration for the extracted model ID
+		foundModel, err := a.getModelFromString(modelID)
+		if err != nil {
+			// Instead of failing, use a generic config for completion/response
+			// But still warn user
+			return fmt.Errorf("failed to find model configuration for %s: %v", modelID, err)
+		}
+		// Use the found model config for completion/response, but set ModelName to the profile ARN
+		a.model = foundModel
+		a.model.Config.ModelName = modelInput
+		// Mark that we're using an inference profile
+		// (could add a field if needed)
 	} else {
 		// Regular model ID provided
 		foundModel, err := a.getModelFromString(modelInput)
```
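The substance of the `Configure` change is structural: the success path moves out of the `else` block into guard clauses, so each lookup either fails fast with a wrapped error or falls through, and the happy path reads top to bottom. The same shape in miniature (`lookupProfile` and `resolveModel` are hypothetical stand-ins, not the repository's API):

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for getInferenceProfile / getModelFromString above.
func lookupProfile(arn string) (string, error) {
	if arn == "" {
		return "", errors.New("empty ARN")
	}
	return "model-id", nil
}

func resolveModel(id string) (string, error) { return "config-for-" + id, nil }

func configure(arn string) (string, error) {
	// Guard clause: return immediately on failure instead of nesting an else.
	id, err := lookupProfile(arn)
	if err != nil {
		return "", fmt.Errorf("failed to get inference profile: %v", err)
	}
	cfg, err := resolveModel(id)
	if err != nil {
		return "", fmt.Errorf("failed to find model configuration for %s: %v", id, err)
	}
	return cfg, nil
}

func main() {
	fmt.Println(configure("arn:aws:bedrock:us-east-1:123456789012:inference-profile/example"))
}
```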
```diff
@@ -562,7 +561,8 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
 		supportedModels[i] = m.Name
 	}
 
-	if !bedrock_support.IsModelSupported(a.model.Config.ModelName, supportedModels) {
+	// Allow valid inference profile ARNs as supported models
+	if !bedrock_support.IsModelSupported(a.model.Config.ModelName, supportedModels) && !validateInferenceProfileArn(a.model.Config.ModelName) {
 		return "", fmt.Errorf("model '%s' is not supported.\nSupported models:\n%s", a.model.Config.ModelName, func() string {
 			s := ""
 			for _, m := range supportedModels {
```
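`validateInferenceProfileArn` is added elsewhere in the commit and not shown in this hunk. A plausible sketch, assuming it simply pattern-matches Bedrock inference-profile ARNs (the repository's actual validation may be stricter or looser):

```go
package main

import (
	"fmt"
	"regexp"
)

// Assumed pattern for illustration: region, 12-digit account ID, and an
// inference-profile (or application-inference-profile) resource path.
var inferenceProfileArnRe = regexp.MustCompile(
	`^arn:aws:bedrock:[a-z0-9-]+:[0-9]{12}:(application-)?inference-profile/.+$`)

func validateInferenceProfileArn(arn string) bool {
	return inferenceProfileArnRe.MatchString(arn)
}

func main() {
	fmt.Println(validateInferenceProfileArn(
		"arn:aws:bedrock:us-east-1:123456789012:inference-profile/us.anthropic.claude-3-5-sonnet-20240620-v1:0")) // true
	fmt.Println(validateInferenceProfileArn("anthropic.claude-v2")) // false: a plain model ID
}
```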
The server `Config` struct:

```diff
@@ -54,6 +54,8 @@ type Config struct {
 	AnalyzeHandler *analyze.Handler
 	QueryHandler   *query.Handler
 	Logger         *zap.Logger
+	// Filters can be injected into the server to limit analysis to specific analyzers
+	Filters []string
 	metricsServer *http.Server
 	listener      net.Listener
 	EnableHttp    bool
```
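This `Filters` field is what the serve command's `--filter` values are copied into. The diff does not show how the server consumes them; a self-contained sketch of one plausible use, with the struct trimmed to the fields visible here (`allowed` is a hypothetical helper):

```go
package main

import "fmt"

// Trimmed stand-in for the server Config above; handler, logger, and
// listener fields omitted.
type Config struct {
	EnableHttp bool
	// Filters limits analysis to specific analyzers; empty means no limit.
	Filters []string
}

// allowed reports whether an analyzer passes the injected filter list.
// This consumption logic is an assumption; the diff only adds the field.
func (c *Config) allowed(analyzer string) bool {
	if len(c.Filters) == 0 {
		return true
	}
	for _, f := range c.Filters {
		if f == analyzer {
			return true
		}
	}
	return false
}

func main() {
	cfg := &Config{Filters: []string{"Pod", "Service"}}
	fmt.Println(cfg.allowed("Pod"))        // true
	fmt.Println(cfg.allowed("Deployment")) // false
}
```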