Mirror of https://github.com/k8sgpt-ai/k8sgpt.git, synced 2025-04-27 19:15:24 +00:00

Compare commits

17 commits (SHA1):
f603948935
67f5855695
ebb0373f69
3b6ad06de1
443469960a
17863c24d5
e588fc316d
a128906136
0553b984b7
96d86d3eb0
df17e3e728
80904e3063
cf6f9289e1
a79224e2bf
9ce33469d8
969fe99b33
91d423b147
.github/workflows/release.yaml (vendored, 2 lines changed)

@@ -127,7 +127,7 @@ jobs:
          output-file: ./sbom-${{ env.IMAGE_NAME }}.spdx.json

      - name: Attach SBOM to release
-       uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2
+       uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2
        with:
          tag_name: ${{ needs.release-please.outputs.tag_name }}
          files: ./sbom-${{ env.IMAGE_NAME }}.spdx.json
.github/workflows/test.yaml (vendored, 2 lines changed)

@@ -25,6 +25,6 @@ jobs:
      - name: Run test
        run: go test ./... -coverprofile=coverage.txt
      - name: Upload coverage to Codecov
-       uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 # v5
+       uses: codecov/codecov-action@ad3126e916f78f00edff4ed0317cf185271ccc2d # v5
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
@@ -70,8 +70,28 @@ checksum:
snapshot:
  name_template: "{{ incpatch .Version }}-next"

# skip: true
# The lines beneath this are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
announce:
  slack:
    # Whether its enabled or not.
    #
    # Templates: allowed (since v2.6).
    enabled: true

    # Message template to use while publishing.
    #
    # Default: '{{ .ProjectName }} {{ .Tag }} is out! Check it out at {{ .ReleaseURL }}'.
    # Templates: allowed.
    message_template: "{{ .ProjectName }} release {{.Tag}} is out!"

    # The name of the channel that the user selected as a destination for webhook messages.
    channel: "#general"

    # Set your Webhook's user name.
    username: "K8sGPT"

    # Emoji to use as the icon for this message. Overrides icon_url.
    icon_emoji: ""

    # URL to an image to use as the icon for this message.
    icon_url: ""
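The new `announce.slack` block has GoReleaser post the rendered `message_template` to a Slack incoming webhook when a release is published. As a rough illustration of what that post boils down to, here is a hypothetical Go sketch of sending the same text to a webhook; the `SLACK_WEBHOOK_URL` variable and the hard-coded message are assumptions for the example, not part of the release pipeline.

```go
// Minimal sketch: post a release announcement to a Slack incoming webhook.
// SLACK_WEBHOOK_URL and the message text are illustrative assumptions; in the
// real pipeline GoReleaser renders message_template and posts it itself.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	payload, err := json.Marshal(map[string]string{
		"text":     "k8sgpt release v0.4.13 is out!", // roughly what message_template renders to
		"channel":  "#general",
		"username": "K8sGPT",
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post(os.Getenv("SLACK_WEBHOOK_URL"), "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("Slack webhook responded with", resp.Status)
}
```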
@@ -1 +1 @@
-{".":"0.4.10"}
+{".":"0.4.13"}
CHANGELOG.md (55 lines changed)

@@ -1,5 +1,60 @@
 # Changelog

+## [0.4.13](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.12...v0.4.13) (2025-04-22)
+
+### Features
+
+* slack announce ([#1466](https://github.com/k8sgpt-ai/k8sgpt/issues/1466)) ([3b6ad06](https://github.com/k8sgpt-ai/k8sgpt/commit/3b6ad06de1121c870fb486e0fe2bd1f87be16627))
+
+### Bug Fixes
+
+* reverse hpa ScalingLimited error condition ([#1366](https://github.com/k8sgpt-ai/k8sgpt/issues/1366)) ([ebb0373](https://github.com/k8sgpt-ai/k8sgpt/commit/ebb0373f69ad64a6cc43d0695d07e1d076c6366e))
+
+### Other
+
+* **deps:** update softprops/action-gh-release digest to da05d55 ([#1464](https://github.com/k8sgpt-ai/k8sgpt/issues/1464)) ([4434699](https://github.com/k8sgpt-ai/k8sgpt/commit/443469960a6b6791e358ee0a97e4c1dc5c3018e6))
+
+## [0.4.12](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.11...v0.4.12) (2025-04-17)
+
+### Features
+
+* new analyzers ([#1459](https://github.com/k8sgpt-ai/k8sgpt/issues/1459)) ([a128906](https://github.com/k8sgpt-ai/k8sgpt/commit/a128906136431189812d4d2dea68ea98cbfe5eeb))
+
+### Bug Fixes
+
+* **deps:** update module golang.org/x/net to v0.38.0 [security] ([#1462](https://github.com/k8sgpt-ai/k8sgpt/issues/1462)) ([e588fc3](https://github.com/k8sgpt-ai/k8sgpt/commit/e588fc316d29a29a7dde6abe2302833b38f1d302))
+
+### Other
+
+* **deps:** update codecov/codecov-action digest to ad3126e ([#1456](https://github.com/k8sgpt-ai/k8sgpt/issues/1456)) ([0553b98](https://github.com/k8sgpt-ai/k8sgpt/commit/0553b984b7c87b345f171bf6e5d632d890db689c))
+
+## [0.4.11](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.10...v0.4.11) (2025-04-15)
+
+### Features
+
+* add verbose flag to enable detailed output ([#1420](https://github.com/k8sgpt-ai/k8sgpt/issues/1420)) ([a79224e](https://github.com/k8sgpt-ai/k8sgpt/commit/a79224e2bf96f458dbc96404c8f4847970e8d2ef))
+* call bedrock with inference profile ([#1449](https://github.com/k8sgpt-ai/k8sgpt/issues/1449)) ([91d423b](https://github.com/k8sgpt-ai/k8sgpt/commit/91d423b147ca18cda7d54ff19349938a894ecb85))
+* improved test coverage ([#1455](https://github.com/k8sgpt-ai/k8sgpt/issues/1455)) ([80904e3](https://github.com/k8sgpt-ai/k8sgpt/commit/80904e3063b00b0536171b7b62b938938b20825a))
+
+### Bug Fixes
+
+* config ai provider in query ([#1457](https://github.com/k8sgpt-ai/k8sgpt/issues/1457)) ([df17e3e](https://github.com/k8sgpt-ai/k8sgpt/commit/df17e3e728591e974703527dff86de882af17790))
+* **deps:** update module gopkg.in/yaml.v2 to v3 ([#1447](https://github.com/k8sgpt-ai/k8sgpt/issues/1447)) ([969fe99](https://github.com/k8sgpt-ai/k8sgpt/commit/969fe99b3320c313f1c97133cdffb668a00d5fb5))
+* **deps:** update module gopkg.in/yaml.v2 to v3 ([#1453](https://github.com/k8sgpt-ai/k8sgpt/issues/1453)) ([cf6f928](https://github.com/k8sgpt-ai/k8sgpt/commit/cf6f9289e13ee729c24968fd771c901f412e8db7))
+
+### Docs
+
+* fix the slack invite link ([#1450](https://github.com/k8sgpt-ai/k8sgpt/issues/1450)) ([9ce3346](https://github.com/k8sgpt-ai/k8sgpt/commit/9ce33469d85aa0829e995e4b404ae85734124fb4))
+
 ## [0.4.10](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.9...v0.4.10) (2025-04-10)
README.md (19 lines changed)

@@ -62,7 +62,7 @@ brew install k8sgpt
 <!---x-release-please-start-version-->

 ```
-sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.10/k8sgpt_386.rpm
+sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.13/k8sgpt_386.rpm
 ```
 <!---x-release-please-end-->

@@ -70,7 +70,7 @@ brew install k8sgpt

 <!---x-release-please-start-version-->
 ```
-sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.10/k8sgpt_amd64.rpm
+sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.13/k8sgpt_amd64.rpm
 ```
 <!---x-release-please-end-->
 </details>

@@ -83,7 +83,7 @@ brew install k8sgpt
 <!---x-release-please-start-version-->

 ```
-curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.10/k8sgpt_386.deb
+curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.13/k8sgpt_386.deb
 sudo dpkg -i k8sgpt_386.deb
 ```

@@ -94,7 +94,7 @@ sudo dpkg -i k8sgpt_386.deb
 <!---x-release-please-start-version-->

 ```
-curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.10/k8sgpt_amd64.deb
+curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.13/k8sgpt_amd64.deb
 sudo dpkg -i k8sgpt_amd64.deb
 ```

@@ -109,7 +109,7 @@ sudo dpkg -i k8sgpt_amd64.deb

 <!---x-release-please-start-version-->
 ```
-wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.10/k8sgpt_386.apk
+wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.13/k8sgpt_386.apk
 apk add --allow-untrusted k8sgpt_386.apk
 ```
 <!---x-release-please-end-->

@@ -118,7 +118,7 @@ sudo dpkg -i k8sgpt_amd64.deb

 <!---x-release-please-start-version-->
 ```
-wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.10/k8sgpt_amd64.apk
+wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.13/k8sgpt_amd64.apk
 apk add --allow-untrusted k8sgpt_amd64.apk
 ```
 <!---x-release-please-end-->

@@ -196,6 +196,9 @@ you will be able to write your own analyzers.
 - [x] gateway
 - [x] httproute
 - [x] logAnalyzer
+- [x] storageAnalyzer
+- [x] securityAnalyzer
+- [x] configMapAnalyzer

 ## Examples

@@ -592,7 +595,7 @@ Please read our [contributing guide](./CONTRIBUTING.md).

 ## Community

-Find us on [Slack](https://join.slack.com/t/k8sgpt/shared_invite/zt-276pa9uyq-pxAUr4TCVHubFxEvLZuT1Q)
+Find us on [Slack](https://join.slack.com/t/k8sgpt/shared_invite/zt-332vhyaxv-bfjJwHZLXWVCB3QaXafEYQ)

 <a href="https://github.com/k8sgpt-ai/k8sgpt/graphs/contributors">
   <img src="https://contrib.rocks/image?repo=k8sgpt-ai/k8sgpt" />
@@ -600,4 +603,4 @@ Find us on [Slack](https://join.slack.com/t/k8sgpt/shared_invite/zt-276pa9uyq-px

 ## License

-[](https://app.fossa.com/projects/git%2Bgithub.com%2Fk8sgpt-ai%2Fk8sgpt?ref=badge_large)
+[](https://app.fossa.com/projects/git%2Bgithub.com%2Fk8sgpt-ai%2Fk8sgpt?ref=badge_large)
@@ -23,6 +23,7 @@ import (
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai/interactive"
	"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
)

var (
@@ -67,25 +68,45 @@ var AnalyzeCmd = &cobra.Command{
			withStats,
		)

+		verbose := viper.GetBool("verbose")
+		if verbose {
+			fmt.Println("Debug: Checking analysis configuration.")
+		}
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
+		if verbose {
+			fmt.Println("Debug: Analysis initialized.")
+		}
		defer config.Close()

		if customAnalysis {
			config.RunCustomAnalysis()
+			if verbose {
+				fmt.Println("Debug: All custom analyzers completed.")
+			}
		}
		config.RunAnalysis()
+		if verbose {
+			fmt.Println("Debug: All core analyzers completed.")
+		}

		if explain {
-			if err := config.GetAIResults(output, anonymize); err != nil {
+			err := config.GetAIResults(output, anonymize)
+			if verbose {
+				fmt.Println("Debug: Checking AI results.")
+			}
+			if err != nil {
				color.Red("Error: %v", err)
				os.Exit(1)
			}
		}
		// print results
		output_data, err := config.PrintOutput(output)
+		if verbose {
+			fmt.Println("Debug: Checking output.")
+		}
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
@@ -37,6 +37,7 @@ var (
	cfgFile     string
	kubecontext string
	kubeconfig  string
+	verbose     bool
	Version     string
	Commit      string
	Date        string
@@ -84,6 +85,7 @@ func init() {
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", fmt.Sprintf("Default config file (%s/k8sgpt/k8sgpt.yaml)", xdg.ConfigHome))
	rootCmd.PersistentFlags().StringVar(&kubecontext, "kubecontext", "", "Kubernetes context to use. Only required if out-of-cluster.")
	rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
+	rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Show detailed tool actions (e.g., API calls, checks).")
}

// initConfig reads in config file and ENV variables if set.
@@ -104,6 +106,7 @@ func initConfig() {

	viper.Set("kubecontext", kubecontext)
	viper.Set("kubeconfig", kubeconfig)
+	viper.Set("verbose", verbose)

	viper.SetEnvPrefix("K8SGPT")
	viper.AutomaticEnv() // read in environment variables that match
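These hunks add the `--verbose`/`-v` persistent flag and mirror it into Viper in `initConfig`, so later code can call `viper.GetBool("verbose")` instead of passing the flag around. A self-contained sketch of that Cobra-to-Viper pattern, with a hypothetical command name that is not part of k8sgpt:

```go
// Sketch of the cobra-flag-to-viper pattern used above; the "example" command
// is illustrative, not k8sgpt itself.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var verbose bool

func main() {
	rootCmd := &cobra.Command{
		Use: "example",
		Run: func(cmd *cobra.Command, args []string) {
			// Any code path can consult viper instead of the flag variable.
			if viper.GetBool("verbose") {
				fmt.Println("Debug: verbose output enabled.")
			}
		},
	}
	rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Show detailed tool actions")
	cobra.OnInitialize(func() {
		// Mirror the parsed flag into viper, as initConfig does above.
		viper.Set("verbose", verbose)
	})
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```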
cmd/root_test.go (new file, 30 lines)

@@ -0,0 +1,30 @@
+/*
+Copyright 2023 The K8sGPT Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"testing"
+
+	"github.com/spf13/viper"
+)
+
+// Test that verbose flag is correctly set in viper.
+func TestInitConfig_VerboseFlag(t *testing.T) {
+	verbose = true
+	viper.Reset()
+	initConfig()
+	if !viper.GetBool("verbose") {
+		t.Error("Expected verbose flag to be true")
+	}
+}
go.mod (8 lines changed)

@@ -35,6 +35,7 @@ require (
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
	github.com/IBM/watsonx-go v1.0.1
+	github.com/agiledragon/gomonkey/v2 v2.13.0
	github.com/aws/aws-sdk-go v1.55.6
	github.com/cohere-ai/cohere-go/v2 v2.12.2
	github.com/go-logr/zapr v1.3.0
@@ -91,11 +92,11 @@ require (
	github.com/envoyproxy/go-control-plane v0.13.1 // indirect
	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
-	github.com/expr-lang/expr v1.16.9 // indirect
+	github.com/expr-lang/expr v1.17.2 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/gofrs/flock v0.12.1 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
	github.com/google/s2a-go v0.1.9 // indirect
@@ -120,6 +121,7 @@ require (
	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
	github.com/sony/gobreaker v0.5.0 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	go.opencensus.io v0.24.0 // indirect
@@ -243,7 +245,7 @@ require (
	go.uber.org/zap v1.27.0
	golang.org/x/crypto v0.36.0 // indirect
	golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect
-	golang.org/x/net v0.37.0
+	golang.org/x/net v0.38.0
	golang.org/x/oauth2 v0.25.0 // indirect
	golang.org/x/sync v0.12.0 // indirect
	golang.org/x/sys v0.31.0 // indirect
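Among the dependency changes, `github.com/agiledragon/gomonkey/v2` becomes a direct requirement; the new analysis tests further down in this compare use it to patch `kubernetes.NewClient` and `ai.NewClient` at runtime. A minimal, hypothetical sketch of that patching pattern follows; the `slowAdd` function is illustrative, and gomonkey generally needs inlining disabled (for example `go test -gcflags=all=-l`) to patch reliably.

```go
// Sketch of the gomonkey function-patching pattern the new tests rely on.
// slowAdd is a stand-in for an expensive or environment-dependent call.
package example

import (
	"testing"

	"github.com/agiledragon/gomonkey/v2"
)

func slowAdd(a, b int) int { return a + b }

func TestPatchedAdd(t *testing.T) {
	// Replace slowAdd with a canned implementation for the duration of the test.
	patches := gomonkey.ApplyFunc(slowAdd, func(a, b int) int {
		return 42
	})
	defer patches.Reset()

	if got := slowAdd(1, 2); got != 42 {
		t.Fatalf("expected patched result 42, got %d", got)
	}
}
```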
go.sum (16 lines changed)
@ -713,6 +713,8 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/O
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/agiledragon/gomonkey/v2 v2.13.0 h1:B24Jg6wBI1iB8EFR1c+/aoTg7QN/Cum7YffG8KMIyYo=
|
||||
github.com/agiledragon/gomonkey/v2 v2.13.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
|
||||
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
|
||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||
@ -870,6 +872,8 @@ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
|
||||
github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/expr-lang/expr v1.17.2 h1:o0A99O/Px+/DTjEnQiodAgOIK9PPxL8DtXhBRKC+Iso=
|
||||
github.com/expr-lang/expr v1.17.2/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
@ -936,8 +940,11 @@ github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeH
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
@ -1075,6 +1082,7 @@ github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/Q
|
||||
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
|
||||
github.com/gophercloud/gophercloud/v2 v2.4.0 h1:XhP5tVEH3ni66NSNK1+0iSO6kaGPH/6srtx6Cr+8eCg=
|
||||
github.com/gophercloud/gophercloud/v2 v2.4.0/go.mod h1:uJWNpTgJPSl2gyzJqcU/pIAhFUWvIkp8eE8M15n9rs4=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
|
||||
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
@ -1152,6 +1160,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
@ -1382,6 +1391,8 @@ github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+D
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg=
|
||||
github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
@ -1622,8 +1633,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -1802,6 +1813,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
|
@@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"
	"os"
+	"regexp"
	"strings"

	"github.com/aws/aws-sdk-go/service/bedrockruntime/bedrockruntimeiface"
@@ -293,6 +294,11 @@ func (a *AmazonBedRockClient) getModelFromString(model string) (*bedrock_support
			strings.Contains(modelConfigNameLower, modelLower) || strings.Contains(modelLower, modelConfigNameLower) {
			// Create a copy to avoid returning a pointer to a loop variable
			modelCopy := a.models[i]
+			// for partial match, set the model name to the input string if it is a valid ARN
+			if validateModelArn(modelLower) {
+				modelCopy.Config.ModelName = modelLower
+			}
+
			return &modelCopy, nil
		}
	}
@@ -300,6 +306,11 @@ func (a *AmazonBedRockClient) getModelFromString(model string) (*bedrock_support
	return nil, fmt.Errorf("model '%s' not found in supported models", model)
}

+func validateModelArn(model string) bool {
+	var re = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$`)
+	return re.MatchString(model)
+}
+
// Configure configures the AmazonBedRockClient with the provided configuration.
func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
	// Initialize models if not already initialized
@@ -326,7 +337,7 @@ func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
	// Create a new BedrockRuntime client
	a.client = bedrockruntime.New(sess)
	a.model = foundModel
-	a.model.Config.ModelName = foundModel.Name
+	a.model.Config.ModelName = foundModel.Config.ModelName
	a.temperature = config.GetTemperature()
	a.topP = config.GetTopP()
	a.maxTokens = config.GetMaxTokens()
@@ -349,7 +360,7 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
	// Build the parameters for the model invocation
	params := &bedrockruntime.InvokeModelInput{
		Body:        body,
-		ModelId:     aws.String(a.model.Name),
+		ModelId:     aws.String(a.model.Config.ModelName),
		ContentType: aws.String("application/json"),
		Accept:      aws.String("application/json"),
	}
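The new `validateModelArn` accepts full Bedrock ARNs (such as inference-profile ARNs) in addition to bare model IDs, and `getModelFromString` then carries the matched ARN through to `InvokeModelInput.ModelId`. A small standalone sketch that runs the same regular expression against a few sample strings; the ARNs are made up for illustration:

```go
// Sketch: the same ARN check as validateModelArn above, applied to sample inputs.
// The sample ARNs are illustrative, not real account resources.
package main

import (
	"fmt"
	"regexp"
)

var bedrockArnRe = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$`)

func main() {
	samples := []string{
		"anthropic.claude-3-5-sonnet-20240620-v1:0",                                                           // bare model ID: not an ARN
		"arn:aws:bedrock:us-east-1:123456789012:inference-profile/anthropic.claude-3-5-sonnet-20240620-v1:0", // Bedrock ARN: matches
		"arn:aws:s3:us-east-1:123456789012:some-bucket",                                                       // wrong service: no match
	}
	for _, s := range samples {
		fmt.Printf("%q -> %v\n", s, bedrockArnRe.MatchString(s))
	}
}
```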
@ -41,7 +41,40 @@ func TestBedrockModelConfig(t *testing.T) {
|
||||
assert.Equal(t, foundModel.Config.MaxTokens, 100)
|
||||
assert.Equal(t, foundModel.Config.Temperature, float32(0.5))
|
||||
assert.Equal(t, foundModel.Config.TopP, float32(0.9))
|
||||
assert.Equal(t, foundModel.Config.ModelName, "anthropic.claude-3-5-sonnet-20240620-v1:0")
|
||||
assert.Equal(t, foundModel.Config.ModelName, "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0")
|
||||
}
|
||||
|
||||
func TestBedrockInvalidModel(t *testing.T) {
|
||||
client := &AmazonBedRockClient{models: testModels}
|
||||
|
||||
foundModel, err := client.getModelFromString("arn:aws:s3:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0")
|
||||
assert.Nil(t, err, "Error should be nil")
|
||||
assert.Equal(t, foundModel.Config.MaxTokens, 100)
|
||||
}
|
||||
|
||||
func TestBedrockGetCompletionInferenceProfile(t *testing.T) {
|
||||
modelName := "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0"
|
||||
var inferenceModelModels = []bedrock_support.BedrockModel{
|
||||
{
|
||||
Name: "anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||
Completion: &bedrock_support.CohereMessagesCompletion{},
|
||||
Response: &bedrock_support.CohereMessagesResponse{},
|
||||
Config: bedrock_support.BedrockModelConfig{
|
||||
MaxTokens: 100,
|
||||
Temperature: 0.5,
|
||||
TopP: 0.9,
|
||||
ModelName: modelName,
|
||||
},
|
||||
},
|
||||
}
|
||||
client := &AmazonBedRockClient{models: inferenceModelModels}
|
||||
|
||||
config := AIProvider{
|
||||
Model: modelName,
|
||||
}
|
||||
err := client.Configure(&config)
|
||||
assert.Nil(t, err, "Error should be nil")
|
||||
assert.Equal(t, modelName, client.model.Config.ModelName, "Model name should match")
|
||||
}
|
||||
|
||||
func TestGetModelFromString(t *testing.T) {
|
||||
|
@@ -173,6 +173,20 @@ func TestAmazonCompletion_GetCompletion_UnsupportedModel(t *testing.T) {
	assert.Contains(t, err.Error(), "model unsupported-model is not supported")
}

+func TestAmazonCompletion_GetCompletion_Inference_Profile(t *testing.T) {
+	completion := &AmazonCompletion{}
+	modelConfig := BedrockModelConfig{
+		MaxTokens:   200,
+		Temperature: 0.5,
+		TopP:        0.7,
+		ModelName:   "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0",
+	}
+	prompt := "Test prompt"
+
+	_, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
+	assert.NoError(t, err)
+}
+
func Test_isModelSupported(t *testing.T) {
	assert.True(t, isModelSupported("anthropic.claude-v2"))
	assert.False(t, isModelSupported("unsupported-model"))
pkg/ai/factory.go (new file, 87 lines)
@ -0,0 +1,87 @@
|
||||
/*
|
||||
Copyright 2023 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ai
|
||||
|
||||
import (
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// AIClientFactory is an interface for creating AI clients
|
||||
type AIClientFactory interface {
|
||||
NewClient(provider string) IAI
|
||||
}
|
||||
|
||||
// DefaultAIClientFactory is the default implementation of AIClientFactory
|
||||
type DefaultAIClientFactory struct{}
|
||||
|
||||
// NewClient creates a new AI client using the default implementation
|
||||
func (f *DefaultAIClientFactory) NewClient(provider string) IAI {
|
||||
return NewClient(provider)
|
||||
}
|
||||
|
||||
// ConfigProvider is an interface for accessing configuration
|
||||
type ConfigProvider interface {
|
||||
UnmarshalKey(key string, rawVal interface{}) error
|
||||
}
|
||||
|
||||
// ViperConfigProvider is the default implementation of ConfigProvider using Viper
|
||||
type ViperConfigProvider struct{}
|
||||
|
||||
// UnmarshalKey unmarshals a key from the configuration using Viper
|
||||
func (p *ViperConfigProvider) UnmarshalKey(key string, rawVal interface{}) error {
|
||||
return viper.UnmarshalKey(key, rawVal)
|
||||
}
|
||||
|
||||
// Default instances to be used
|
||||
var (
|
||||
DefaultClientFactory = &DefaultAIClientFactory{}
|
||||
DefaultConfigProvider = &ViperConfigProvider{}
|
||||
)
|
||||
|
||||
// For testing - these variables can be overridden in tests
|
||||
var (
|
||||
testAIClientFactory AIClientFactory = nil
|
||||
testConfigProvider ConfigProvider = nil
|
||||
)
|
||||
|
||||
// GetAIClientFactory returns the test factory if set, otherwise the default
|
||||
func GetAIClientFactory() AIClientFactory {
|
||||
if testAIClientFactory != nil {
|
||||
return testAIClientFactory
|
||||
}
|
||||
return DefaultClientFactory
|
||||
}
|
||||
|
||||
// GetConfigProvider returns the test provider if set, otherwise the default
|
||||
func GetConfigProvider() ConfigProvider {
|
||||
if testConfigProvider != nil {
|
||||
return testConfigProvider
|
||||
}
|
||||
return DefaultConfigProvider
|
||||
}
|
||||
|
||||
// For testing - set the test implementations
|
||||
func SetTestAIClientFactory(factory AIClientFactory) {
|
||||
testAIClientFactory = factory
|
||||
}
|
||||
|
||||
func SetTestConfigProvider(provider ConfigProvider) {
|
||||
testConfigProvider = provider
|
||||
}
|
||||
|
||||
// Reset test implementations
|
||||
func ResetTestImplementations() {
|
||||
testAIClientFactory = nil
|
||||
testConfigProvider = nil
|
||||
}
|
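The new pkg/ai/factory.go introduces a test seam: production code resolves AI clients through `GetAIClientFactory()` and configuration through `GetConfigProvider()`, while tests can substitute fakes with `SetTestAIClientFactory`/`SetTestConfigProvider` and restore the defaults with `ResetTestImplementations`. A hypothetical test sketch (not part of this compare) showing how that seam might be exercised, reusing the package's existing `NoOpAIClient`:

```go
// Hypothetical sketch of using the factory seam from factory.go; the fakeFactory
// type and test name are assumptions for illustration.
package ai

import "testing"

// fakeFactory satisfies AIClientFactory and hands back the no-op client.
type fakeFactory struct{ requested string }

func (f *fakeFactory) NewClient(provider string) IAI {
	f.requested = provider
	return &NoOpAIClient{}
}

func TestFactorySeam(t *testing.T) {
	fake := &fakeFactory{}
	SetTestAIClientFactory(fake)
	defer ResetTestImplementations()

	client := GetAIClientFactory().NewClient("openai")
	if _, ok := client.(*NoOpAIClient); !ok {
		t.Fatalf("expected the fake factory to return the no-op client")
	}
	if fake.requested != "openai" {
		t.Fatalf("expected provider %q, got %q", "openai", fake.requested)
	}
}
```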
@ -18,6 +18,7 @@ import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@ -89,19 +90,35 @@ func NewAnalysis(
|
||||
// Get kubernetes client from viper.
|
||||
kubecontext := viper.GetString("kubecontext")
|
||||
kubeconfig := viper.GetString("kubeconfig")
|
||||
verbose := viper.GetBool("verbose")
|
||||
client, err := kubernetes.NewClient(kubecontext, kubeconfig)
|
||||
if verbose {
|
||||
fmt.Println("Debug: Checking kubernetes client initialization.")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initialising kubernetes client: %w", err)
|
||||
}
|
||||
if verbose {
|
||||
fmt.Printf("Debug: Kubernetes client initialized, server=%s.\n", client.Config.Host)
|
||||
}
|
||||
|
||||
// Load remote cache if it is configured.
|
||||
cache, err := cache.GetCacheConfiguration()
|
||||
if verbose {
|
||||
fmt.Println("Debug: Checking cache configuration.")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if verbose {
|
||||
fmt.Printf("Debug: Cache configuration loaded, type=%s.\n", cache.GetName())
|
||||
}
|
||||
|
||||
if noCache {
|
||||
cache.DisableCache()
|
||||
if verbose {
|
||||
fmt.Println("Debug: Cache disabled.")
|
||||
}
|
||||
}
|
||||
|
||||
a := &Analysis{
|
||||
@ -117,12 +134,31 @@ func NewAnalysis(
|
||||
WithDoc: withDoc,
|
||||
WithStats: withStats,
|
||||
}
|
||||
if verbose {
|
||||
fmt.Print("Debug: Analysis configuration loaded, ")
|
||||
fmt.Printf("filters=%v, language=%s, ", filters, language)
|
||||
if namespace == "" {
|
||||
fmt.Printf("namespace=none, ")
|
||||
} else {
|
||||
fmt.Printf("namespace=%s, ", namespace)
|
||||
}
|
||||
if labelSelector == "" {
|
||||
fmt.Printf("labelSelector=none, ")
|
||||
} else {
|
||||
fmt.Printf("labelSelector=%s, ", labelSelector)
|
||||
}
|
||||
fmt.Printf("explain=%t, maxConcurrency=%d, ", explain, maxConcurrency)
|
||||
fmt.Printf("withDoc=%t, withStats=%t.\n", withDoc, withStats)
|
||||
}
|
||||
if !explain {
|
||||
// Return early if AI use was not requested.
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var configAI ai.AIConfiguration
|
||||
if verbose {
|
||||
fmt.Println("Debug: Checking AI configuration.")
|
||||
}
|
||||
if err := viper.UnmarshalKey("ai", &configAI); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -135,10 +171,16 @@ func NewAnalysis(
|
||||
// Hence, use the default provider only if the backend is not specified by the user.
|
||||
if configAI.DefaultProvider != "" && backend == "" {
|
||||
backend = configAI.DefaultProvider
|
||||
if verbose {
|
||||
fmt.Printf("Debug: Using default AI provider %s.\n", backend)
|
||||
}
|
||||
}
|
||||
|
||||
if backend == "" {
|
||||
backend = "openai"
|
||||
if verbose {
|
||||
fmt.Printf("Debug: Using default AI provider %s.\n", backend)
|
||||
}
|
||||
}
|
||||
|
||||
var aiProvider ai.AIProvider
|
||||
@ -153,12 +195,23 @@ func NewAnalysis(
|
||||
return nil, fmt.Errorf("AI provider %s not specified in configuration. Please run k8sgpt auth", backend)
|
||||
}
|
||||
|
||||
if verbose {
|
||||
fmt.Printf("Debug: AI configuration loaded, provider=%s, ", backend)
|
||||
fmt.Printf("baseUrl=%s, model=%s.\n", aiProvider.BaseURL, aiProvider.Model)
|
||||
}
|
||||
|
||||
aiClient := ai.NewClient(aiProvider.Name)
|
||||
customHeaders := util.NewHeaders(httpHeaders)
|
||||
aiProvider.CustomHeaders = customHeaders
|
||||
if verbose {
|
||||
fmt.Println("Debug: Checking AI client initialization.")
|
||||
}
|
||||
if err := aiClient.Configure(&aiProvider); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if verbose {
|
||||
fmt.Println("Debug: AI client initialized.")
|
||||
}
|
||||
a.AIClient = aiClient
|
||||
a.AnalysisAIProvider = aiProvider.Name
|
||||
return a, nil
|
||||
@ -182,6 +235,18 @@ func (a *Analysis) RunCustomAnalysis() {
|
||||
semaphore := make(chan struct{}, a.MaxConcurrency)
|
||||
var wg sync.WaitGroup
|
||||
var mutex sync.Mutex
|
||||
verbose := viper.GetBool("verbose")
|
||||
if verbose {
|
||||
if len(customAnalyzers) == 0 {
|
||||
fmt.Println("Debug: No custom analyzers found.")
|
||||
} else {
|
||||
cAnalyzerNames := make([]string, len(customAnalyzers))
|
||||
for i, cAnalyzer := range customAnalyzers {
|
||||
cAnalyzerNames[i] = cAnalyzer.Name
|
||||
}
|
||||
fmt.Printf("Debug: Found custom analyzers %v.\n", cAnalyzerNames)
|
||||
}
|
||||
}
|
||||
for _, cAnalyzer := range customAnalyzers {
|
||||
wg.Add(1)
|
||||
semaphore <- struct{}{}
|
||||
@ -194,6 +259,9 @@ func (a *Analysis) RunCustomAnalysis() {
|
||||
mutex.Unlock()
|
||||
return
|
||||
}
|
||||
if verbose {
|
||||
fmt.Printf("Debug: %s launched.\n", cAnalyzer.Name)
|
||||
}
|
||||
|
||||
result, err := canClient.Run()
|
||||
if result.Kind == "" {
|
||||
@ -206,10 +274,16 @@ func (a *Analysis) RunCustomAnalysis() {
|
||||
mutex.Lock()
|
||||
a.Errors = append(a.Errors, fmt.Sprintf("[%s] %s", cAnalyzer.Name, err))
|
||||
mutex.Unlock()
|
||||
if verbose {
|
||||
fmt.Printf("Debug: %s completed with errors.\n", cAnalyzer.Name)
|
||||
}
|
||||
} else {
|
||||
mutex.Lock()
|
||||
a.Results = append(a.Results, result)
|
||||
mutex.Unlock()
|
||||
if verbose {
|
||||
fmt.Printf("Debug: %s completed without errors.\n", cAnalyzer.Name)
|
||||
}
|
||||
}
|
||||
<-semaphore
|
||||
}(cAnalyzer, &wg, semaphore)
|
||||
@ -219,6 +293,7 @@ func (a *Analysis) RunCustomAnalysis() {
|
||||
|
||||
func (a *Analysis) RunAnalysis() {
|
||||
activeFilters := viper.GetStringSlice("active_filters")
|
||||
verbose := viper.GetBool("verbose")
|
||||
|
||||
coreAnalyzerMap, analyzerMap := analyzer.GetAnalyzerMap()
|
||||
|
||||
@ -227,7 +302,13 @@ func (a *Analysis) RunAnalysis() {
|
||||
if a.WithDoc {
|
||||
var openApiErr error
|
||||
|
||||
if verbose {
|
||||
fmt.Println("Debug: Fetching Kubernetes docs.")
|
||||
}
|
||||
openapiSchema, openApiErr = a.Client.Client.Discovery().OpenAPISchema()
|
||||
if verbose {
|
||||
fmt.Println("Debug: Checking Kubernetes docs.")
|
||||
}
|
||||
if openApiErr != nil {
|
||||
a.Errors = append(a.Errors, fmt.Sprintf("[KubernetesDoc] %s", openApiErr))
|
||||
}
|
||||
@ -247,6 +328,9 @@ func (a *Analysis) RunAnalysis() {
|
||||
var mutex sync.Mutex
|
||||
// if there are no filters selected and no active_filters then run coreAnalyzer
|
||||
if len(a.Filters) == 0 && len(activeFilters) == 0 {
|
||||
if verbose {
|
||||
fmt.Println("Debug: No filters selected and no active filters found, run all core analyzers.")
|
||||
}
|
||||
for name, analyzer := range coreAnalyzerMap {
|
||||
wg.Add(1)
|
||||
semaphore <- struct{}{}
|
||||
@ -258,6 +342,9 @@ func (a *Analysis) RunAnalysis() {
|
||||
}
|
||||
// if the filters flag is specified
|
||||
if len(a.Filters) != 0 {
|
||||
if verbose {
|
||||
fmt.Printf("Debug: Filter flags %v specified, run selected core analyzers.\n", a.Filters)
|
||||
}
|
||||
for _, filter := range a.Filters {
|
||||
if analyzer, ok := analyzerMap[filter]; ok {
|
||||
semaphore <- struct{}{}
|
||||
@ -272,6 +359,9 @@ func (a *Analysis) RunAnalysis() {
|
||||
}
|
||||
|
||||
// use active_filters
|
||||
if len(activeFilters) > 0 && verbose {
|
||||
fmt.Printf("Debug: Found active filters %v, run selected core analyzers.\n", activeFilters)
|
||||
}
|
||||
for _, filter := range activeFilters {
|
||||
if analyzer, ok := analyzerMap[filter]; ok {
|
||||
semaphore <- struct{}{}
|
||||
@ -294,6 +384,10 @@ func (a *Analysis) executeAnalyzer(analyzer common.IAnalyzer, filter string, ana
|
||||
}
|
||||
|
||||
// Run the analyzer
|
||||
verbose := viper.GetBool("verbose")
|
||||
if verbose {
|
||||
fmt.Printf("Debug: %s launched.\n", reflect.TypeOf(analyzer).Name())
|
||||
}
|
||||
results, err := analyzer.Analyze(analyzerConfig)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
@ -315,11 +409,17 @@ func (a *Analysis) executeAnalyzer(analyzer common.IAnalyzer, filter string, ana
|
||||
a.Stats = append(a.Stats, stat)
|
||||
}
|
||||
a.Errors = append(a.Errors, fmt.Sprintf("[%s] %s", filter, err))
|
||||
if verbose {
|
||||
fmt.Printf("Debug: %s completed with errors.\n", reflect.TypeOf(analyzer).Name())
|
||||
}
|
||||
} else {
|
||||
if a.WithStats {
|
||||
a.Stats = append(a.Stats, stat)
|
||||
}
|
||||
a.Results = append(a.Results, results...)
|
||||
if verbose {
|
||||
fmt.Printf("Debug: %s completed without errors.\n", reflect.TypeOf(analyzer).Name())
|
||||
}
|
||||
}
|
||||
<-semaphore
|
||||
}
|
||||
@ -329,6 +429,11 @@ func (a *Analysis) GetAIResults(output string, anonymize bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
verbose := viper.GetBool("verbose")
|
||||
if verbose {
|
||||
fmt.Println("Debug: Generating AI analysis.")
|
||||
}
|
||||
|
||||
var bar *progressbar.ProgressBar
|
||||
if output != "json" {
|
||||
bar = progressbar.Default(int64(len(a.Results)))
|
||||
@ -337,6 +442,10 @@ func (a *Analysis) GetAIResults(output string, anonymize bool) error {
|
||||
for index, analysis := range a.Results {
|
||||
var texts []string
|
||||
|
||||
if bar != nil && verbose {
|
||||
bar.Describe(fmt.Sprintf("Analyzing %s", analysis.Kind))
|
||||
}
|
||||
|
||||
for _, failure := range analysis.Error {
|
||||
if anonymize {
|
||||
for _, s := range failure.Sensitive {
|
||||
|
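The verbose output added above repeats the same guard (`if verbose { fmt.Println(...) }`) before and after each step. A hypothetical helper, not part of this change, shows one common way such a pattern is factored; the `debugf` name is an assumption for illustration only.

```go
// Hypothetical helper capturing the repeated verbose-gating pattern above;
// not part of the k8sgpt change set.
package analysis

import (
	"fmt"

	"github.com/spf13/viper"
)

// debugf prints a "Debug:" line only when the verbose flag is set in viper.
func debugf(format string, args ...interface{}) {
	if viper.GetBool("verbose") {
		fmt.Printf("Debug: "+format+"\n", args...)
	}
}
```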
@ -17,13 +17,17 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
|
||||
"github.com/magiconair/properties/assert"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -31,9 +35,15 @@ import (
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// sub-function
|
||||
// helper function: get type name of an analyzer
|
||||
func getTypeName(i interface{}) string {
|
||||
return reflect.TypeOf(i).Name()
|
||||
}
|
||||
|
||||
// helper function: run analysis with filter
|
||||
func analysis_RunAnalysisFilterTester(t *testing.T, filterFlag string) []common.Result {
|
||||
clientset := fake.NewSimpleClientset(
|
||||
&v1.Pod{
|
||||
@ -404,3 +414,252 @@ func TestGetAIResultForSanitizedFailures(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in NewAnalysis with explain=false
|
||||
func TestVerbose_NewAnalysisWithoutExplain(t *testing.T) {
|
||||
// Set viper config.
|
||||
viper.Set("verbose", true)
|
||||
viper.Set("kubecontext", "dummy")
|
||||
viper.Set("kubeconfig", "dummy")
|
||||
|
||||
// Patch kubernetes.NewClient to return a dummy client.
|
||||
patches := gomonkey.ApplyFunc(kubernetes.NewClient, func(kubecontext, kubeconfig string) (*kubernetes.Client, error) {
|
||||
return &kubernetes.Client{
|
||||
Config: &rest.Config{Host: "fake-server"},
|
||||
}, nil
|
||||
})
|
||||
defer patches.Reset()
|
||||
|
||||
output := util.CaptureOutput(func() {
|
||||
a, err := NewAnalysis(
|
||||
"", "english", []string{"Pod"}, "default", "", true,
|
||||
false, // explain
|
||||
10, false, false, []string{}, false,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
a.Close()
|
||||
})
|
||||
|
||||
expectedOutputs := []string{
|
||||
"Debug: Checking kubernetes client initialization.",
|
||||
"Debug: Kubernetes client initialized, server=fake-server.",
|
||||
"Debug: Checking cache configuration.",
|
||||
"Debug: Cache configuration loaded, type=file.",
|
||||
"Debug: Cache disabled.",
|
||||
"Debug: Analysis configuration loaded, filters=[Pod], language=english, namespace=default, labelSelector=none, explain=false, maxConcurrency=10, withDoc=false, withStats=false.",
|
||||
}
|
||||
for _, expected := range expectedOutputs {
|
||||
if !util.Contains(output, expected) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in NewAnalysis with explain=true
|
||||
func TestVerbose_NewAnalysisWithExplain(t *testing.T) {
|
||||
// Set viper config.
|
||||
viper.Set("verbose", true)
|
||||
viper.Set("kubecontext", "dummy")
|
||||
viper.Set("kubeconfig", "dummy")
|
||||
// Set a dummy AI configuration.
|
||||
dummyAIConfig := map[string]interface{}{
|
||||
"defaultProvider": "dummy",
|
||||
"providers": []map[string]interface{}{
|
||||
{
|
||||
"name": "dummy",
|
||||
"baseUrl": "http://dummy",
|
||||
"model": "dummy-model",
|
||||
"customHeaders": map[string]string{},
|
||||
},
|
||||
},
|
||||
}
|
||||
viper.Set("ai", dummyAIConfig)
|
||||
|
||||
// Patch kubernetes.NewClient to return a dummy client.
|
||||
patches := gomonkey.ApplyFunc(kubernetes.NewClient, func(kubecontext, kubeconfig string) (*kubernetes.Client, error) {
|
||||
return &kubernetes.Client{
|
||||
Config: &rest.Config{Host: "fake-server"},
|
||||
}, nil
|
||||
})
|
||||
defer patches.Reset()
|
||||
|
||||
// Patch ai.NewClient to return a NoOp client.
|
||||
patches2 := gomonkey.ApplyFunc(ai.NewClient, func(name string) ai.IAI {
|
||||
return &ai.NoOpAIClient{}
|
||||
})
|
||||
defer patches2.Reset()
|
||||
|
||||
output := util.CaptureOutput(func() {
|
||||
a, err := NewAnalysis(
|
||||
"", "english", []string{"Pod"}, "default", "", true,
|
||||
true, // explain
|
||||
10, false, false, []string{}, false,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
a.Close()
|
||||
})
|
||||
|
||||
expectedOutputs := []string{
|
||||
"Debug: Checking AI configuration.",
|
||||
"Debug: Using default AI provider dummy.",
|
||||
"Debug: AI configuration loaded, provider=dummy, baseUrl=http://dummy, model=dummy-model.",
|
||||
"Debug: Checking AI client initialization.",
|
||||
"Debug: AI client initialized.",
|
||||
}
|
||||
for _, expected := range expectedOutputs {
|
||||
if !util.Contains(output, expected) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in RunAnalysis with filter flag
|
||||
func TestVerbose_RunAnalysisWithFilter(t *testing.T) {
|
||||
viper.Set("verbose", true)
|
||||
// Run analysis with a filter flag ("Pod") to trigger debug output.
|
||||
output := util.CaptureOutput(func() {
|
||||
_ = analysis_RunAnalysisFilterTester(t, "Pod")
|
||||
})
|
||||
|
||||
expectedOutputs := []string{
|
||||
"Debug: Filter flags [Pod] specified, run selected core analyzers.",
|
||||
"Debug: PodAnalyzer launched.",
|
||||
"Debug: PodAnalyzer completed without errors.",
|
||||
}
|
||||
|
||||
for _, expected := range expectedOutputs {
|
||||
if !util.Contains(output, expected) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in RunAnalysis with active filter
|
||||
func TestVerbose_RunAnalysisWithActiveFilter(t *testing.T) {
|
||||
viper.Set("verbose", true)
|
||||
viper.SetDefault("active_filters", "Ingress")
|
||||
output := util.CaptureOutput(func() {
|
||||
_ = analysis_RunAnalysisFilterTester(t, "")
|
||||
})
|
||||
|
||||
expectedOutputs := []string{
|
||||
"Debug: Found active filters [Ingress], run selected core analyzers.",
|
||||
"Debug: IngressAnalyzer launched.",
|
||||
"Debug: IngressAnalyzer completed without errors.",
|
||||
}
|
||||
|
||||
for _, expected := range expectedOutputs {
|
||||
if !util.Contains(output, expected) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in RunAnalysis without any filter (run all core analyzers)
|
||||
func TestVerbose_RunAnalysisWithoutFilter(t *testing.T) {
|
||||
viper.Set("verbose", true)
|
||||
// Clear filter flag and active_filters to run all core analyzers.
|
||||
viper.SetDefault("active_filters", []string{})
|
||||
output := util.CaptureOutput(func() {
|
||||
_ = analysis_RunAnalysisFilterTester(t, "")
|
||||
})
|
||||
|
||||
// Check for debug message indicating no filters.
|
||||
expectedNoFilter := "Debug: No filters selected and no active filters found, run all core analyzers."
|
||||
if !util.Contains(output, expectedNoFilter) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expectedNoFilter, output)
|
||||
}
|
||||
|
||||
// Get all core analyzers from analyzer.GetAnalyzerMap()
|
||||
coreAnalyzerMap, _ := analyzer.GetAnalyzerMap()
|
||||
for _, analyzerInstance := range coreAnalyzerMap {
|
||||
analyzerType := getTypeName(analyzerInstance)
|
||||
expectedLaunched := fmt.Sprintf("Debug: %s launched.", analyzerType)
|
||||
expectedCompleted := fmt.Sprintf("Debug: %s completed without errors.", analyzerType)
|
||||
if !util.Contains(output, expectedLaunched) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expectedLaunched, output)
|
||||
}
|
||||
if !util.Contains(output, expectedCompleted) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expectedCompleted, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in RunCustomAnalysis without custom analyzer
|
||||
func TestVerbose_RunCustomAnalysisWithoutCustomAnalyzer(t *testing.T) {
|
||||
viper.Set("verbose", true)
|
||||
// Set custom_analyzers to empty array to trigger "No custom analyzers" debug message.
|
||||
viper.Set("custom_analyzers", []interface{}{})
|
||||
analysisObj := &Analysis{
|
||||
MaxConcurrency: 1,
|
||||
}
|
||||
output := util.CaptureOutput(func() {
|
||||
analysisObj.RunCustomAnalysis()
|
||||
})
|
||||
expected := "Debug: No custom analyzers found."
|
||||
if !util.Contains(output, "Debug: No custom analyzers found.") {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in RunCustomAnalysis with custom analyzer
|
||||
func TestVerbose_RunCustomAnalysisWithCustomAnalyzer(t *testing.T) {
|
||||
viper.Set("verbose", true)
|
||||
// Set custom_analyzers with one custom analyzer using "fake" connection.
|
||||
viper.Set("custom_analyzers", []map[string]interface{}{
|
||||
{
|
||||
"name": "TestCustomAnalyzer",
|
||||
"connection": map[string]interface{}{"url": "127.0.0.1", "port": "2333"},
|
||||
},
|
||||
})
|
||||
|
||||
analysisObj := &Analysis{
|
||||
MaxConcurrency: 1,
|
||||
}
|
||||
output := util.CaptureOutput(func() {
|
||||
analysisObj.RunCustomAnalysis()
|
||||
})
|
||||
assert.Equal(t, 1, len(analysisObj.Errors)) // connection error
|
||||
|
||||
expectedOutputs := []string{
|
||||
"Debug: Found custom analyzers [TestCustomAnalyzer].",
|
||||
"Debug: TestCustomAnalyzer launched.",
|
||||
"Debug: TestCustomAnalyzer completed with errors.",
|
||||
}
|
||||
|
||||
for _, expected := range expectedOutputs {
|
||||
if !util.Contains(output, expected) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Verbose output in GetAIResults
|
||||
func TestVerbose_GetAIResults(t *testing.T) {
|
||||
viper.Set("verbose", true)
|
||||
disabledCache := cache.New("disabled-cache")
|
||||
disabledCache.DisableCache()
|
||||
aiClient := &ai.NoOpAIClient{}
|
||||
analysisObj := Analysis{
|
||||
AIClient: aiClient,
|
||||
Cache: disabledCache,
|
||||
Results: []common.Result{
|
||||
{
|
||||
Kind: "Deployment",
|
||||
Name: "test-deployment",
|
||||
Error: []common.Failure{{Text: "test-problem", Sensitive: []common.Sensitive{}}},
|
||||
Details: "test-solution",
|
||||
ParentObject: "parent-resource",
|
||||
},
|
||||
},
|
||||
Namespace: "default",
|
||||
}
|
||||
output := util.CaptureOutput(func() {
|
||||
_ = analysisObj.GetAIResults("json", false)
|
||||
})
|
||||
|
||||
expected := "Debug: Generating AI analysis."
|
||||
if !util.Contains(output, expected) {
|
||||
t.Errorf("Expected output to contain: '%s', but got output: '%s'", expected, output)
|
||||
}
|
||||
}
|
||||
|
@@ -43,16 +43,19 @@ var coreAnalyzerMap = map[string]common.IAnalyzer{
	"Node":                           NodeAnalyzer{},
	"ValidatingWebhookConfiguration": ValidatingWebhookAnalyzer{},
	"MutatingWebhookConfiguration":   MutatingWebhookAnalyzer{},
+	"ConfigMap":                      ConfigMapAnalyzer{},
}

var additionalAnalyzerMap = map[string]common.IAnalyzer{
-	"HorizontalPodAutoScaler": HpaAnalyzer{},
+	"HorizontalPodAutoscaler": HpaAnalyzer{},
	"PodDisruptionBudget":     PdbAnalyzer{},
	"NetworkPolicy":           NetworkPolicyAnalyzer{},
	"Log":                     LogAnalyzer{},
	"GatewayClass":            GatewayClassAnalyzer{},
	"Gateway":                 GatewayAnalyzer{},
	"HTTPRoute":               HTTPRouteAnalyzer{},
+	"Storage":                 StorageAnalyzer{},
+	"Security":                SecurityAnalyzer{},
}

func ListFilters() ([]string, []string, []string) {
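These map entries are the registration point for built-in analyzers: a key here is what the filter mechanism and `ListFilters` expose, and every value must satisfy `common.IAnalyzer`, whose shape is visible in the new ConfigMap analyzer below. A hedged sketch of a minimal additional analyzer; the `ExampleAnalyzer` name and its "namespace has no pods" check are hypothetical:

```go
// Hypothetical minimal analyzer; ExampleAnalyzer and its check are illustrative,
// not part of k8sgpt. It follows the same Analyze signature as the analyzers above.
package analyzer

import (
	"fmt"

	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type ExampleAnalyzer struct{}

func (ExampleAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	pods, err := a.Client.GetClient().CoreV1().Pods(a.Namespace).List(a.Context, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	var results []common.Result
	if len(pods.Items) == 0 {
		results = append(results, common.Result{
			Kind: "Example",
			Name: fmt.Sprintf("%s/no-pods", a.Namespace),
			Error: []common.Failure{{
				Text:      fmt.Sprintf("namespace %s contains no pods", a.Namespace),
				Sensitive: []common.Sensitive{},
			}},
		})
	}
	return results, nil
}
```

Registered as `"Example": ExampleAnalyzer{}` in `additionalAnalyzerMap`, such an analyzer would then be selectable through the existing filter mechanism.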
pkg/analyzer/configmap.go (new file, 125 lines)
@ -0,0 +1,125 @@
|
||||
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package analyzer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type ConfigMapAnalyzer struct{}

func (ConfigMapAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	kind := "ConfigMap"

	AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
		"analyzer_name": kind,
	})

	// Get all ConfigMaps in the namespace
	configMaps, err := a.Client.GetClient().CoreV1().ConfigMaps(a.Namespace).List(a.Context, metav1.ListOptions{
		LabelSelector: a.LabelSelector,
	})
	if err != nil {
		return nil, err
	}

	// Get all Pods to check ConfigMap usage
	pods, err := a.Client.GetClient().CoreV1().Pods(a.Namespace).List(a.Context, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	var results []common.Result

	// Track which ConfigMaps are used
	usedConfigMaps := make(map[string]bool)
	configMapUsage := make(map[string][]string) // maps ConfigMap name to list of pods using it

	// Analyze ConfigMap usage in Pods
	for _, pod := range pods.Items {
		// Check volume mounts
		for _, volume := range pod.Spec.Volumes {
			if volume.ConfigMap != nil {
				usedConfigMaps[volume.ConfigMap.Name] = true
				configMapUsage[volume.ConfigMap.Name] = append(configMapUsage[volume.ConfigMap.Name], pod.Name)
			}
		}

		// Check environment variables
		for _, container := range pod.Spec.Containers {
			for _, env := range container.EnvFrom {
				if env.ConfigMapRef != nil {
					usedConfigMaps[env.ConfigMapRef.Name] = true
					configMapUsage[env.ConfigMapRef.Name] = append(configMapUsage[env.ConfigMapRef.Name], pod.Name)
				}
			}
			for _, env := range container.Env {
				if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil {
					usedConfigMaps[env.ValueFrom.ConfigMapKeyRef.Name] = true
					configMapUsage[env.ValueFrom.ConfigMapKeyRef.Name] = append(configMapUsage[env.ValueFrom.ConfigMapKeyRef.Name], pod.Name)
				}
			}
		}
	}

	// Analyze each ConfigMap
	for _, cm := range configMaps.Items {
		var failures []common.Failure

		// Check for unused ConfigMaps
		if !usedConfigMaps[cm.Name] {
			failures = append(failures, common.Failure{
				Text:      fmt.Sprintf("ConfigMap %s is not used by any pods in the namespace", cm.Name),
				Sensitive: []common.Sensitive{},
			})
		}

		// Check for empty ConfigMaps
		if len(cm.Data) == 0 && len(cm.BinaryData) == 0 {
			failures = append(failures, common.Failure{
				Text:      fmt.Sprintf("ConfigMap %s is empty", cm.Name),
				Sensitive: []common.Sensitive{},
			})
		}

		// Check for large ConfigMaps (over 1MB)
		totalSize := 0
		for _, value := range cm.Data {
			totalSize += len(value)
		}
		for _, value := range cm.BinaryData {
			totalSize += len(value)
		}
		if totalSize > 1024*1024 { // 1MB
			failures = append(failures, common.Failure{
				Text:      fmt.Sprintf("ConfigMap %s is larger than 1MB (%d bytes)", cm.Name, totalSize),
				Sensitive: []common.Sensitive{},
			})
		}

		if len(failures) > 0 {
			results = append(results, common.Result{
				Kind:  kind,
				Name:  fmt.Sprintf("%s/%s", cm.Namespace, cm.Name),
				Error: failures,
			})
			AnalyzerErrorsMetric.WithLabelValues(kind, cm.Name, cm.Namespace).Set(float64(len(failures)))
		}
	}

	return results, nil
}
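Note on wiring: for this analyzer to be selectable by name at run time, it presumably also needs an entry in the analyzer registry, which is not part of the lines shown here. A minimal sketch, assuming a map of common.IAnalyzer keyed by analyzer name; the map name and whether "ConfigMap" belongs in the core or optional set are assumptions, not taken from this diff:

package analyzer

import "github.com/k8sgpt-ai/k8sgpt/pkg/common"

// Hypothetical wiring sketch: the map name and placement are assumptions.
var additionalAnalyzerMap = map[string]common.IAnalyzer{
	// ...existing entries elided...
	"ConfigMap": ConfigMapAnalyzer{},
}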
pkg/analyzer/configmap_test.go (new file, 149 lines)
@ -0,0 +1,149 @@
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package analyzer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestConfigMapAnalyzer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
configMaps []v1.ConfigMap
|
||||
pods []v1.Pod
|
||||
expectedErrors int
|
||||
}{
|
||||
{
|
||||
name: "unused configmap",
|
||||
namespace: "default",
|
||||
configMaps: []v1.ConfigMap{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "unused-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"key": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 1,
|
||||
},
|
||||
{
|
||||
name: "empty configmap",
|
||||
namespace: "default",
|
||||
configMaps: []v1.ConfigMap{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "empty-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 1,
|
||||
},
|
||||
{
|
||||
name: "large configmap",
|
||||
namespace: "default",
|
||||
configMaps: []v1.ConfigMap{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "large-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"key": string(make([]byte, 1024*1024+1)), // 1MB + 1 byte
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 1,
|
||||
},
|
||||
{
|
||||
name: "used configmap",
|
||||
namespace: "default",
|
||||
configMaps: []v1.ConfigMap{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "used-cm",
|
||||
Namespace: "default",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"key": "value",
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pod",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test-container",
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{
|
||||
ConfigMapRef: &v1.ConfigMapEnvSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: "used-cm",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
// Create test resources
|
||||
for _, cm := range tt.configMaps {
|
||||
_, err := client.CoreV1().ConfigMaps(tt.namespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for _, pod := range tt.pods {
|
||||
_, err := client.CoreV1().Pods(tt.namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
analyzer := ConfigMapAnalyzer{}
|
||||
results, err := analyzer.Analyze(common.Analyzer{
|
||||
Client: &kubernetes.Client{Client: client},
|
||||
Context: context.TODO(),
|
||||
Namespace: tt.namespace,
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedErrors, len(results))
|
||||
})
|
||||
}
|
||||
}
|
@ -22,179 +22,274 @@ import (
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
|
||||
"github.com/stretchr/testify/require"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestCronJobAnalyzer(t *testing.T) {
|
||||
suspend := new(bool)
|
||||
*suspend = true
|
||||
|
||||
invalidStartingDeadline := new(int64)
|
||||
*invalidStartingDeadline = -7
|
||||
|
||||
validStartingDeadline := new(int64)
|
||||
*validStartingDeadline = 7
|
||||
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ1",
|
||||
// This CronJob won't be listed because of namespace filtering.
|
||||
Namespace: "test",
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ2",
|
||||
Namespace: "default",
|
||||
},
|
||||
// A suspended CronJob will contribute to failures.
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Suspend: suspend,
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ3",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
// Valid schedule
|
||||
Schedule: "*/1 * * * *",
|
||||
|
||||
// Negative starting deadline
|
||||
StartingDeadlineSeconds: invalidStartingDeadline,
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ4",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
// Invalid schedule
|
||||
Schedule: "*** * * * *",
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ5",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
// Valid schedule
|
||||
Schedule: "*/1 * * * *",
|
||||
|
||||
// Positive starting deadline shouldn't be any problem.
|
||||
StartingDeadlineSeconds: validStartingDeadline,
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
// This cronjob shouldn't contribute to any failures.
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "successful-cronjob",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
"analysisDate": "2022-04-01",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"app": "example-app",
|
||||
},
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "*/1 * * * *",
|
||||
ConcurrencyPolicy: "Allow",
|
||||
JobTemplate: batchv1.JobTemplateSpec{
|
||||
tests := []struct {
|
||||
name string
|
||||
config common.Analyzer
|
||||
expectations []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "Suspended CronJob",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"app": "example-app",
|
||||
},
|
||||
Name: "suspended-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.JobSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "example-container",
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
},
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "*/5 * * * *",
|
||||
Suspend: boolPtr(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
),
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/suspended-job",
|
||||
failuresCount: 1, // One failure for being suspended
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Invalid schedule format",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "invalid-schedule",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "invalid-cron", // Invalid cron format
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/invalid-schedule",
|
||||
failuresCount: 1, // One failure for invalid schedule
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Negative starting deadline",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "negative-deadline",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "*/5 * * * *",
|
||||
StartingDeadlineSeconds: int64Ptr(-60), // Negative deadline
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/negative-deadline",
|
||||
failuresCount: 1, // One failure for negative deadline
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid CronJob",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "valid-job",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "*/5 * * * *", // Valid cron format
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
// No expectations for valid job
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple issues",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "multiple-issues",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "invalid-cron",
|
||||
StartingDeadlineSeconds: int64Ptr(-60),
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/multiple-issues",
|
||||
failuresCount: 2, // Two failures: invalid schedule and negative deadline
|
||||
},
|
||||
},
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
}
|
||||
|
||||
cjAnalyzer := CronJobAnalyzer{}
|
||||
results, err := cjAnalyzer.Analyze(config)
|
||||
require.NoError(t, err)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
analyzer := CronJobAnalyzer{}
|
||||
results, err := analyzer.Analyze(tt.config)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, len(tt.expectations))
|
||||
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
// Sort results by name for consistent comparison
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
|
||||
expectations := []string{
|
||||
"default/CJ2",
|
||||
"default/CJ3",
|
||||
"default/CJ4",
|
||||
}
|
||||
|
||||
require.Equal(t, len(expectations), len(results))
|
||||
|
||||
for i, result := range results {
|
||||
require.Equal(t, expectations[i], result.Name)
|
||||
for i, expectation := range tt.expectations {
|
||||
require.Equal(t, expectation.name, results[i].Name)
|
||||
require.Len(t, results[i].Error, expectation.failuresCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCronJobAnalyzerLabelSelectorFiltering(t *testing.T) {
|
||||
suspend := new(bool)
|
||||
*suspend = true
|
||||
|
||||
invalidStartingDeadline := new(int64)
|
||||
*invalidStartingDeadline = -7
|
||||
|
||||
validStartingDeadline := new(int64)
|
||||
*validStartingDeadline = 7
|
||||
func TestCronJobAnalyzerLabelSelector(t *testing.T) {
|
||||
clientSet := fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-with-label",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "invalid-cron", // This should trigger a failure
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "job-without-label",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: "invalid-cron", // This should trigger a failure
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
// Test with label selector
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ1",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app": "cronjob",
|
||||
},
|
||||
},
|
||||
},
|
||||
&batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "CJ2",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
),
|
||||
Client: clientSet,
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
LabelSelector: "app=cronjob",
|
||||
LabelSelector: "app=test",
|
||||
}
|
||||
|
||||
cjAnalyzer := CronJobAnalyzer{}
|
||||
results, err := cjAnalyzer.Analyze(config)
|
||||
analyzer := CronJobAnalyzer{}
|
||||
results, err := analyzer.Analyze(config)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(results))
|
||||
require.Equal(t, "default/CJ1", results[0].Name)
|
||||
require.Equal(t, "default/job-with-label", results[0].Name)
|
||||
}
|
||||
|
||||
func TestCheckCronScheduleIsValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
schedule string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Valid schedule - every 5 minutes",
|
||||
schedule: "*/5 * * * *",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Valid schedule - specific time",
|
||||
schedule: "0 2 * * *",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Valid schedule - complex",
|
||||
schedule: "0 0 1,15 * 3",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Invalid schedule - wrong format",
|
||||
schedule: "invalid-cron",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Invalid schedule - too many fields",
|
||||
schedule: "* * * * * *",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "Invalid schedule - empty string",
|
||||
schedule: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := CheckCronScheduleIsValid(tt.schedule)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -20,6 +20,7 @@ import (
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
	appsv1 "k8s.io/api/apps/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
@ -34,7 +35,7 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	Kind: kind,
	ApiVersion: schema.GroupVersion{
		Group:   "autoscaling",
		Version: "v1",
		Version: "v2",
	},
	OpenapiSchema: a.OpenapiSchema,
}
@ -56,11 +57,22 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	//check the error from status field
	conditions := hpa.Status.Conditions
	for _, condition := range conditions {
		if condition.Status != "True" {
			failures = append(failures, common.Failure{
				Text:      condition.Message,
				Sensitive: []common.Sensitive{},
			})
		// https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#appendix-horizontal-pod-autoscaler-status-conditions
		switch condition.Type {
		case autoscalingv2.ScalingLimited:
			if condition.Status == corev1.ConditionTrue {
				failures = append(failures, common.Failure{
					Text:      condition.Message,
					Sensitive: []common.Sensitive{},
				})
			}
		default:
			if condition.Status == corev1.ConditionFalse {
				failures = append(failures, common.Failure{
					Text:      condition.Message,
					Sensitive: []common.Sensitive{},
				})
			}
		}
	}
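In the hunk above, the old lines flagged any condition whose Status was not "True"; the replacement handles ScalingLimited separately, since for that condition a True status (scaling is being limited) is the problematic state, while the other condition types remain problematic when False. A minimal standalone sketch of that decision, using a helper name of our own rather than anything from the diff:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

// shouldFlag mirrors the new per-type handling: ScalingLimited is reported
// when True; every other condition type is reported when False.
func shouldFlag(c autoscalingv2.HorizontalPodAutoscalerCondition) bool {
	if c.Type == autoscalingv2.ScalingLimited {
		return c.Status == corev1.ConditionTrue
	}
	return c.Status == corev1.ConditionFalse
}

func main() {
	limited := autoscalingv2.HorizontalPodAutoscalerCondition{
		Type:   autoscalingv2.ScalingLimited,
		Status: corev1.ConditionTrue,
	}
	able := autoscalingv2.HorizontalPodAutoscalerCondition{
		Type:   autoscalingv2.AbleToScale,
		Status: corev1.ConditionTrue,
	}
	fmt.Println(shouldFlag(limited), shouldFlag(able)) // true false
}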
@ -735,3 +735,87 @@ func TestHPAAnalyzerStatusField(t *testing.T) {
|
||||
assert.Equal(t, len(analysisResults), 1)
|
||||
|
||||
}
|
||||
|
||||
func TestHPAAnalyzerStatusScalingLimitedError(t *testing.T) {
|
||||
clientset := fake.NewSimpleClientset(
|
||||
&autoscalingv2.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "example",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
|
||||
ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
|
||||
Kind: "Deployment",
|
||||
Name: "example",
|
||||
},
|
||||
},
|
||||
Status: autoscalingv2.HorizontalPodAutoscalerStatus{
|
||||
Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
|
||||
{
|
||||
Type: autoscalingv2.AbleToScale,
|
||||
Status: "True",
|
||||
Message: "recommended size matches current size",
|
||||
},
|
||||
{
|
||||
Type: autoscalingv2.ScalingActive,
|
||||
Status: "True",
|
||||
Message: "the HPA was able to successfully calculate a replica count",
|
||||
},
|
||||
{
|
||||
Type: autoscalingv2.ScalingLimited,
|
||||
Status: "True",
|
||||
Message: "the desired replica count is less than the minimum replica count",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "example",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "example",
|
||||
Image: "nginx",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
hpaAnalyzer := HpaAnalyzer{}
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: clientset,
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
}
|
||||
analysisResults, err := hpaAnalyzer.Analyze(config)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var errorFound bool
|
||||
want := "the desired replica count is less than the minimum replica count"
|
||||
for _, analysis := range analysisResults {
|
||||
for _, got := range analysis.Error {
|
||||
if want == got.Text {
|
||||
errorFound = true
|
||||
}
|
||||
}
|
||||
if errorFound {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !errorFound {
|
||||
t.Errorf("Expected message, <%v> , not found in HorizontalPodAutoscaler's analysis results", want)
|
||||
}
|
||||
}
|
@ -28,213 +28,334 @@ import (
|
||||
)
|
||||
|
||||
func TestIngressAnalyzer(t *testing.T) {
|
||||
validIgClassName := new(string)
|
||||
*validIgClassName = "valid-ingress-class"
|
||||
|
||||
var igRule networkingv1.IngressRule
|
||||
|
||||
httpRule := networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
// This service exists.
|
||||
Name: "Service1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "/test1",
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
// This service is in the test namespace
|
||||
// Hence, it won't be discovered.
|
||||
Name: "Service2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "/test2",
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
// This service doesn't exist.
|
||||
Name: "Service3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
igRule.IngressRuleValue.HTTP = &httpRule
|
||||
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
// Doesn't specify an ingress class.
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress1",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress2",
|
||||
Namespace: "default",
|
||||
// Specify an invalid ingress class name using annotations.
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": "invalid-class",
|
||||
},
|
||||
},
|
||||
},
|
||||
&networkingv1.Ingress{
|
||||
// Namespace filtering.
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress3",
|
||||
Namespace: "test",
|
||||
},
|
||||
},
|
||||
&networkingv1.IngressClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: *validIgClassName,
|
||||
},
|
||||
},
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress4",
|
||||
Namespace: "default",
|
||||
// Specify valid ingress class name using annotations.
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": *validIgClassName,
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Service1",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// Namespace filtering.
|
||||
Name: "Service2",
|
||||
Namespace: "test",
|
||||
},
|
||||
},
|
||||
&v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Secret1",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
&v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Secret2",
|
||||
Namespace: "test",
|
||||
},
|
||||
},
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress5",
|
||||
Namespace: "default",
|
||||
},
|
||||
|
||||
// Specify valid ingress class name in spec.
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: validIgClassName,
|
||||
Rules: []networkingv1.IngressRule{
|
||||
igRule,
|
||||
},
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
// This won't contribute to any failures.
|
||||
SecretName: "Secret1",
|
||||
},
|
||||
{
|
||||
// This secret won't be discovered because of namespace filtering.
|
||||
SecretName: "Secret2",
|
||||
},
|
||||
{
|
||||
// This secret doesn't exist.
|
||||
SecretName: "Secret3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
}
|
||||
|
||||
igAnalyzer := IngressAnalyzer{}
|
||||
results, err := igAnalyzer.Analyze(config)
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
|
||||
expectations := []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
tests := []struct {
|
||||
name string
|
||||
config common.Analyzer
|
||||
expectations []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "default/Ingress1",
|
||||
failuresCount: 1,
|
||||
name: "Missing ingress class",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "no-class",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
// No ingress class specified
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/no-class",
|
||||
failuresCount: 1, // One failure for missing ingress class
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "default/Ingress2",
|
||||
failuresCount: 1,
|
||||
name: "Non-existent ingress class",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-class",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: strPtr("non-existent"),
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/bad-class",
|
||||
failuresCount: 1, // One failure for non-existent ingress class
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "default/Ingress5",
|
||||
failuresCount: 4,
|
||||
name: "Non-existent backend service",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-backend",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": "nginx",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: pathTypePtr(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "non-existent-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/bad-backend",
|
||||
failuresCount: 2, // Two failures: non-existent ingress class and non-existent service
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Non-existent TLS secret",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-tls",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": "nginx",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"example.com"},
|
||||
SecretName: "non-existent-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/bad-tls",
|
||||
failuresCount: 2, // Two failures: non-existent ingress class and non-existent TLS secret
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid ingress with all components",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "valid-ingress",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: strPtr("nginx"),
|
||||
},
|
||||
},
|
||||
&networkingv1.IngressClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nginx",
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "backend-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
&v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "tls-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Type: v1.SecretTypeTLS,
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
// No expectations for valid ingress
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple issues",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "multiple-issues",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: strPtr("non-existent"),
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: pathTypePtr(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "non-existent-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"example.com"},
|
||||
SecretName: "non-existent-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/multiple-issues",
|
||||
failuresCount: 3, // Three failures: ingress class, service, and TLS secret
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.Equal(t, len(expectations), len(results))
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
analyzer := IngressAnalyzer{}
|
||||
results, err := analyzer.Analyze(tt.config)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, len(tt.expectations))
|
||||
|
||||
for i, result := range results {
|
||||
require.Equal(t, expectations[i].name, result.Name)
|
||||
require.Equal(t, expectations[i].failuresCount, len(result.Error))
|
||||
// Sort results by name for consistent comparison
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
|
||||
for i, expectation := range tt.expectations {
|
||||
require.Equal(t, expectation.name, results[i].Name)
|
||||
require.Len(t, results[i].Error, expectation.failuresCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIngressAnalyzerLabelSelectorFiltering(t *testing.T) {
|
||||
validIgClassName := new(string)
|
||||
*validIgClassName = "valid-ingress-class"
|
||||
func TestIngressAnalyzerLabelSelector(t *testing.T) {
|
||||
clientSet := fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ingress-with-label",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
// Missing ingress class to trigger a failure
|
||||
},
|
||||
},
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ingress-without-label",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
// Missing ingress class to trigger a failure
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
// Test with label selector
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress1",
|
||||
Namespace: "default",
|
||||
Labels: map[string]string{
|
||||
"app": "ingress",
|
||||
},
|
||||
},
|
||||
},
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Ingress2",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
),
|
||||
Client: clientSet,
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
LabelSelector: "app=ingress",
|
||||
LabelSelector: "app=test",
|
||||
}
|
||||
|
||||
igAnalyzer := IngressAnalyzer{}
|
||||
results, err := igAnalyzer.Analyze(config)
|
||||
analyzer := IngressAnalyzer{}
|
||||
results, err := analyzer.Analyze(config)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(results))
|
||||
require.Equal(t, "default/Ingress1", results[0].Name)
|
||||
|
||||
require.Equal(t, "default/ingress-with-label", results[0].Name)
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
func strPtr(s string) *string {
|
||||
return &s
|
||||
}
|
||||
|
||||
func pathTypePtr(p networkingv1.PathType) *networkingv1.PathType {
|
||||
return &p
|
||||
}
|
||||
pkg/analyzer/security.go (new file, 201 lines)
@ -0,0 +1,201 @@
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package analyzer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type SecurityAnalyzer struct{}
|
||||
|
||||
func (SecurityAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
|
||||
kind := "Security"
|
||||
|
||||
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
|
||||
"analyzer_name": kind,
|
||||
})
|
||||
|
||||
var results []common.Result
|
||||
|
||||
// Analyze ServiceAccounts
|
||||
saResults, err := analyzeServiceAccounts(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, saResults...)
|
||||
|
||||
// Analyze RoleBindings
|
||||
rbResults, err := analyzeRoleBindings(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, rbResults...)
|
||||
|
||||
// Analyze Pod Security Contexts
|
||||
podResults, err := analyzePodSecurityContexts(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, podResults...)
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func analyzeServiceAccounts(a common.Analyzer) ([]common.Result, error) {
|
||||
var results []common.Result
|
||||
|
||||
sas, err := a.Client.GetClient().CoreV1().ServiceAccounts(a.Namespace).List(a.Context, metav1.ListOptions{
|
||||
LabelSelector: a.LabelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, sa := range sas.Items {
|
||||
var failures []common.Failure
|
||||
|
||||
// Check for default service account usage
|
||||
if sa.Name == "default" {
|
||||
pods, err := a.Client.GetClient().CoreV1().Pods(sa.Namespace).List(a.Context, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
defaultSAUsers := []string{}
|
||||
for _, pod := range pods.Items {
|
||||
if pod.Spec.ServiceAccountName == "default" {
|
||||
defaultSAUsers = append(defaultSAUsers, pod.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(defaultSAUsers) > 0 {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("Default service account is being used by pods: %v", defaultSAUsers),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
results = append(results, common.Result{
|
||||
Kind: "Security/ServiceAccount",
|
||||
Name: fmt.Sprintf("%s/%s", sa.Namespace, sa.Name),
|
||||
Error: failures,
|
||||
})
|
||||
AnalyzerErrorsMetric.WithLabelValues("Security/ServiceAccount", sa.Name, sa.Namespace).Set(float64(len(failures)))
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func analyzeRoleBindings(a common.Analyzer) ([]common.Result, error) {
|
||||
var results []common.Result
|
||||
|
||||
rbs, err := a.Client.GetClient().RbacV1().RoleBindings(a.Namespace).List(a.Context, metav1.ListOptions{
|
||||
LabelSelector: a.LabelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, rb := range rbs.Items {
|
||||
var failures []common.Failure
|
||||
|
||||
// Check for wildcards in role references
|
||||
role, err := a.Client.GetClient().RbacV1().Roles(rb.Namespace).Get(a.Context, rb.RoleRef.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, rule := range role.Rules {
|
||||
if containsWildcard(rule.Verbs) || containsWildcard(rule.Resources) {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("RoleBinding %s references Role %s which contains wildcard permissions - this is not recommended for security best practices", rb.Name, role.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
results = append(results, common.Result{
|
||||
Kind: "Security/RoleBinding",
|
||||
Name: fmt.Sprintf("%s/%s", rb.Namespace, rb.Name),
|
||||
Error: failures,
|
||||
})
|
||||
AnalyzerErrorsMetric.WithLabelValues("Security/RoleBinding", rb.Name, rb.Namespace).Set(float64(len(failures)))
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func analyzePodSecurityContexts(a common.Analyzer) ([]common.Result, error) {
|
||||
var results []common.Result
|
||||
|
||||
pods, err := a.Client.GetClient().CoreV1().Pods(a.Namespace).List(a.Context, metav1.ListOptions{
|
||||
LabelSelector: a.LabelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
var failures []common.Failure
|
||||
|
||||
// Check for privileged containers first (most critical)
|
||||
hasPrivilegedContainer := false
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("Container %s in pod %s is running as privileged which poses security risks", container.Name, pod.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
hasPrivilegedContainer = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Only check for missing security context if no privileged containers found
|
||||
if !hasPrivilegedContainer && pod.Spec.SecurityContext == nil {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("Pod %s does not have a security context defined which may pose security risks", pod.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
results = append(results, common.Result{
|
||||
Kind: "Security/Pod",
|
||||
Name: fmt.Sprintf("%s/%s", pod.Namespace, pod.Name),
|
||||
Error: failures[:1],
|
||||
})
|
||||
AnalyzerErrorsMetric.WithLabelValues("Security/Pod", pod.Name, pod.Namespace).Set(1)
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func containsWildcard(slice []string) bool {
|
||||
for _, item := range slice {
|
||||
if item == "*" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
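For reference, a tiny usage sketch of the wildcard check above, written as an extra function in the same package; the function name and the PolicyRule values are made up for illustration:

package analyzer

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

// demoWildcardRule shows the check on a hypothetical Role rule: "*" in Verbs
// means a RoleBinding referencing this Role would be flagged.
func demoWildcardRule() {
	rule := rbacv1.PolicyRule{Verbs: []string{"*"}, Resources: []string{"pods"}}
	fmt.Println(containsWildcard(rule.Verbs) || containsWildcard(rule.Resources)) // true
}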
pkg/analyzer/security_test.go (new file, 181 lines)
@ -0,0 +1,181 @@
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package analyzer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestSecurityAnalyzer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
serviceAccounts []v1.ServiceAccount
|
||||
pods []v1.Pod
|
||||
roles []rbacv1.Role
|
||||
roleBindings []rbacv1.RoleBinding
|
||||
expectedErrors int
|
||||
expectedKinds []string
|
||||
}{
|
||||
{
|
||||
name: "default service account usage",
|
||||
namespace: "default",
|
||||
serviceAccounts: []v1.ServiceAccount{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
pods: []v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pod",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
ServiceAccountName: "default",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 2,
|
||||
expectedKinds: []string{"Security/ServiceAccount", "Security/Pod"},
|
||||
},
|
||||
{
|
||||
name: "privileged container",
|
||||
namespace: "default",
|
||||
pods: []v1.Pod{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "privileged-pod",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "privileged-container",
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: boolPtr(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 1,
|
||||
expectedKinds: []string{"Security/Pod"},
|
||||
},
|
||||
{
|
||||
name: "wildcard permissions in role",
|
||||
namespace: "default",
|
||||
roles: []rbacv1.Role{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wildcard-role",
|
||||
Namespace: "default",
|
||||
},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
{
|
||||
Verbs: []string{"*"},
|
||||
Resources: []string{"pods"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
roleBindings: []rbacv1.RoleBinding{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-binding",
|
||||
Namespace: "default",
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
Kind: "Role",
|
||||
Name: "wildcard-role",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErrors: 1,
|
||||
expectedKinds: []string{"Security/RoleBinding"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
// Create test resources
|
||||
for _, sa := range tt.serviceAccounts {
|
||||
_, err := client.CoreV1().ServiceAccounts(tt.namespace).Create(context.TODO(), &sa, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for _, pod := range tt.pods {
|
||||
_, err := client.CoreV1().Pods(tt.namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for _, role := range tt.roles {
|
||||
_, err := client.RbacV1().Roles(tt.namespace).Create(context.TODO(), &role, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for _, rb := range tt.roleBindings {
|
||||
_, err := client.RbacV1().RoleBindings(tt.namespace).Create(context.TODO(), &rb, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
analyzer := SecurityAnalyzer{}
|
||||
results, err := analyzer.Analyze(common.Analyzer{
|
||||
Client: &kubernetes.Client{Client: client},
|
||||
Context: context.TODO(),
|
||||
Namespace: tt.namespace,
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Debug: Print all results
|
||||
t.Logf("Got %d results:", len(results))
|
||||
for _, result := range results {
|
||||
t.Logf(" Kind: %s, Name: %s", result.Kind, result.Name)
|
||||
for _, failure := range result.Error {
|
||||
t.Logf(" Failure: %s", failure.Text)
|
||||
}
|
||||
}
|
||||
|
||||
// Count results by kind
|
||||
resultsByKind := make(map[string]int)
|
||||
for _, result := range results {
|
||||
resultsByKind[result.Kind]++
|
||||
}
|
||||
|
||||
// Check that we have the expected number of results for each kind
|
||||
for _, expectedKind := range tt.expectedKinds {
|
||||
assert.Equal(t, 1, resultsByKind[expectedKind], "Expected 1 result of kind %s", expectedKind)
|
||||
}
|
||||
|
||||
// Check total number of results matches expected kinds
|
||||
assert.Equal(t, len(tt.expectedKinds), len(results), "Expected %d total results", len(tt.expectedKinds))
|
||||
})
|
||||
}
|
||||
}
|
@ -24,145 +24,232 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
)
|
||||
|
||||
func TestServiceAnalyzer(t *testing.T) {
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Endpoint1",
|
||||
Namespace: "test",
|
||||
},
|
||||
// Endpoint with non-zero subsets.
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
// These not-ready endpoints will contribute to failures.
|
||||
NotReadyAddresses: []v1.EndpointAddress{
|
||||
{
|
||||
TargetRef: &v1.ObjectReference{
|
||||
Kind: "test-reference",
|
||||
Name: "reference1",
|
||||
},
|
||||
},
|
||||
{
|
||||
TargetRef: &v1.ObjectReference{
|
||||
Kind: "test-reference",
|
||||
Name: "reference2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// These not-ready endpoints will contribute to failures.
|
||||
NotReadyAddresses: []v1.EndpointAddress{
|
||||
{
|
||||
TargetRef: &v1.ObjectReference{
|
||||
Kind: "test-reference",
|
||||
Name: "reference3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Endpoint2",
|
||||
Namespace: "test",
|
||||
Annotations: map[string]string{
|
||||
// Leader election record annotation key defined.
|
||||
resourcelock.LeaderElectionRecordAnnotationKey: "this is okay",
|
||||
},
|
||||
},
|
||||
// Endpoint with zero subsets.
|
||||
},
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// This won't contribute to any failures.
|
||||
Name: "non-existent-service",
|
||||
Namespace: "test",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
// Endpoint with zero subsets.
|
||||
},
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Service1",
|
||||
Namespace: "test",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
// Endpoint with zero subsets.
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Service1",
|
||||
Namespace: "test",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{
|
||||
"app1": "test-app1",
|
||||
"app2": "test-app2",
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
// This service won't be discovered.
|
||||
Name: "Service2",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{
|
||||
"app1": "test-app1",
|
||||
"app2": "test-app2",
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "Service3",
|
||||
Namespace: "test",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
// No Spec Selector
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "test",
|
||||
}
|
||||
|
||||
sAnalyzer := ServiceAnalyzer{}
|
||||
results, err := sAnalyzer.Analyze(config)
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
|
||||
expectations := []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
tests := []struct {
|
||||
name string
|
||||
config common.Analyzer
|
||||
expectations []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "test/Endpoint1",
|
||||
failuresCount: 1,
|
||||
name: "Service with no endpoints",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{}, // Empty subsets
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/test-service",
|
||||
failuresCount: 1, // One failure for no endpoints
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "test/Service1",
|
||||
failuresCount: 2,
|
||||
name: "Service with not ready endpoints",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{
|
||||
{
|
||||
NotReadyAddresses: []v1.EndpointAddress{
|
||||
{
|
||||
TargetRef: &v1.ObjectReference{
|
||||
Kind: "Pod",
|
||||
Name: "test-pod",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/test-service",
|
||||
failuresCount: 1, // One failure for not ready endpoints
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Service with warning events",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{}, // Empty subsets
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
&v1.Event{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-event",
|
||||
Namespace: "default",
|
||||
},
|
||||
InvolvedObject: v1.ObjectReference{
|
||||
Kind: "Service",
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Type: "Warning",
|
||||
Reason: "TestReason",
|
||||
Message: "Test warning message",
|
||||
},
|
||||
),
|
||||
},
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/test-service",
|
||||
failuresCount: 2, // One failure for no endpoints, one for warning event
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Service with leader election annotation",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
"control-plane.alpha.kubernetes.io/leader": "test-leader",
|
||||
},
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{}, // Empty subsets
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{
|
||||
"app": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
// No expectations for leader election endpoints
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Service with non-existent service",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&v1.Endpoints{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
Subsets: []v1.EndpointSubset{}, // Empty subsets
|
||||
},
|
||||
),
|
||||
},
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
// No expectations for non-existent service
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.Equal(t, len(expectations), len(results))
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
analyzer := ServiceAnalyzer{}
|
||||
results, err := analyzer.Analyze(tt.config)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, len(tt.expectations))
|
||||
|
||||
for i, result := range results {
|
||||
require.Equal(t, expectations[i].name, result.Name)
|
||||
require.Equal(t, expectations[i].failuresCount, len(result.Error))
|
||||
// Sort results by name for consistent comparison
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
|
||||
for i, expectation := range tt.expectations {
|
||||
require.Equal(t, expectation.name, results[i].Name)
|
||||
require.Len(t, results[i].Error, expectation.failuresCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pkg/analyzer/storage.go (new file, 216 lines)
@ -0,0 +1,216 @@
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package analyzer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type StorageAnalyzer struct{}
|
||||
|
||||
func (StorageAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
|
||||
kind := "Storage"
|
||||
|
||||
AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
|
||||
"analyzer_name": kind,
|
||||
})
|
||||
|
||||
var results []common.Result
|
||||
|
||||
// Analyze StorageClasses
|
||||
scResults, err := analyzeStorageClasses(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, scResults...)
|
||||
|
||||
// Analyze PersistentVolumes
|
||||
pvResults, err := analyzePersistentVolumes(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, pvResults...)
|
||||
|
||||
// Analyze PVCs with enhanced checks
|
||||
pvcResults, err := analyzePersistentVolumeClaims(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results = append(results, pvcResults...)
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func analyzeStorageClasses(a common.Analyzer) ([]common.Result, error) {
|
||||
var results []common.Result
|
||||
|
||||
scs, err := a.Client.GetClient().StorageV1().StorageClasses().List(a.Context, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, sc := range scs.Items {
|
||||
var failures []common.Failure
|
||||
|
||||
// Check for deprecated storage classes
|
||||
if sc.Provisioner == "kubernetes.io/no-provisioner" {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("StorageClass %s uses deprecated provisioner 'kubernetes.io/no-provisioner'", sc.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
|
||||
// Check for default storage class
|
||||
if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
|
||||
// Check if there are multiple default storage classes
|
||||
defaultCount := 0
|
||||
for _, otherSc := range scs.Items {
|
||||
if otherSc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
|
||||
defaultCount++
|
||||
}
|
||||
}
|
||||
if defaultCount > 1 {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("Multiple default StorageClasses found (%d), which can cause confusion", defaultCount),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
results = append(results, common.Result{
|
||||
Kind: "Storage/StorageClass",
|
||||
Name: sc.Name,
|
||||
Error: failures,
|
||||
})
|
||||
AnalyzerErrorsMetric.WithLabelValues("Storage/StorageClass", sc.Name, "").Set(float64(len(failures)))
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func analyzePersistentVolumes(a common.Analyzer) ([]common.Result, error) {
|
||||
var results []common.Result
|
||||
|
||||
pvs, err := a.Client.GetClient().CoreV1().PersistentVolumes().List(a.Context, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pv := range pvs.Items {
|
||||
var failures []common.Failure
|
||||
|
||||
// Check for released PVs
|
||||
if pv.Status.Phase == v1.VolumeReleased {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolume %s is in Released state and should be cleaned up", pv.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
|
||||
// Check for failed PVs
|
||||
if pv.Status.Phase == v1.VolumeFailed {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolume %s is in Failed state", pv.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
|
||||
// Check for small PVs (less than 1Gi)
|
||||
if capacity, ok := pv.Spec.Capacity[v1.ResourceStorage]; ok {
|
||||
if capacity.Cmp(resource.MustParse("1Gi")) < 0 {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolume %s has small capacity (%s)", pv.Name, capacity.String()),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
results = append(results, common.Result{
|
||||
Kind: "Storage/PersistentVolume",
|
||||
Name: pv.Name,
|
||||
Error: failures,
|
||||
})
|
||||
AnalyzerErrorsMetric.WithLabelValues("Storage/PersistentVolume", pv.Name, "").Set(float64(len(failures)))
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func analyzePersistentVolumeClaims(a common.Analyzer) ([]common.Result, error) {
|
||||
var results []common.Result
|
||||
|
||||
pvcs, err := a.Client.GetClient().CoreV1().PersistentVolumeClaims(a.Namespace).List(a.Context, metav1.ListOptions{
|
||||
LabelSelector: a.LabelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pvc := range pvcs.Items {
|
||||
var failures []common.Failure
|
||||
|
||||
// Check for PVC state issues first (most critical)
|
||||
switch pvc.Status.Phase {
|
||||
case v1.ClaimPending:
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolumeClaim %s is in Pending state", pvc.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
case v1.ClaimLost:
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolumeClaim %s is in Lost state", pvc.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
default:
|
||||
// Only check other issues if PVC is not in a critical state
|
||||
if capacity, ok := pvc.Spec.Resources.Requests[v1.ResourceStorage]; ok {
|
||||
if capacity.Cmp(resource.MustParse("1Gi")) < 0 {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolumeClaim %s has small capacity (%s)", pvc.Name, capacity.String()),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Check for missing storage class
|
||||
if pvc.Spec.StorageClassName == nil && pvc.Spec.VolumeName == "" {
|
||||
failures = append(failures, common.Failure{
|
||||
Text: fmt.Sprintf("PersistentVolumeClaim %s has no StorageClass specified", pvc.Name),
|
||||
Sensitive: []common.Sensitive{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Only report the first failure found
|
||||
if len(failures) > 0 {
|
||||
results = append(results, common.Result{
|
||||
Kind: "Storage/PersistentVolumeClaim",
|
||||
Name: fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name),
|
||||
Error: failures[:1],
|
||||
})
|
||||
AnalyzerErrorsMetric.WithLabelValues("Storage/PersistentVolumeClaim", pvc.Name, pvc.Namespace).Set(1)
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
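
Note on the 1Gi threshold above: the capacity checks go through resource.Quantity.Cmp rather than string or integer comparison, so binary ("Mi"/"Gi") and decimal ("M"/"G") suffixes are handled uniformly. A standalone sketch of that comparison semantics, illustrative only and not part of this change:

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/api/resource"
	)

	func main() {
		threshold := resource.MustParse("1Gi") // 2^30 bytes
		small := resource.MustParse("500Mi")   // 500 * 2^20 bytes, the size used in the tests below
		large := resource.MustParse("2Gi")

		// Cmp returns -1, 0 or 1; "< 0" therefore means "strictly below the threshold".
		fmt.Println(small.Cmp(threshold) < 0) // true  -> would be flagged as small
		fmt.Println(large.Cmp(threshold) < 0) // false -> would not be flagged
	}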
254
pkg/analyzer/storage_test.go
Normal file
254
pkg/analyzer/storage_test.go
Normal file
@ -0,0 +1,254 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
	"context"
	"testing"

	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestStorageAnalyzer(t *testing.T) {
	tests := []struct {
		name           string
		namespace      string
		storageClasses []storagev1.StorageClass
		pvs            []v1.PersistentVolume
		pvcs           []v1.PersistentVolumeClaim
		expectedErrors int
	}{
		{
			name:      "Deprecated StorageClass",
			namespace: "default",
			storageClasses: []storagev1.StorageClass{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "deprecated-sc",
					},
					Provisioner: "kubernetes.io/no-provisioner",
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "Multiple Default StorageClasses",
			namespace: "default",
			storageClasses: []storagev1.StorageClass{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "default-sc1",
						Annotations: map[string]string{
							"storageclass.kubernetes.io/is-default-class": "true",
						},
					},
					Provisioner: "kubernetes.io/gce-pd",
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "default-sc2",
						Annotations: map[string]string{
							"storageclass.kubernetes.io/is-default-class": "true",
						},
					},
					Provisioner: "kubernetes.io/aws-ebs",
				},
			},
			expectedErrors: 2,
		},
		{
			name:      "Released PV",
			namespace: "default",
			pvs: []v1.PersistentVolume{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "released-pv",
					},
					Status: v1.PersistentVolumeStatus{
						Phase: v1.VolumeReleased,
					},
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "Failed PV",
			namespace: "default",
			pvs: []v1.PersistentVolume{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "failed-pv",
					},
					Status: v1.PersistentVolumeStatus{
						Phase: v1.VolumeFailed,
					},
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "Small PV",
			namespace: "default",
			pvs: []v1.PersistentVolume{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "small-pv",
					},
					Spec: v1.PersistentVolumeSpec{
						Capacity: v1.ResourceList{
							v1.ResourceStorage: resource.MustParse("500Mi"),
						},
					},
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "Pending PVC",
			namespace: "default",
			pvcs: []v1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "pending-pvc",
						Namespace: "default",
					},
					Status: v1.PersistentVolumeClaimStatus{
						Phase: v1.ClaimPending,
					},
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "Lost PVC",
			namespace: "default",
			pvcs: []v1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "lost-pvc",
						Namespace: "default",
					},
					Status: v1.PersistentVolumeClaimStatus{
						Phase: v1.ClaimLost,
					},
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "Small PVC",
			namespace: "default",
			pvcs: []v1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "small-pvc",
						Namespace: "default",
					},
					Spec: v1.PersistentVolumeClaimSpec{
						Resources: v1.VolumeResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceStorage: resource.MustParse("500Mi"),
							},
						},
					},
				},
			},
			expectedErrors: 1,
		},
		{
			name:      "PVC without StorageClass",
			namespace: "default",
			pvcs: []v1.PersistentVolumeClaim{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "no-sc-pvc",
						Namespace: "default",
					},
					Spec: v1.PersistentVolumeClaimSpec{
						Resources: v1.VolumeResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceStorage: resource.MustParse("1Gi"),
							},
						},
					},
				},
			},
			expectedErrors: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create fake client
			client := fake.NewSimpleClientset()

			// Create test resources
			for _, sc := range tt.storageClasses {
				_, err := client.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("Failed to create StorageClass: %v", err)
				}
			}

			for _, pv := range tt.pvs {
				_, err := client.CoreV1().PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("Failed to create PV: %v", err)
				}
			}

			for _, pvc := range tt.pvcs {
				_, err := client.CoreV1().PersistentVolumeClaims(tt.namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("Failed to create PVC: %v", err)
				}
			}

			// Create analyzer
			analyzer := StorageAnalyzer{}

			// Create analyzer config
			config := common.Analyzer{
				Client: &kubernetes.Client{
					Client: client,
				},
				Context:   context.TODO(),
				Namespace: tt.namespace,
			}

			// Run analysis
			results, err := analyzer.Analyze(config)
			if err != nil {
				t.Fatalf("Failed to run analysis: %v", err)
			}

			// Count total errors
			totalErrors := 0
			for _, result := range results {
				totalErrors += len(result.Error)
			}

			// Check error count
			if totalErrors != tt.expectedErrors {
				t.Errorf("Expected %d errors, got %d", tt.expectedErrors, totalErrors)
			}
		})
	}
}
10
pkg/analyzer/test_utils.go
Normal file
10
pkg/analyzer/test_utils.go
Normal file
@ -0,0 +1,10 @@
package analyzer

// Helper functions for tests
func boolPtr(b bool) *bool {
	return &b
}

func int64Ptr(i int64) *int64 {
	return &i
}
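
These helpers exist because many Kubernetes API fields are pointer-typed and cannot take literals directly. A hedged illustration of the intended use (RunAsNonRoot and RunAsUser are standard core/v1 SecurityContext fields; the fixture itself is hypothetical and not part of this change, and assumes v1 is k8s.io/api/core/v1 as in the analyzer tests):

	// Hypothetical test fixture built with the helpers above.
	securityContext := &v1.SecurityContext{
		RunAsNonRoot: boolPtr(true),  // *bool field
		RunAsUser:    int64Ptr(1000), // *int64 field
	}
	_ = securityContext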
@ -1,8 +1,10 @@
package query

import (
	schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
	"context"
	"fmt"

	schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
)

@ -10,9 +12,50 @@ func (h *Handler) Query(ctx context.Context, i *schemav1.QueryRequest) (
	*schemav1.QueryResponse,
	error,
) {
	aiClient := ai.NewClient(i.Backend)
	// Create client factory and config provider
	factory := ai.GetAIClientFactory()
	configProvider := ai.GetConfigProvider()

	// Use the factory to create the client
	aiClient := factory.NewClient(i.Backend)
	defer aiClient.Close()

	var configAI ai.AIConfiguration
	if err := configProvider.UnmarshalKey("ai", &configAI); err != nil {
		return &schemav1.QueryResponse{
			Response: "",
			Error: &schemav1.QueryError{
				Message: fmt.Sprintf("Failed to unmarshal AI configuration: %v", err),
			},
		}, nil
	}

	var aiProvider ai.AIProvider
	for _, provider := range configAI.Providers {
		if i.Backend == provider.Name {
			aiProvider = provider
			break
		}
	}
	if aiProvider.Name == "" {
		return &schemav1.QueryResponse{
			Response: "",
			Error: &schemav1.QueryError{
				Message: fmt.Sprintf("AI provider %s not found in configuration", i.Backend),
			},
		}, nil
	}

	// Configure the AI client
	if err := aiClient.Configure(&aiProvider); err != nil {
		return &schemav1.QueryResponse{
			Response: "",
			Error: &schemav1.QueryError{
				Message: fmt.Sprintf("Failed to configure AI client: %v", err),
			},
		}, nil
	}

	resp, err := aiClient.GetCompletion(ctx, i.Query)
	var errMessage string = ""
	if err != nil {
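
The handler now obtains its AI client and configuration through ai.GetAIClientFactory() and ai.GetConfigProvider(), which the tests below swap out via ai.SetTestAIClientFactory / ai.SetTestConfigProvider. The actual declarations live in pkg/ai and are not shown in this hunk; a minimal sketch of what such seams could look like, inferred only from the calls used here and not to be read as the package's real definitions:

	// Sketch only - the real definitions in pkg/ai may differ.
	type AIClientFactory interface {
		NewClient(provider string) IAI
	}

	type ConfigProvider interface {
		UnmarshalKey(key string, rawVal interface{}) error
	}

	// Package-level accessors assumed by this handler:
	//   GetAIClientFactory() returns the active factory,
	//   GetConfigProvider() returns the active configuration source,
	//   SetTestAIClientFactory / SetTestConfigProvider / ResetTestImplementations
	//   substitute and restore these implementations for tests.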
310
pkg/server/query/query_test.go
Normal file
310
pkg/server/query/query_test.go
Normal file
@ -0,0 +1,310 @@
package query

import (
	"context"
	"errors"
	"testing"

	schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// MockAI is a mock implementation of the ai.IAI interface for testing
type MockAI struct {
	mock.Mock
}

func (m *MockAI) Configure(config ai.IAIConfig) error {
	args := m.Called(config)
	return args.Error(0)
}

func (m *MockAI) GetCompletion(ctx context.Context, prompt string) (string, error) {
	args := m.Called(ctx, prompt)
	return args.String(0), args.Error(1)
}

func (m *MockAI) GetName() string {
	args := m.Called()
	return args.String(0)
}

func (m *MockAI) Close() {
	m.Called()
}

// MockAIClientFactory is a mock implementation of AIClientFactory
type MockAIClientFactory struct {
	mock.Mock
}

func (m *MockAIClientFactory) NewClient(provider string) ai.IAI {
	args := m.Called(provider)
	return args.Get(0).(ai.IAI)
}

// MockConfigProvider is a mock implementation of ConfigProvider
type MockConfigProvider struct {
	mock.Mock
}

func (m *MockConfigProvider) UnmarshalKey(key string, rawVal interface{}) error {
	args := m.Called(key, rawVal)

	// If we want to set the rawVal (which is a pointer)
	if fn, ok := args.Get(0).(func(interface{})); ok && fn != nil {
		fn(rawVal)
	}

	// Return the error as the first return value
	return args.Error(0)
}

func TestQuery_Success(t *testing.T) {
	// Setup mocks
	mockAI := new(MockAI)
	mockFactory := new(MockAIClientFactory)
	mockConfig := new(MockConfigProvider)

	// Set test implementations
	ai.SetTestAIClientFactory(mockFactory)
	ai.SetTestConfigProvider(mockConfig)
	defer ai.ResetTestImplementations()

	// Define test data
	testBackend := "test-backend"
	testQuery := "test query"
	testResponse := "test response"

	// Setup expectations
	mockFactory.On("NewClient", testBackend).Return(mockAI)
	mockAI.On("Close").Return()

	// Set up configuration with a valid provider
	mockConfig.On("UnmarshalKey", "ai", mock.Anything).Run(func(args mock.Arguments) {
		config := args.Get(1).(*ai.AIConfiguration)
		*config = ai.AIConfiguration{
			Providers: []ai.AIProvider{
				{
					Name:     testBackend,
					Password: "test-password",
					Model:    "test-model",
				},
			},
		}
	}).Return(nil)

	mockAI.On("Configure", mock.AnythingOfType("*ai.AIProvider")).Return(nil)
	mockAI.On("GetCompletion", mock.Anything, testQuery).Return(testResponse, nil)

	// Create handler and call Query
	handler := &Handler{}
	response, err := handler.Query(context.Background(), &schemav1.QueryRequest{
		Backend: testBackend,
		Query:   testQuery,
	})

	// Assertions
	assert.NoError(t, err)
	assert.NotNil(t, response)
	assert.Equal(t, testResponse, response.Response)
	assert.Equal(t, "", response.Error.Message)

	// Verify mocks
	mockAI.AssertExpectations(t)
	mockFactory.AssertExpectations(t)
	mockConfig.AssertExpectations(t)
}

func TestQuery_UnmarshalError(t *testing.T) {
	// Setup mocks
	mockAI := new(MockAI)
	mockFactory := new(MockAIClientFactory)
	mockConfig := new(MockConfigProvider)

	// Set test implementations
	ai.SetTestAIClientFactory(mockFactory)
	ai.SetTestConfigProvider(mockConfig)
	defer ai.ResetTestImplementations()

	// Setup expectations
	mockFactory.On("NewClient", "test-backend").Return(mockAI)
	mockAI.On("Close").Return()

	// Mock unmarshal error
	mockConfig.On("UnmarshalKey", "ai", mock.Anything).Return(errors.New("unmarshal error"))

	// Create handler and call Query
	handler := &Handler{}
	response, err := handler.Query(context.Background(), &schemav1.QueryRequest{
		Backend: "test-backend",
		Query:   "test query",
	})

	// Assertions
	assert.NoError(t, err)
	assert.NotNil(t, response)
	assert.Equal(t, "", response.Response)
	assert.Contains(t, response.Error.Message, "Failed to unmarshal AI configuration")

	// Verify mocks
	mockAI.AssertExpectations(t)
	mockFactory.AssertExpectations(t)
	mockConfig.AssertExpectations(t)
}

func TestQuery_ProviderNotFound(t *testing.T) {
	// Setup mocks
	mockAI := new(MockAI)
	mockFactory := new(MockAIClientFactory)
	mockConfig := new(MockConfigProvider)

	// Set test implementations
	ai.SetTestAIClientFactory(mockFactory)
	ai.SetTestConfigProvider(mockConfig)
	defer ai.ResetTestImplementations()

	// Define test data
	testBackend := "test-backend"

	// Setup expectations
	mockFactory.On("NewClient", testBackend).Return(mockAI)
	mockAI.On("Close").Return()

	// Set up configuration with no matching provider
	mockConfig.On("UnmarshalKey", "ai", mock.Anything).Run(func(args mock.Arguments) {
		config := args.Get(1).(*ai.AIConfiguration)
		*config = ai.AIConfiguration{
			Providers: []ai.AIProvider{
				{
					Name: "other-backend",
				},
			},
		}
	}).Return(nil)

	// Create handler and call Query
	handler := &Handler{}
	response, err := handler.Query(context.Background(), &schemav1.QueryRequest{
		Backend: testBackend,
		Query:   "test query",
	})

	// Assertions
	assert.NoError(t, err)
	assert.NotNil(t, response)
	assert.Equal(t, "", response.Response)
	assert.Contains(t, response.Error.Message, "AI provider test-backend not found in configuration")

	// Verify mocks
	mockAI.AssertExpectations(t)
	mockFactory.AssertExpectations(t)
	mockConfig.AssertExpectations(t)
}

func TestQuery_ConfigureError(t *testing.T) {
	// Setup mocks
	mockAI := new(MockAI)
	mockFactory := new(MockAIClientFactory)
	mockConfig := new(MockConfigProvider)

	// Set test implementations
	ai.SetTestAIClientFactory(mockFactory)
	ai.SetTestConfigProvider(mockConfig)
	defer ai.ResetTestImplementations()

	// Define test data
	testBackend := "test-backend"

	// Setup expectations
	mockFactory.On("NewClient", testBackend).Return(mockAI)
	mockAI.On("Close").Return()

	// Set up configuration with a valid provider
	mockConfig.On("UnmarshalKey", "ai", mock.Anything).Run(func(args mock.Arguments) {
		config := args.Get(1).(*ai.AIConfiguration)
		*config = ai.AIConfiguration{
			Providers: []ai.AIProvider{
				{
					Name: testBackend,
				},
			},
		}
	}).Return(nil)

	// Mock configure error
	mockAI.On("Configure", mock.AnythingOfType("*ai.AIProvider")).Return(errors.New("configure error"))

	// Create handler and call Query
	handler := &Handler{}
	response, err := handler.Query(context.Background(), &schemav1.QueryRequest{
		Backend: testBackend,
		Query:   "test query",
	})

	// Assertions
	assert.NoError(t, err)
	assert.NotNil(t, response)
	assert.Equal(t, "", response.Response)
	assert.Contains(t, response.Error.Message, "Failed to configure AI client")

	// Verify mocks
	mockAI.AssertExpectations(t)
	mockFactory.AssertExpectations(t)
	mockConfig.AssertExpectations(t)
}

func TestQuery_GetCompletionError(t *testing.T) {
	// Setup mocks
	mockAI := new(MockAI)
	mockFactory := new(MockAIClientFactory)
	mockConfig := new(MockConfigProvider)

	// Set test implementations
	ai.SetTestAIClientFactory(mockFactory)
	ai.SetTestConfigProvider(mockConfig)
	defer ai.ResetTestImplementations()

	// Define test data
	testBackend := "test-backend"
	testQuery := "test query"

	// Setup expectations
	mockFactory.On("NewClient", testBackend).Return(mockAI)
	mockAI.On("Close").Return()

	// Set up configuration with a valid provider
	mockConfig.On("UnmarshalKey", "ai", mock.Anything).Run(func(args mock.Arguments) {
		config := args.Get(1).(*ai.AIConfiguration)
		*config = ai.AIConfiguration{
			Providers: []ai.AIProvider{
				{
					Name: testBackend,
				},
			},
		}
	}).Return(nil)

	mockAI.On("Configure", mock.AnythingOfType("*ai.AIProvider")).Return(nil)
	mockAI.On("GetCompletion", mock.Anything, testQuery).Return("", errors.New("completion error"))

	// Create handler and call Query
	handler := &Handler{}
	response, err := handler.Query(context.Background(), &schemav1.QueryRequest{
		Backend: testBackend,
		Query:   testQuery,
	})

	// Assertions
	assert.NoError(t, err)
	assert.NotNil(t, response)
	assert.Equal(t, "", response.Response)
	assert.Equal(t, "completion error", response.Error.Message)

	// Verify mocks
	mockAI.AssertExpectations(t)
	mockFactory.AssertExpectations(t)
	mockConfig.AssertExpectations(t)
}
@ -14,6 +14,7 @@ limitations under the License.
package util

import (
	"bytes"
	"context"
	"crypto/rand"
	"crypto/sha256"
@ -311,3 +312,33 @@ func LabelStrToSelector(labelStr string) labels.Selector {
	}
	return labels.SelectorFromSet(labels.Set(labelSelectorMap))
}

// CaptureOutput captures the output of a function that writes to stdout
func CaptureOutput(f func()) string {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		panic(fmt.Sprintf("failed to create pipe: %v", err))
	}
	os.Stdout = w
	// Ensure os.Stdout is restored even if panic occurs
	defer func() {
		os.Stdout = old
	}()

	f()

	if err := w.Close(); err != nil {
		panic(fmt.Sprintf("failed to close writer: %v", err))
	}
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(r); err != nil {
		panic(fmt.Sprintf("failed to read from pipe: %v", err))
	}
	return buf.String()
}

// Contains checks if substr is present in s
func Contains(s, substr string) bool {
	return bytes.Contains([]byte(s), []byte(substr))
}
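
CaptureOutput is aimed at tests that assert on text printed to stdout; because it swaps the global os.Stdout, it is not safe to use from concurrently running tests. A hypothetical usage sketch (the printed message and the surrounding test are made up for illustration):

	// Inside a test, assuming the util package is imported:
	out := util.CaptureOutput(func() {
		fmt.Println("analysis complete")
	})
	if !util.Contains(out, "analysis complete") {
		t.Errorf("unexpected output: %q", out)
	}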