mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2025-05-10 17:16:06 +00:00)
feat: new analyzers (#1459)
* chore: rebased chore: removed trivy Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: updated deps Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix: missing error Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix: missing error Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* feat: switching old sonnet to message API Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* feat: added three new analyzers Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.2 (#1400) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* docs: remove extra dollar sign in README.md (#1410) Signed-off-by: Qian_Xiao <heyheyco@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* test: add tests for `k8sgpt/pkg/analyzer/events.go` (#913) * test: add tests for events_test.go Signed-off-by: Eshaan Aggarwal <96648934+EshaanAgg@users.noreply.github.com> * feat: fixed event tests Signed-off-by: Alex Jones <alexsimonjones@gmail.com> --------- Signed-off-by: Eshaan Aggarwal <96648934+EshaanAgg@users.noreply.github.com> Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Co-authored-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* docs: add table of contents and cleanup (#1413) Signed-off-by: hadi2f244 <m.h.azaddel@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: linter (#1414) * chore: changing linter Signed-off-by: Alex Jones <alexsimonjones@gmail.com> * chore: changing linter Signed-off-by: Alex Jones <alexsimonjones@gmail.com> * chore: changing linter Signed-off-by: Alex Jones <alexsimonjones@gmail.com> --------- Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): pin golangci/golangci-lint-action action to 1481404 (#1415) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): update goreleaser/goreleaser-action digest to 9c156ee (#1411) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix: prometheus UTF8Validation (#1404) Signed-off-by: Kay Yan <kay.yan@daocloud.io> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix(deps): update module gopkg.in/yaml.v2 to v3 (#1363) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: added new AmazonBedrock model (#1390) * Update AI Bedrock region - Added mumbai region Signed-off-by: Sakshi Singh <66963254+sakshirajput02@users.noreply.github.com> * Update amazonbedrock.go Signed-off-by: Sakshi Singh <66963254+sakshirajput02@users.noreply.github.com> * Added new AI model to work for ap-south-1 region[that does not uses inference profile] Signed-off-by: Sakshi Singh <66963254+sakshirajput02@users.noreply.github.com> --------- Signed-off-by: Sakshi Singh <66963254+sakshirajput02@users.noreply.github.com> Co-authored-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.3 (#1412) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): update module github.com/docker/docker to v28 (#1376) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: updating deps (#1422) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): update docker/setup-buildx-action digest to b5ca514 (#1371) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.4 (#1421) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: fix workflows (#1423) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.5 (#1424) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: fixing docker build push action (#1426) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: updated actor for login (#1430) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): pin docker/build-push-action action to 471d1dc (#1428) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.6 (#1427) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: fixing build (#1431) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): update actions/upload-artifact digest to ea165f8 (#1425) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.7 (#1432) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: removed krew release (#1434) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.8 (#1435) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: fixing (#1437) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(deps): pin dependencies (#1440) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.9 (#1439) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix: pod analyzer catches errors when containers are in Terminated state (#1438) Signed-off-by: Guoxun Wei <guwe@microsoft.com> Co-authored-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* feat: add a naive support of bedrock inference profile (#1446) * feat: add a naive support of bedrock inference profile Signed-off-by: Tony Chen <tony_chen@discovery.com> * feat: improving the tests Signed-off-by: Alex Jones <alexsimonjones@gmail.com> --------- Signed-off-by: Tony Chen <tony_chen@discovery.com> Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Co-authored-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix(deps): update module gopkg.in/yaml.v2 to v3 (#1417) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix(deps): update module helm.sh/helm/v3 to v3.17.3 [security] (#1448) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.10 (#1441) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* feat: call bedrock with inference profile (#1449) * call bedrock with inference profile Signed-off-by: Tony Chen <tony_chen@discovery.com> * add validation and test Signed-off-by: Tony Chen <tony_chen@discovery.com> * update test Signed-off-by: Tony Chen <tony_chen@discovery.com> --------- Signed-off-by: Tony Chen <tony_chen@discovery.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix(deps): update module gopkg.in/yaml.v2 to v3 (#1447) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* docs: fix the slack invite link (#1450) Signed-off-by: Pengfei Ni <feiskyer@gmail.com> Co-authored-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* feat: add verbose flag to enable detailed output (#1420) * feat: add verbose flag to enable detailed output Signed-off-by: Yicheng <36285652+zyc140345@users.noreply.github.com> * test: add verbose output tests for analysis.go and root.go Signed-off-by: Yicheng <36285652+zyc140345@users.noreply.github.com> --------- Signed-off-by: Yicheng <36285652+zyc140345@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix(deps): update module gopkg.in/yaml.v2 to v3 (#1453) Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* feat: improved test coverage (#1455) Signed-off-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* fix: config ai provider in query (#1457) Signed-off-by: Guoxun Wei <guwe@microsoft.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore(main): release 0.4.11 (#1451) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: fixed test Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
* chore: fixed test

---------

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
Signed-off-by: Qian_Xiao <heyheyco@gmail.com>
Signed-off-by: Eshaan Aggarwal <96648934+EshaanAgg@users.noreply.github.com>
Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
Signed-off-by: hadi2f244 <m.h.azaddel@gmail.com>
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Signed-off-by: Kay Yan <kay.yan@daocloud.io>
Signed-off-by: Sakshi Singh <66963254+sakshirajput02@users.noreply.github.com>
Signed-off-by: Guoxun Wei <guwe@microsoft.com>
Signed-off-by: Tony Chen <tony_chen@discovery.com>
Signed-off-by: Pengfei Ni <feiskyer@gmail.com>
Signed-off-by: Yicheng <36285652+zyc140345@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Qian_Xiao <heyheyco@gmail.com>
Co-authored-by: Eshaan Aggarwal <96648934+EshaanAgg@users.noreply.github.com>
Co-authored-by: Hadi Azaddel <m.h.azaddel@gmail.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Kay Yan <kay.yan@daocloud.io>
Co-authored-by: Sakshi Singh <66963254+sakshirajput02@users.noreply.github.com>
Co-authored-by: gossion <guwe@microsoft.com>
Co-authored-by: ju187 <tony_chen@discovery.com>
Co-authored-by: Pengfei Ni <feiskyer@users.noreply.github.com>
Co-authored-by: Yicheng <36285652+zyc140345@users.noreply.github.com>
parent 0553b984b7
commit a128906136
@@ -196,6 +196,9 @@ you will be able to write your own analyzers.
- [x] gateway
- [x] httproute
- [x] logAnalyzer
- [x] storageAnalyzer
- [x] securityAnalyzer
- [x] configMapAnalyzer

## Examples
@@ -43,6 +43,7 @@ var coreAnalyzerMap = map[string]common.IAnalyzer{
    "Node":                           NodeAnalyzer{},
    "ValidatingWebhookConfiguration": ValidatingWebhookAnalyzer{},
    "MutatingWebhookConfiguration":   MutatingWebhookAnalyzer{},
    "ConfigMap":                      ConfigMapAnalyzer{},
}

var additionalAnalyzerMap = map[string]common.IAnalyzer{
@@ -53,6 +54,8 @@ var additionalAnalyzerMap = map[string]common.IAnalyzer{
    "GatewayClass": GatewayClassAnalyzer{},
    "Gateway":      GatewayAnalyzer{},
    "HTTPRoute":    HTTPRouteAnalyzer{},
    "Storage":      StorageAnalyzer{},
    "Security":     SecurityAnalyzer{},
}

func ListFilters() ([]string, []string, []string) {
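The README hunk above promises that "you will be able to write your own analyzers", and these two registration maps are the extension point: every value stored in `coreAnalyzerMap` or `additionalAnalyzerMap` must satisfy `common.IAnalyzer`, whose method, as each file in this commit shows, is `Analyze(a common.Analyzer) ([]common.Result, error)`. A minimal sketch of a custom analyzer follows; the `ExampleAnalyzer` type, the `"Example"` key, and the failure text are illustrative names and are not part of this commit.

```go
package analyzer

import (
    "fmt"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
)

// ExampleAnalyzer is a hypothetical analyzer shown only to illustrate the
// IAnalyzer contract used by the maps above; it is not part of this commit.
type ExampleAnalyzer struct{}

func (ExampleAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
    // Inspect cluster state via a.Client.GetClient() here, the way the
    // analyzers below do, then emit one Result per object with failures.
    return []common.Result{
        {
            Kind: "Example",
            Name: fmt.Sprintf("%s/%s", a.Namespace, "example"),
            Error: []common.Failure{
                {Text: "illustrative failure", Sensitive: []common.Sensitive{}},
            },
        },
    }, nil
}

// Registering it alongside the built-ins would then look like:
//
//	additionalAnalyzerMap["Example"] = ExampleAnalyzer{}
```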
pkg/analyzer/configmap.go (new file, 125 lines)
@@ -0,0 +1,125 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
    "fmt"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type ConfigMapAnalyzer struct{}

func (ConfigMapAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
    kind := "ConfigMap"

    AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
        "analyzer_name": kind,
    })

    // Get all ConfigMaps in the namespace
    configMaps, err := a.Client.GetClient().CoreV1().ConfigMaps(a.Namespace).List(a.Context, metav1.ListOptions{
        LabelSelector: a.LabelSelector,
    })
    if err != nil {
        return nil, err
    }

    // Get all Pods to check ConfigMap usage
    pods, err := a.Client.GetClient().CoreV1().Pods(a.Namespace).List(a.Context, metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    var results []common.Result

    // Track which ConfigMaps are used
    usedConfigMaps := make(map[string]bool)
    configMapUsage := make(map[string][]string) // maps ConfigMap name to list of pods using it

    // Analyze ConfigMap usage in Pods
    for _, pod := range pods.Items {
        // Check volume mounts
        for _, volume := range pod.Spec.Volumes {
            if volume.ConfigMap != nil {
                usedConfigMaps[volume.ConfigMap.Name] = true
                configMapUsage[volume.ConfigMap.Name] = append(configMapUsage[volume.ConfigMap.Name], pod.Name)
            }
        }

        // Check environment variables
        for _, container := range pod.Spec.Containers {
            for _, env := range container.EnvFrom {
                if env.ConfigMapRef != nil {
                    usedConfigMaps[env.ConfigMapRef.Name] = true
                    configMapUsage[env.ConfigMapRef.Name] = append(configMapUsage[env.ConfigMapRef.Name], pod.Name)
                }
            }
            for _, env := range container.Env {
                if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil {
                    usedConfigMaps[env.ValueFrom.ConfigMapKeyRef.Name] = true
                    configMapUsage[env.ValueFrom.ConfigMapKeyRef.Name] = append(configMapUsage[env.ValueFrom.ConfigMapKeyRef.Name], pod.Name)
                }
            }
        }
    }

    // Analyze each ConfigMap
    for _, cm := range configMaps.Items {
        var failures []common.Failure

        // Check for unused ConfigMaps
        if !usedConfigMaps[cm.Name] {
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("ConfigMap %s is not used by any pods in the namespace", cm.Name),
                Sensitive: []common.Sensitive{},
            })
        }

        // Check for empty ConfigMaps
        if len(cm.Data) == 0 && len(cm.BinaryData) == 0 {
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("ConfigMap %s is empty", cm.Name),
                Sensitive: []common.Sensitive{},
            })
        }

        // Check for large ConfigMaps (over 1MB)
        totalSize := 0
        for _, value := range cm.Data {
            totalSize += len(value)
        }
        for _, value := range cm.BinaryData {
            totalSize += len(value)
        }
        if totalSize > 1024*1024 { // 1MB
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("ConfigMap %s is larger than 1MB (%d bytes)", cm.Name, totalSize),
                Sensitive: []common.Sensitive{},
            })
        }

        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  kind,
                Name:  fmt.Sprintf("%s/%s", cm.Namespace, cm.Name),
                Error: failures,
            })
            AnalyzerErrorsMetric.WithLabelValues(kind, cm.Name, cm.Namespace).Set(float64(len(failures)))
        }
    }

    return results, nil
}
pkg/analyzer/configmap_test.go (new file, 149 lines)
@@ -0,0 +1,149 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
    "context"
    "testing"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
    "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
    "github.com/stretchr/testify/assert"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func TestConfigMapAnalyzer(t *testing.T) {
    tests := []struct {
        name           string
        namespace      string
        configMaps     []v1.ConfigMap
        pods           []v1.Pod
        expectedErrors int
    }{
        {
            name:      "unused configmap",
            namespace: "default",
            configMaps: []v1.ConfigMap{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "unused-cm",
                        Namespace: "default",
                    },
                    Data: map[string]string{
                        "key": "value",
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "empty configmap",
            namespace: "default",
            configMaps: []v1.ConfigMap{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "empty-cm",
                        Namespace: "default",
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "large configmap",
            namespace: "default",
            configMaps: []v1.ConfigMap{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "large-cm",
                        Namespace: "default",
                    },
                    Data: map[string]string{
                        "key": string(make([]byte, 1024*1024+1)), // 1MB + 1 byte
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "used configmap",
            namespace: "default",
            configMaps: []v1.ConfigMap{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "used-cm",
                        Namespace: "default",
                    },
                    Data: map[string]string{
                        "key": "value",
                    },
                },
            },
            pods: []v1.Pod{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "test-pod",
                        Namespace: "default",
                    },
                    Spec: v1.PodSpec{
                        Containers: []v1.Container{
                            {
                                Name: "test-container",
                                EnvFrom: []v1.EnvFromSource{
                                    {
                                        ConfigMapRef: &v1.ConfigMapEnvSource{
                                            LocalObjectReference: v1.LocalObjectReference{
                                                Name: "used-cm",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
            expectedErrors: 0,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            client := fake.NewSimpleClientset()

            // Create test resources
            for _, cm := range tt.configMaps {
                _, err := client.CoreV1().ConfigMaps(tt.namespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
                assert.NoError(t, err)
            }

            for _, pod := range tt.pods {
                _, err := client.CoreV1().Pods(tt.namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
                assert.NoError(t, err)
            }

            analyzer := ConfigMapAnalyzer{}
            results, err := analyzer.Analyze(common.Analyzer{
                Client:    &kubernetes.Client{Client: client},
                Context:   context.TODO(),
                Namespace: tt.namespace,
            })

            assert.NoError(t, err)
            assert.Equal(t, tt.expectedErrors, len(results))
        })
    }
}
@@ -293,12 +293,3 @@ func TestCheckCronScheduleIsValid(t *testing.T) {
        })
    }
}

// Helper functions
func boolPtr(b bool) *bool {
    return &b
}

func int64Ptr(i int64) *int64 {
    return &i
}
pkg/analyzer/security.go (new file, 201 lines)
@@ -0,0 +1,201 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
    "fmt"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type SecurityAnalyzer struct{}

func (SecurityAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
    kind := "Security"

    AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
        "analyzer_name": kind,
    })

    var results []common.Result

    // Analyze ServiceAccounts
    saResults, err := analyzeServiceAccounts(a)
    if err != nil {
        return nil, err
    }
    results = append(results, saResults...)

    // Analyze RoleBindings
    rbResults, err := analyzeRoleBindings(a)
    if err != nil {
        return nil, err
    }
    results = append(results, rbResults...)

    // Analyze Pod Security Contexts
    podResults, err := analyzePodSecurityContexts(a)
    if err != nil {
        return nil, err
    }
    results = append(results, podResults...)

    return results, nil
}

func analyzeServiceAccounts(a common.Analyzer) ([]common.Result, error) {
    var results []common.Result

    sas, err := a.Client.GetClient().CoreV1().ServiceAccounts(a.Namespace).List(a.Context, metav1.ListOptions{
        LabelSelector: a.LabelSelector,
    })
    if err != nil {
        return nil, err
    }

    for _, sa := range sas.Items {
        var failures []common.Failure

        // Check for default service account usage
        if sa.Name == "default" {
            pods, err := a.Client.GetClient().CoreV1().Pods(sa.Namespace).List(a.Context, metav1.ListOptions{})
            if err != nil {
                continue
            }

            defaultSAUsers := []string{}
            for _, pod := range pods.Items {
                if pod.Spec.ServiceAccountName == "default" {
                    defaultSAUsers = append(defaultSAUsers, pod.Name)
                }
            }

            if len(defaultSAUsers) > 0 {
                failures = append(failures, common.Failure{
                    Text:      fmt.Sprintf("Default service account is being used by pods: %v", defaultSAUsers),
                    Sensitive: []common.Sensitive{},
                })
            }
        }

        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  "Security/ServiceAccount",
                Name:  fmt.Sprintf("%s/%s", sa.Namespace, sa.Name),
                Error: failures,
            })
            AnalyzerErrorsMetric.WithLabelValues("Security/ServiceAccount", sa.Name, sa.Namespace).Set(float64(len(failures)))
        }
    }

    return results, nil
}

func analyzeRoleBindings(a common.Analyzer) ([]common.Result, error) {
    var results []common.Result

    rbs, err := a.Client.GetClient().RbacV1().RoleBindings(a.Namespace).List(a.Context, metav1.ListOptions{
        LabelSelector: a.LabelSelector,
    })
    if err != nil {
        return nil, err
    }

    for _, rb := range rbs.Items {
        var failures []common.Failure

        // Check for wildcards in role references
        role, err := a.Client.GetClient().RbacV1().Roles(rb.Namespace).Get(a.Context, rb.RoleRef.Name, metav1.GetOptions{})
        if err != nil {
            continue
        }

        for _, rule := range role.Rules {
            if containsWildcard(rule.Verbs) || containsWildcard(rule.Resources) {
                failures = append(failures, common.Failure{
                    Text:      fmt.Sprintf("RoleBinding %s references Role %s which contains wildcard permissions - this is not recommended for security best practices", rb.Name, role.Name),
                    Sensitive: []common.Sensitive{},
                })
            }
        }

        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  "Security/RoleBinding",
                Name:  fmt.Sprintf("%s/%s", rb.Namespace, rb.Name),
                Error: failures,
            })
            AnalyzerErrorsMetric.WithLabelValues("Security/RoleBinding", rb.Name, rb.Namespace).Set(float64(len(failures)))
        }
    }

    return results, nil
}

func analyzePodSecurityContexts(a common.Analyzer) ([]common.Result, error) {
    var results []common.Result

    pods, err := a.Client.GetClient().CoreV1().Pods(a.Namespace).List(a.Context, metav1.ListOptions{
        LabelSelector: a.LabelSelector,
    })
    if err != nil {
        return nil, err
    }

    for _, pod := range pods.Items {
        var failures []common.Failure

        // Check for privileged containers first (most critical)
        hasPrivilegedContainer := false
        for _, container := range pod.Spec.Containers {
            if container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged {
                failures = append(failures, common.Failure{
                    Text:      fmt.Sprintf("Container %s in pod %s is running as privileged which poses security risks", container.Name, pod.Name),
                    Sensitive: []common.Sensitive{},
                })
                hasPrivilegedContainer = true
                break
            }
        }

        // Only check for missing security context if no privileged containers found
        if !hasPrivilegedContainer && pod.Spec.SecurityContext == nil {
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("Pod %s does not have a security context defined which may pose security risks", pod.Name),
                Sensitive: []common.Sensitive{},
            })
        }

        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  "Security/Pod",
                Name:  fmt.Sprintf("%s/%s", pod.Namespace, pod.Name),
                Error: failures[:1],
            })
            AnalyzerErrorsMetric.WithLabelValues("Security/Pod", pod.Name, pod.Namespace).Set(1)
        }
    }

    return results, nil
}

func containsWildcard(slice []string) bool {
    for _, item := range slice {
        if item == "*" {
            return true
        }
    }
    return false
}
pkg/analyzer/security_test.go (new file, 181 lines)
@@ -0,0 +1,181 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
    "context"
    "testing"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
    "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
    "github.com/stretchr/testify/assert"
    v1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func TestSecurityAnalyzer(t *testing.T) {
    tests := []struct {
        name            string
        namespace       string
        serviceAccounts []v1.ServiceAccount
        pods            []v1.Pod
        roles           []rbacv1.Role
        roleBindings    []rbacv1.RoleBinding
        expectedErrors  int
        expectedKinds   []string
    }{
        {
            name:      "default service account usage",
            namespace: "default",
            serviceAccounts: []v1.ServiceAccount{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "default",
                        Namespace: "default",
                    },
                },
            },
            pods: []v1.Pod{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "test-pod",
                        Namespace: "default",
                    },
                    Spec: v1.PodSpec{
                        ServiceAccountName: "default",
                    },
                },
            },
            expectedErrors: 2,
            expectedKinds:  []string{"Security/ServiceAccount", "Security/Pod"},
        },
        {
            name:      "privileged container",
            namespace: "default",
            pods: []v1.Pod{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "privileged-pod",
                        Namespace: "default",
                    },
                    Spec: v1.PodSpec{
                        Containers: []v1.Container{
                            {
                                Name: "privileged-container",
                                SecurityContext: &v1.SecurityContext{
                                    Privileged: boolPtr(true),
                                },
                            },
                        },
                    },
                },
            },
            expectedErrors: 1,
            expectedKinds:  []string{"Security/Pod"},
        },
        {
            name:      "wildcard permissions in role",
            namespace: "default",
            roles: []rbacv1.Role{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "wildcard-role",
                        Namespace: "default",
                    },
                    Rules: []rbacv1.PolicyRule{
                        {
                            Verbs:     []string{"*"},
                            Resources: []string{"pods"},
                        },
                    },
                },
            },
            roleBindings: []rbacv1.RoleBinding{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "test-binding",
                        Namespace: "default",
                    },
                    RoleRef: rbacv1.RoleRef{
                        Kind: "Role",
                        Name: "wildcard-role",
                    },
                },
            },
            expectedErrors: 1,
            expectedKinds:  []string{"Security/RoleBinding"},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            client := fake.NewSimpleClientset()

            // Create test resources
            for _, sa := range tt.serviceAccounts {
                _, err := client.CoreV1().ServiceAccounts(tt.namespace).Create(context.TODO(), &sa, metav1.CreateOptions{})
                assert.NoError(t, err)
            }

            for _, pod := range tt.pods {
                _, err := client.CoreV1().Pods(tt.namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
                assert.NoError(t, err)
            }

            for _, role := range tt.roles {
                _, err := client.RbacV1().Roles(tt.namespace).Create(context.TODO(), &role, metav1.CreateOptions{})
                assert.NoError(t, err)
            }

            for _, rb := range tt.roleBindings {
                _, err := client.RbacV1().RoleBindings(tt.namespace).Create(context.TODO(), &rb, metav1.CreateOptions{})
                assert.NoError(t, err)
            }

            analyzer := SecurityAnalyzer{}
            results, err := analyzer.Analyze(common.Analyzer{
                Client:    &kubernetes.Client{Client: client},
                Context:   context.TODO(),
                Namespace: tt.namespace,
            })

            assert.NoError(t, err)

            // Debug: Print all results
            t.Logf("Got %d results:", len(results))
            for _, result := range results {
                t.Logf("  Kind: %s, Name: %s", result.Kind, result.Name)
                for _, failure := range result.Error {
                    t.Logf("    Failure: %s", failure.Text)
                }
            }

            // Count results by kind
            resultsByKind := make(map[string]int)
            for _, result := range results {
                resultsByKind[result.Kind]++
            }

            // Check that we have the expected number of results for each kind
            for _, expectedKind := range tt.expectedKinds {
                assert.Equal(t, 1, resultsByKind[expectedKind], "Expected 1 result of kind %s", expectedKind)
            }

            // Check total number of results matches expected kinds
            assert.Equal(t, len(tt.expectedKinds), len(results), "Expected %d total results", len(tt.expectedKinds))
        })
    }
}
pkg/analyzer/storage.go (new file, 216 lines)
@@ -0,0 +1,216 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
    "fmt"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type StorageAnalyzer struct{}

func (StorageAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
    kind := "Storage"

    AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
        "analyzer_name": kind,
    })

    var results []common.Result

    // Analyze StorageClasses
    scResults, err := analyzeStorageClasses(a)
    if err != nil {
        return nil, err
    }
    results = append(results, scResults...)

    // Analyze PersistentVolumes
    pvResults, err := analyzePersistentVolumes(a)
    if err != nil {
        return nil, err
    }
    results = append(results, pvResults...)

    // Analyze PVCs with enhanced checks
    pvcResults, err := analyzePersistentVolumeClaims(a)
    if err != nil {
        return nil, err
    }
    results = append(results, pvcResults...)

    return results, nil
}

func analyzeStorageClasses(a common.Analyzer) ([]common.Result, error) {
    var results []common.Result

    scs, err := a.Client.GetClient().StorageV1().StorageClasses().List(a.Context, metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    for _, sc := range scs.Items {
        var failures []common.Failure

        // Check for deprecated storage classes
        if sc.Provisioner == "kubernetes.io/no-provisioner" {
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("StorageClass %s uses deprecated provisioner 'kubernetes.io/no-provisioner'", sc.Name),
                Sensitive: []common.Sensitive{},
            })
        }

        // Check for default storage class
        if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
            // Check if there are multiple default storage classes
            defaultCount := 0
            for _, otherSc := range scs.Items {
                if otherSc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
                    defaultCount++
                }
            }
            if defaultCount > 1 {
                failures = append(failures, common.Failure{
                    Text:      fmt.Sprintf("Multiple default StorageClasses found (%d), which can cause confusion", defaultCount),
                    Sensitive: []common.Sensitive{},
                })
            }
        }

        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  "Storage/StorageClass",
                Name:  sc.Name,
                Error: failures,
            })
            AnalyzerErrorsMetric.WithLabelValues("Storage/StorageClass", sc.Name, "").Set(float64(len(failures)))
        }
    }

    return results, nil
}

func analyzePersistentVolumes(a common.Analyzer) ([]common.Result, error) {
    var results []common.Result

    pvs, err := a.Client.GetClient().CoreV1().PersistentVolumes().List(a.Context, metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    for _, pv := range pvs.Items {
        var failures []common.Failure

        // Check for released PVs
        if pv.Status.Phase == v1.VolumeReleased {
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("PersistentVolume %s is in Released state and should be cleaned up", pv.Name),
                Sensitive: []common.Sensitive{},
            })
        }

        // Check for failed PVs
        if pv.Status.Phase == v1.VolumeFailed {
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("PersistentVolume %s is in Failed state", pv.Name),
                Sensitive: []common.Sensitive{},
            })
        }

        // Check for small PVs (less than 1Gi)
        if capacity, ok := pv.Spec.Capacity[v1.ResourceStorage]; ok {
            if capacity.Cmp(resource.MustParse("1Gi")) < 0 {
                failures = append(failures, common.Failure{
                    Text:      fmt.Sprintf("PersistentVolume %s has small capacity (%s)", pv.Name, capacity.String()),
                    Sensitive: []common.Sensitive{},
                })
            }
        }

        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  "Storage/PersistentVolume",
                Name:  pv.Name,
                Error: failures,
            })
            AnalyzerErrorsMetric.WithLabelValues("Storage/PersistentVolume", pv.Name, "").Set(float64(len(failures)))
        }
    }

    return results, nil
}

func analyzePersistentVolumeClaims(a common.Analyzer) ([]common.Result, error) {
    var results []common.Result

    pvcs, err := a.Client.GetClient().CoreV1().PersistentVolumeClaims(a.Namespace).List(a.Context, metav1.ListOptions{
        LabelSelector: a.LabelSelector,
    })
    if err != nil {
        return nil, err
    }

    for _, pvc := range pvcs.Items {
        var failures []common.Failure

        // Check for PVC state issues first (most critical)
        switch pvc.Status.Phase {
        case v1.ClaimPending:
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("PersistentVolumeClaim %s is in Pending state", pvc.Name),
                Sensitive: []common.Sensitive{},
            })
        case v1.ClaimLost:
            failures = append(failures, common.Failure{
                Text:      fmt.Sprintf("PersistentVolumeClaim %s is in Lost state", pvc.Name),
                Sensitive: []common.Sensitive{},
            })
        default:
            // Only check other issues if PVC is not in a critical state
            if capacity, ok := pvc.Spec.Resources.Requests[v1.ResourceStorage]; ok {
                if capacity.Cmp(resource.MustParse("1Gi")) < 0 {
                    failures = append(failures, common.Failure{
                        Text:      fmt.Sprintf("PersistentVolumeClaim %s has small capacity (%s)", pvc.Name, capacity.String()),
                        Sensitive: []common.Sensitive{},
                    })
                }
            }

            // Check for missing storage class
            if pvc.Spec.StorageClassName == nil && pvc.Spec.VolumeName == "" {
                failures = append(failures, common.Failure{
                    Text:      fmt.Sprintf("PersistentVolumeClaim %s has no StorageClass specified", pvc.Name),
                    Sensitive: []common.Sensitive{},
                })
            }
        }

        // Only report the first failure found
        if len(failures) > 0 {
            results = append(results, common.Result{
                Kind:  "Storage/PersistentVolumeClaim",
                Name:  fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name),
                Error: failures[:1],
            })
            AnalyzerErrorsMetric.WithLabelValues("Storage/PersistentVolumeClaim", pvc.Name, pvc.Namespace).Set(1)
        }
    }

    return results, nil
}
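Both the PersistentVolume and PersistentVolumeClaim checks above compare capacities with `resource.Quantity.Cmp`, which compares the parsed values numerically regardless of the unit suffix they were written with. The following standalone sketch is not part of this commit; it only illustrates the `Cmp(...) < 0` condition the storage analyzer uses.

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    threshold := resource.MustParse("1Gi")

    // Cmp returns -1, 0, or 1, so "< 0" means the capacity is below the
    // threshold, which is the condition the storage analyzer flags.
    for _, raw := range []string{"500Mi", "1Gi", "2Gi"} {
        capacity := resource.MustParse(raw)
        fmt.Printf("%s below 1Gi: %v\n", raw, capacity.Cmp(threshold) < 0)
    }
}
```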
pkg/analyzer/storage_test.go (new file, 254 lines)
@@ -0,0 +1,254 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
    "context"
    "testing"

    "github.com/k8sgpt-ai/k8sgpt/pkg/common"
    "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func TestStorageAnalyzer(t *testing.T) {
    tests := []struct {
        name           string
        namespace      string
        storageClasses []storagev1.StorageClass
        pvs            []v1.PersistentVolume
        pvcs           []v1.PersistentVolumeClaim
        expectedErrors int
    }{
        {
            name:      "Deprecated StorageClass",
            namespace: "default",
            storageClasses: []storagev1.StorageClass{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "deprecated-sc",
                    },
                    Provisioner: "kubernetes.io/no-provisioner",
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "Multiple Default StorageClasses",
            namespace: "default",
            storageClasses: []storagev1.StorageClass{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "default-sc1",
                        Annotations: map[string]string{
                            "storageclass.kubernetes.io/is-default-class": "true",
                        },
                    },
                    Provisioner: "kubernetes.io/gce-pd",
                },
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "default-sc2",
                        Annotations: map[string]string{
                            "storageclass.kubernetes.io/is-default-class": "true",
                        },
                    },
                    Provisioner: "kubernetes.io/aws-ebs",
                },
            },
            expectedErrors: 2,
        },
        {
            name:      "Released PV",
            namespace: "default",
            pvs: []v1.PersistentVolume{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "released-pv",
                    },
                    Status: v1.PersistentVolumeStatus{
                        Phase: v1.VolumeReleased,
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "Failed PV",
            namespace: "default",
            pvs: []v1.PersistentVolume{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "failed-pv",
                    },
                    Status: v1.PersistentVolumeStatus{
                        Phase: v1.VolumeFailed,
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "Small PV",
            namespace: "default",
            pvs: []v1.PersistentVolume{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name: "small-pv",
                    },
                    Spec: v1.PersistentVolumeSpec{
                        Capacity: v1.ResourceList{
                            v1.ResourceStorage: resource.MustParse("500Mi"),
                        },
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "Pending PVC",
            namespace: "default",
            pvcs: []v1.PersistentVolumeClaim{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "pending-pvc",
                        Namespace: "default",
                    },
                    Status: v1.PersistentVolumeClaimStatus{
                        Phase: v1.ClaimPending,
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "Lost PVC",
            namespace: "default",
            pvcs: []v1.PersistentVolumeClaim{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "lost-pvc",
                        Namespace: "default",
                    },
                    Status: v1.PersistentVolumeClaimStatus{
                        Phase: v1.ClaimLost,
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "Small PVC",
            namespace: "default",
            pvcs: []v1.PersistentVolumeClaim{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "small-pvc",
                        Namespace: "default",
                    },
                    Spec: v1.PersistentVolumeClaimSpec{
                        Resources: v1.VolumeResourceRequirements{
                            Requests: v1.ResourceList{
                                v1.ResourceStorage: resource.MustParse("500Mi"),
                            },
                        },
                    },
                },
            },
            expectedErrors: 1,
        },
        {
            name:      "PVC without StorageClass",
            namespace: "default",
            pvcs: []v1.PersistentVolumeClaim{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      "no-sc-pvc",
                        Namespace: "default",
                    },
                    Spec: v1.PersistentVolumeClaimSpec{
                        Resources: v1.VolumeResourceRequirements{
                            Requests: v1.ResourceList{
                                v1.ResourceStorage: resource.MustParse("1Gi"),
                            },
                        },
                    },
                },
            },
            expectedErrors: 1,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Create fake client
            client := fake.NewSimpleClientset()

            // Create test resources
            for _, sc := range tt.storageClasses {
                _, err := client.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
                if err != nil {
                    t.Fatalf("Failed to create StorageClass: %v", err)
                }
            }

            for _, pv := range tt.pvs {
                _, err := client.CoreV1().PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{})
                if err != nil {
                    t.Fatalf("Failed to create PV: %v", err)
                }
            }

            for _, pvc := range tt.pvcs {
                _, err := client.CoreV1().PersistentVolumeClaims(tt.namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
                if err != nil {
                    t.Fatalf("Failed to create PVC: %v", err)
                }
            }

            // Create analyzer
            analyzer := StorageAnalyzer{}

            // Create analyzer config
            config := common.Analyzer{
                Client: &kubernetes.Client{
                    Client: client,
                },
                Context:   context.TODO(),
                Namespace: tt.namespace,
            }

            // Run analysis
            results, err := analyzer.Analyze(config)
            if err != nil {
                t.Fatalf("Failed to run analysis: %v", err)
            }

            // Count total errors
            totalErrors := 0
            for _, result := range results {
                totalErrors += len(result.Error)
            }

            // Check error count
            if totalErrors != tt.expectedErrors {
                t.Errorf("Expected %d errors, got %d", tt.expectedErrors, totalErrors)
            }
        })
    }
}
pkg/analyzer/test_utils.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package analyzer

// Helper functions for tests
func boolPtr(b bool) *bool {
    return &b
}

func int64Ptr(i int64) *int64 {
    return &i
}