diff --git a/README.md b/README.md index 7fad0a0..682fe89 100644 --- a/README.md +++ b/README.md @@ -141,11 +141,14 @@ you will be able to write your own analyzers. - [x] eventAnalyzer - [x] ingressAnalyzer - [x] statefulSetAnalyzer +- [x] deploymentAnalyzer +- [x] cronJobAnalyzer #### Optional - [x] hpaAnalyzer - [x] pdbAnalyzer +- [x] networkPolicyAnalyzer ## Usage diff --git a/go.mod b/go.mod index 01a3541..7a3178c 100644 --- a/go.mod +++ b/go.mod @@ -119,6 +119,7 @@ require ( github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect + github.com/robfig/cron/v3 v3.0.1 github.com/rubenv/sql-migrate v1.3.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.37.0 // indirect diff --git a/go.sum b/go.sum index 6050187..846f5f0 100644 --- a/go.sum +++ b/go.sum @@ -607,6 +607,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/pkg/analyzer/analyzer.go b/pkg/analyzer/analyzer.go index e74eb00..d5b2d3f 100644 --- a/pkg/analyzer/analyzer.go +++ b/pkg/analyzer/analyzer.go @@ -11,16 +11,19 @@ import ( var coreAnalyzerMap = map[string]common.IAnalyzer{ "Pod": PodAnalyzer{}, + "Deployment": 
DeploymentAnalyzer{},
 	"ReplicaSet":            ReplicaSetAnalyzer{},
 	"PersistentVolumeClaim": PvcAnalyzer{},
 	"Service":               ServiceAnalyzer{},
 	"Ingress":               IngressAnalyzer{},
 	"StatefulSet":           StatefulSetAnalyzer{},
+	"CronJob":               CronJobAnalyzer{},
 }
 
 var additionalAnalyzerMap = map[string]common.IAnalyzer{
 	"HorizontalPodAutoScaler": HpaAnalyzer{},
 	"PodDisruptionBudget":     PdbAnalyzer{},
+	"NetworkPolicy":           NetworkPolicyAnalyzer{},
 }
 
 func ListFilters() ([]string, []string, []string) {
diff --git a/pkg/analyzer/cronjob.go b/pkg/analyzer/cronjob.go
new file mode 100644
index 0000000..db773bb
--- /dev/null
+++ b/pkg/analyzer/cronjob.go
@@ -0,0 +1,110 @@
+package analyzer
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
+	cron "github.com/robfig/cron/v3"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CronJobAnalyzer flags CronJobs that are suspended, have an invalid
+// schedule expression, or carry a negative starting deadline.
+type CronJobAnalyzer struct{}
+
+// Analyze lists CronJobs in all namespaces and returns one Result per
+// CronJob with at least one detected failure.
+func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
+	var results []common.Result
+
+	cronJobList, err := a.Client.GetClient().BatchV1().CronJobs("").List(a.Context, v1.ListOptions{})
+	if err != nil {
+		return results, err
+	}
+
+	var preAnalysis = map[string]common.PreAnalysis{}
+
+	for _, cronJob := range cronJobList.Items {
+		var failures []common.Failure
+		if cronJob.Spec.Suspend != nil && *cronJob.Spec.Suspend {
+			failures = append(failures, common.Failure{
+				Text: fmt.Sprintf("CronJob %s is suspended", cronJob.Name),
+				Sensitive: []common.Sensitive{
+					{
+						Unmasked: cronJob.Namespace,
+						Masked:   util.MaskString(cronJob.Namespace),
+					},
+					{
+						Unmasked: cronJob.Name,
+						Masked:   util.MaskString(cronJob.Name),
+					},
+				},
+			})
+		} else {
+			// check the schedule format
+			if _, err := CheckCronScheduleIsValid(cronJob.Spec.Schedule); err != nil {
+				failures = append(failures, common.Failure{
+					Text: fmt.Sprintf("CronJob %s has an invalid schedule: %s", cronJob.Name, err.Error()),
+					Sensitive: []common.Sensitive{
+						{
+							Unmasked: cronJob.Namespace,
+							Masked:   util.MaskString(cronJob.Namespace),
+						},
+						{
+							Unmasked: cronJob.Name,
+							Masked:   util.MaskString(cronJob.Name),
+						},
+					},
+				})
+			}
+
+			// check the starting deadline
+			if cronJob.Spec.StartingDeadlineSeconds != nil {
+				deadline := time.Duration(*cronJob.Spec.StartingDeadlineSeconds) * time.Second
+				if deadline < 0 {
+					failures = append(failures, common.Failure{
+						Text: fmt.Sprintf("CronJob %s has a negative starting deadline", cronJob.Name),
+						Sensitive: []common.Sensitive{
+							{
+								Unmasked: cronJob.Namespace,
+								Masked:   util.MaskString(cronJob.Namespace),
+							},
+							{
+								Unmasked: cronJob.Name,
+								Masked:   util.MaskString(cronJob.Name),
+							},
+						},
+					})
+				}
+			}
+		}
+
+		if len(failures) > 0 {
+			preAnalysis[cronJob.Name] = common.PreAnalysis{
+				FailureDetails: failures,
+			}
+		}
+	}
+
+	// BUG FIX: this aggregation previously ran inside the CronJob loop and
+	// did `a.Results = append(results, currentAnalysis)` — rebuilding
+	// a.Results from the empty `results` slice on every pass, so only the
+	// last finding survived. Aggregate once, after all CronJobs are scanned.
+	for key, value := range preAnalysis {
+		currentAnalysis := common.Result{
+			Kind:  "CronJob",
+			Name:  key,
+			Error: value.FailureDetails,
+		}
+		a.Results = append(a.Results, currentAnalysis)
+	}
+
+	return a.Results, nil
+}
+
+// CheckCronScheduleIsValid reports whether schedule parses as a standard
+// five-field cron expression (robfig/cron ParseStandard).
+func CheckCronScheduleIsValid(schedule string) (bool, error) {
+	_, err := cron.ParseStandard(schedule)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
diff --git a/pkg/analyzer/cronjob_test.go b/pkg/analyzer/cronjob_test.go
new file mode 100644
index 0000000..b6e31c8
--- /dev/null
+++ b/pkg/analyzer/cronjob_test.go
@@ -0,0 +1,126 @@
+package analyzer
+
+import (
+	"context"
+	"testing"
+
+	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
+	"github.com/magiconair/properties/assert"
+	batchv1 "k8s.io/api/batch/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+func TestCronJobSuccess(t *testing.T) {
+	clientset := fake.NewSimpleClientset(&batchv1.CronJob{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "example-cronjob",
+			Namespace: "default",
+			Annotations: map[string]string{
+				"analysisDate": "2022-04-01",
+			},
+			Labels:
map[string]string{ + "app": "example-app", + }, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "*/1 * * * *", + ConcurrencyPolicy: "Allow", + JobTemplate: batchv1.JobTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "example-app", + }, + }, + Spec: batchv1.JobSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "example-container", + Image: "nginx", + }, + }, + RestartPolicy: v1.RestartPolicyOnFailure, + }, + }, + }, + }, + }, + }) + + config := common.Analyzer{ + Client: &kubernetes.Client{ + Client: clientset, + }, + Context: context.Background(), + Namespace: "default", + } + + analyzer := CronJobAnalyzer{} + analysisResults, err := analyzer.Analyze(config) + if err != nil { + t.Error(err) + } + + assert.Equal(t, len(analysisResults), 0) +} + +func TestCronJobBroken(t *testing.T) { + clientset := fake.NewSimpleClientset(&batchv1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-cronjob", + Namespace: "default", + Annotations: map[string]string{ + "analysisDate": "2022-04-01", + }, + Labels: map[string]string{ + "app": "example-app", + }, + }, + Spec: batchv1.CronJobSpec{ + Schedule: "*** * * * *", + ConcurrencyPolicy: "Allow", + JobTemplate: batchv1.JobTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "example-app", + }, + }, + Spec: batchv1.JobSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "example-container", + Image: "nginx", + }, + }, + RestartPolicy: v1.RestartPolicyOnFailure, + }, + }, + }, + }, + }, + }) + + config := common.Analyzer{ + Client: &kubernetes.Client{ + Client: clientset, + }, + Context: context.Background(), + Namespace: "default", + } + + analyzer := CronJobAnalyzer{} + analysisResults, err := analyzer.Analyze(config) + if err != nil { + t.Error(err) + } + + assert.Equal(t, len(analysisResults), 1) + assert.Equal(t, analysisResults[0].Name, "example-cronjob") + 
assert.Equal(t, analysisResults[0].Kind, "CronJob") +} diff --git a/pkg/analyzer/deployment.go b/pkg/analyzer/deployment.go new file mode 100644 index 0000000..4f3b77c --- /dev/null +++ b/pkg/analyzer/deployment.go @@ -0,0 +1,62 @@ +package analyzer + +import ( + "context" + "fmt" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/k8sgpt-ai/k8sgpt/pkg/common" + "github.com/k8sgpt-ai/k8sgpt/pkg/util" +) + +// DeploymentAnalyzer is an analyzer that checks for misconfigured Deployments +type DeploymentAnalyzer struct { +} + +// Analyze scans all namespaces for Deployments with misconfigurations +func (d DeploymentAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) { + + deployments, err := a.Client.GetClient().AppsV1().Deployments("").List(context.Background(), v1.ListOptions{}) + if err != nil { + return nil, err + } + var preAnalysis = map[string]common.PreAnalysis{} + + for _, deployment := range deployments.Items { + var failures []common.Failure + if *deployment.Spec.Replicas != deployment.Status.Replicas { + failures = append(failures, common.Failure{ + Text: fmt.Sprintf("Deployment %s/%s has %d replicas but %d are available", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.Replicas), + Sensitive: []common.Sensitive{ + { + Unmasked: deployment.Namespace, + Masked: util.MaskString(deployment.Namespace), + }, + { + Unmasked: deployment.Name, + Masked: util.MaskString(deployment.Name), + }, + }}) + } + if len(failures) > 0 { + preAnalysis[fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)] = common.PreAnalysis{ + FailureDetails: failures, + Deployment: deployment, + } + } + + } + + for key, value := range preAnalysis { + var currentAnalysis = common.Result{ + Kind: "Deployment", + Name: key, + Error: value.FailureDetails, + } + + a.Results = append(a.Results, currentAnalysis) + } + + return a.Results, nil +} diff --git a/pkg/analyzer/deployment_test.go b/pkg/analyzer/deployment_test.go new file mode 
100644 index 0000000..1365c85 --- /dev/null +++ b/pkg/analyzer/deployment_test.go @@ -0,0 +1,62 @@ +package analyzer + +import ( + "context" + "testing" + + "github.com/k8sgpt-ai/k8sgpt/pkg/common" + "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes" + "github.com/magiconair/properties/assert" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestDeploymentAnalyzer(t *testing.T) { + clientset := fake.NewSimpleClientset(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: func() *int32 { i := int32(3); return &i }(), + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "example-container", + Image: "nginx", + Ports: []v1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 2, + AvailableReplicas: 1, + }, + }) + + config := common.Analyzer{ + Client: &kubernetes.Client{ + Client: clientset, + }, + Context: context.Background(), + Namespace: "default", + } + + deploymentAnalyzer := DeploymentAnalyzer{} + analysisResults, err := deploymentAnalyzer.Analyze(config) + if err != nil { + t.Error(err) + } + assert.Equal(t, len(analysisResults), 1) + assert.Equal(t, analysisResults[0].Kind, "Deployment") + assert.Equal(t, analysisResults[0].Name, "default/example") +} diff --git a/pkg/analyzer/netpol.go b/pkg/analyzer/netpol.go new file mode 100644 index 0000000..52115c3 --- /dev/null +++ b/pkg/analyzer/netpol.go @@ -0,0 +1,74 @@ +package analyzer + +import ( + "fmt" + + "github.com/k8sgpt-ai/k8sgpt/pkg/common" + "github.com/k8sgpt-ai/k8sgpt/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type NetworkPolicyAnalyzer struct{} + +func (NetworkPolicyAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) { + // get all network policies in the namespace + policies, 
err := a.Client.GetClient().NetworkingV1().
+		NetworkPolicies(a.Namespace).List(a.Context, metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	var preAnalysis = map[string]common.PreAnalysis{}
+
+	for _, policy := range policies.Items {
+		var failures []common.Failure
+
+		// Check if policy allows traffic to all pods in the namespace
+		if len(policy.Spec.PodSelector.MatchLabels) == 0 {
+			failures = append(failures, common.Failure{
+				Text: fmt.Sprintf("Network policy allows traffic to all pods: %s", policy.Name),
+				Sensitive: []common.Sensitive{
+					{
+						Unmasked: policy.Name,
+						Masked:   util.MaskString(policy.Name),
+					},
+				},
+			})
+			// BUG FIX: record the failure before skipping to the next policy.
+			// The previous bare `continue` jumped past the preAnalysis
+			// bookkeeping at the bottom of the loop, so this finding was
+			// detected but never reported.
+			preAnalysis[policy.Name] = common.PreAnalysis{
+				FailureDetails: failures,
+				NetworkPolicy:  policy,
+			}
+			continue
+		}
+		// Check if policy is not applied to any pods
+		podList, err := util.GetPodListByLabels(a.Client.GetClient(), a.Namespace, policy.Spec.PodSelector.MatchLabels)
+		if err != nil {
+			return nil, err
+		}
+		if len(podList.Items) == 0 {
+			failures = append(failures, common.Failure{
+				Text: fmt.Sprintf("Network policy is not applied to any pods: %s", policy.Name),
+				Sensitive: []common.Sensitive{
+					{
+						Unmasked: policy.Name,
+						Masked:   util.MaskString(policy.Name),
+					},
+				},
+			})
+		}
+
+		if len(failures) > 0 {
+			preAnalysis[policy.Name] = common.PreAnalysis{
+				FailureDetails: failures,
+				NetworkPolicy:  policy,
+			}
+		}
+	}
+
+	for key, value := range preAnalysis {
+		currentAnalysis := common.Result{
+			Kind:  "NetworkPolicy",
+			Name:  key,
+			Error: value.FailureDetails,
+		}
+		a.Results = append(a.Results, currentAnalysis)
+	}
+
+	return a.Results, nil
+}
diff --git a/pkg/analyzer/netpol_test.go b/pkg/analyzer/netpol_test.go
new file mode 100644
index 0000000..e3fcc87
--- /dev/null
+++ b/pkg/analyzer/netpol_test.go
@@ -0,0 +1,122 @@
+package analyzer
+
+import (
+	"context"
+	"testing"
+
+	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
+	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
+	"github.com/magiconair/properties/assert"
+	v1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	metav1
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestNetpolNoPods(t *testing.T) { + clientset := fake.NewSimpleClientset(&networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "example", + }, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "database", + }, + }, + }, + }, + }, + }, + }, + }) + + config := common.Analyzer{ + Client: &kubernetes.Client{ + Client: clientset, + }, + Context: context.Background(), + Namespace: "default", + } + + analyzer := NetworkPolicyAnalyzer{} + results, err := analyzer.Analyze(config) + if err != nil { + t.Error(err) + } + + assert.Equal(t, len(results), 1) + assert.Equal(t, results[0].Kind, "NetworkPolicy") + +} + +func TestNetpolWithPod(t *testing.T) { + clientset := fake.NewSimpleClientset(&networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "example", + }, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "database", + }, + }, + }, + }, + }, + }, + }, + }, &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: "default", + Labels: map[string]string{ + "app": "example", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "example", + Image: "example", + }, + }, + }, + }) + + config := common.Analyzer{ + Client: &kubernetes.Client{ + Client: clientset, + }, + Context: context.Background(), + Namespace: "default", + } + + analyzer := 
NetworkPolicyAnalyzer{} + results, err := analyzer.Analyze(config) + if err != nil { + t.Error(err) + } + + assert.Equal(t, len(results), 0) +} diff --git a/pkg/common/types.go b/pkg/common/types.go index 1d913f7..3fbf4ed 100644 --- a/pkg/common/types.go +++ b/pkg/common/types.go @@ -29,6 +29,7 @@ type Analyzer struct { type PreAnalysis struct { Pod v1.Pod FailureDetails []Failure + Deployment appsv1.Deployment ReplicaSet appsv1.ReplicaSet PersistentVolumeClaim v1.PersistentVolumeClaim Endpoint v1.Endpoints @@ -36,6 +37,7 @@ type PreAnalysis struct { HorizontalPodAutoscalers autov1.HorizontalPodAutoscaler PodDisruptionBudget policyv1.PodDisruptionBudget StatefulSet appsv1.StatefulSet + NetworkPolicy networkv1.NetworkPolicy // Integrations TrivyVulnerabilityReport trivy.VulnerabilityReport } diff --git a/pkg/util/util.go b/pkg/util/util.go index 8cf273c..3bd8f0d 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -8,7 +8,9 @@ import ( "regexp" "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k "k8s.io/client-go/kubernetes" ) var anonymizePattern = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()-_=+[]{}|;':\",./<>?") @@ -133,3 +135,18 @@ func ReplaceIfMatch(text string, pattern string, replacement string) string { func GetCacheKey(provider string, sEnc string) string { return fmt.Sprintf("%s-%s", provider, sEnc) } + +func GetPodListByLabels(client k.Interface, + namespace string, + labels map[string]string) (*v1.PodList, error) { + pods, err := client.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{ + MatchLabels: labels, + }), + }) + if err != nil { + return nil, err + } + + return pods, nil +}