mirror of https://github.com/k8sgpt-ai/k8sgpt.git
synced 2025-04-27 11:11:31 +00:00

feat: improved test coverage (#1455)

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

This commit is contained in:
  parent cf6f9289e1
  commit 80904e3063
@@ -22,179 +22,283 @@ import (
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/stretchr/testify/require"
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)
The previous TestCronJobAnalyzer built a single fake clientset (CronJobs CJ1 through CJ5 plus a "successful-cronjob" fixture) and asserted exactly three failing results: default/CJ2 (suspended), default/CJ3 (negative starting deadline) and default/CJ4 (invalid schedule). It is replaced by table-driven subtests:

func TestCronJobAnalyzer(t *testing.T) {
	tests := []struct {
		name         string
		config       common.Analyzer
		expectations []struct {
			name          string
			failuresCount int
		}
	}{
		{
			name: "Suspended CronJob",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&batchv1.CronJob{
							ObjectMeta: metav1.ObjectMeta{
								Labels: map[string]string{
									"app": "example-app",
								},
								Name:      "suspended-job",
								Namespace: "default",
							},
							Spec: batchv1.CronJobSpec{
								Schedule: "*/5 * * * *",
								Suspend:  boolPtr(true),
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/suspended-job",
					failuresCount: 1, // One failure for being suspended
				},
			},
		},
		{
			name: "Invalid schedule format",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&batchv1.CronJob{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "invalid-schedule",
								Namespace: "default",
							},
							Spec: batchv1.CronJobSpec{
								Schedule: "invalid-cron", // Invalid cron format
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/invalid-schedule",
					failuresCount: 1, // One failure for invalid schedule
				},
			},
		},
		{
			name: "Negative starting deadline",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&batchv1.CronJob{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "negative-deadline",
								Namespace: "default",
							},
							Spec: batchv1.CronJobSpec{
								Schedule:                "*/5 * * * *",
								StartingDeadlineSeconds: int64Ptr(-60), // Negative deadline
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/negative-deadline",
					failuresCount: 1, // One failure for negative deadline
				},
			},
		},
		{
			name: "Valid CronJob",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&batchv1.CronJob{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "valid-job",
								Namespace: "default",
							},
							Spec: batchv1.CronJobSpec{
								Schedule: "*/5 * * * *", // Valid cron format
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				// No expectations for valid job
			},
		},
		{
			name: "Multiple issues",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&batchv1.CronJob{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "multiple-issues",
								Namespace: "default",
							},
							Spec: batchv1.CronJobSpec{
								Schedule:                "invalid-cron",
								StartingDeadlineSeconds: int64Ptr(-60),
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/multiple-issues",
					failuresCount: 2, // Two failures: invalid schedule and negative deadline
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			analyzer := CronJobAnalyzer{}
			results, err := analyzer.Analyze(tt.config)
			require.NoError(t, err)
			require.Len(t, results, len(tt.expectations))

			// Sort results by name for consistent comparison
			sort.Slice(results, func(i, j int) bool {
				return results[i].Name < results[j].Name
			})

			for i, expectation := range tt.expectations {
				require.Equal(t, expectation.name, results[i].Name)
				require.Len(t, results[i].Error, expectation.failuresCount)
			}
		})
	}
}
TestCronJobAnalyzerLabelSelectorFiltering (which reused the suspend and starting-deadline fixtures and expected default/CJ1 via LabelSelector "app=cronjob") is renamed and simplified:

func TestCronJobAnalyzerLabelSelector(t *testing.T) {
	clientSet := fake.NewSimpleClientset(
		&batchv1.CronJob{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "job-with-label",
				Namespace: "default",
				Labels: map[string]string{
					"app": "test",
				},
			},
			Spec: batchv1.CronJobSpec{
				Schedule: "invalid-cron", // This should trigger a failure
			},
		},
		&batchv1.CronJob{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "job-without-label",
				Namespace: "default",
			},
			Spec: batchv1.CronJobSpec{
				Schedule: "invalid-cron", // This should trigger a failure
			},
		},
	)

	// Test with label selector
	config := common.Analyzer{
		Client: &kubernetes.Client{
			Client: clientSet,
		},
		Context:       context.Background(),
		Namespace:     "default",
		LabelSelector: "app=test",
	}

	analyzer := CronJobAnalyzer{}
	results, err := analyzer.Analyze(config)
	require.NoError(t, err)
	require.Equal(t, 1, len(results))
	require.Equal(t, "default/job-with-label", results[0].Name)
}
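The same filtering can be sanity-checked against the fake clientset directly, without going through the analyzer. A minimal sketch; the List call and options are standard client-go, but the test itself is illustrative and not part of this commit:

func TestFakeClientHonorsLabelSelector(t *testing.T) {
	clientSet := fake.NewSimpleClientset(
		&batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{
			Name:      "job-with-label",
			Namespace: "default",
			Labels:    map[string]string{"app": "test"},
		}},
		&batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{
			Name:      "job-without-label",
			Namespace: "default",
		}},
	)

	// The generated fakes filter List results by the same selector
	// syntax the analyzer forwards to the API server.
	list, err := clientSet.BatchV1().CronJobs("default").List(
		context.Background(),
		metav1.ListOptions{LabelSelector: "app=test"},
	)
	require.NoError(t, err)
	require.Len(t, list.Items, 1)
	require.Equal(t, "job-with-label", list.Items[0].Name)
}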
func TestCheckCronScheduleIsValid(t *testing.T) {
	tests := []struct {
		name     string
		schedule string
		wantErr  bool
	}{
		{
			name:     "Valid schedule - every 5 minutes",
			schedule: "*/5 * * * *",
			wantErr:  false,
		},
		{
			name:     "Valid schedule - specific time",
			schedule: "0 2 * * *",
			wantErr:  false,
		},
		{
			name:     "Valid schedule - complex",
			schedule: "0 0 1,15 * 3",
			wantErr:  false,
		},
		{
			name:     "Invalid schedule - wrong format",
			schedule: "invalid-cron",
			wantErr:  true,
		},
		{
			name:     "Invalid schedule - too many fields",
			schedule: "* * * * * *",
			wantErr:  true,
		},
		{
			name:     "Invalid schedule - empty string",
			schedule: "",
			wantErr:  true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := CheckCronScheduleIsValid(tt.schedule)
			if tt.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

// Helper functions
func boolPtr(b bool) *bool {
	return &b
}

func int64Ptr(i int64) *int64 {
	return &i
}
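CheckCronScheduleIsValid is exercised above but not shown in this hunk. A validator consistent with these cases is a thin wrapper over a standard five-field cron parser; a sketch assuming github.com/robfig/cron/v3, while the actual k8sgpt implementation may differ:

import "github.com/robfig/cron/v3"

// CheckCronScheduleIsValid reports whether schedule parses as a standard
// five-field cron expression: "*/5 * * * *" and "0 0 1,15 * 3" parse,
// while "invalid-cron", the six-field "* * * * * *", and "" return errors.
func CheckCronScheduleIsValid(schedule string) (bool, error) {
	if _, err := cron.ParseStandard(schedule); err != nil {
		return false, err
	}
	return true, nil
}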
@@ -28,213 +28,334 @@ import (
)

The previous TestIngressAnalyzer built one clientset (Ingress1 through Ingress5, a valid IngressClass, Service1/Service2, Secret1/Secret2, and an HTTP rule referencing Service1, Service2 and the missing Service3) and asserted default/Ingress1 with 1 failure, default/Ingress2 with 1 failure and default/Ingress5 with 4 failures. It is replaced by table-driven subtests:

func TestIngressAnalyzer(t *testing.T) {
	tests := []struct {
		name         string
		config       common.Analyzer
		expectations []struct {
			name          string
			failuresCount int
		}
	}{
		{
			name: "Missing ingress class",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&networkingv1.Ingress{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "no-class",
								Namespace: "default",
							},
							Spec: networkingv1.IngressSpec{
								// No ingress class specified
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/no-class",
					failuresCount: 1, // One failure for missing ingress class
				},
			},
		},
		{
			name: "Non-existent ingress class",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&networkingv1.Ingress{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "bad-class",
								Namespace: "default",
							},
							Spec: networkingv1.IngressSpec{
								IngressClassName: strPtr("non-existent"),
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/bad-class",
					failuresCount: 1, // One failure for non-existent ingress class
				},
			},
		},
		{
			name: "Non-existent backend service",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&networkingv1.Ingress{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "bad-backend",
								Namespace: "default",
								Annotations: map[string]string{
									"kubernetes.io/ingress.class": "nginx",
								},
							},
							Spec: networkingv1.IngressSpec{
								Rules: []networkingv1.IngressRule{
									{
										Host: "example.com",
										IngressRuleValue: networkingv1.IngressRuleValue{
											HTTP: &networkingv1.HTTPIngressRuleValue{
												Paths: []networkingv1.HTTPIngressPath{
													{
														Path:     "/",
														PathType: pathTypePtr(networkingv1.PathTypePrefix),
														Backend: networkingv1.IngressBackend{
															Service: &networkingv1.IngressServiceBackend{
																Name: "non-existent-service",
																Port: networkingv1.ServiceBackendPort{
																	Number: 80,
																},
															},
														},
													},
												},
											},
										},
									},
								},
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/bad-backend",
					failuresCount: 2, // Two failures: non-existent ingress class and non-existent service
				},
			},
		},
		{
			name: "Non-existent TLS secret",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&networkingv1.Ingress{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "bad-tls",
								Namespace: "default",
								Annotations: map[string]string{
									"kubernetes.io/ingress.class": "nginx",
								},
							},
							Spec: networkingv1.IngressSpec{
								TLS: []networkingv1.IngressTLS{
									{
										Hosts:      []string{"example.com"},
										SecretName: "non-existent-secret",
									},
								},
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/bad-tls",
					failuresCount: 2, // Two failures: non-existent ingress class and non-existent TLS secret
				},
			},
		},
		{
			name: "Valid ingress with all components",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&networkingv1.Ingress{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "valid-ingress",
								Namespace: "default",
							},
							Spec: networkingv1.IngressSpec{
								IngressClassName: strPtr("nginx"),
							},
						},
						&networkingv1.IngressClass{
							ObjectMeta: metav1.ObjectMeta{
								Name: "nginx",
							},
						},
						&v1.Service{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "backend-service",
								Namespace: "default",
							},
						},
						&v1.Secret{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "tls-secret",
								Namespace: "default",
							},
							Type: v1.SecretTypeTLS,
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				// No expectations for valid ingress
			},
		},
		{
			name: "Multiple issues",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&networkingv1.Ingress{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "multiple-issues",
								Namespace: "default",
							},
							Spec: networkingv1.IngressSpec{
								IngressClassName: strPtr("non-existent"),
								Rules: []networkingv1.IngressRule{
									{
										Host: "example.com",
										IngressRuleValue: networkingv1.IngressRuleValue{
											HTTP: &networkingv1.HTTPIngressRuleValue{
												Paths: []networkingv1.HTTPIngressPath{
													{
														Path:     "/",
														PathType: pathTypePtr(networkingv1.PathTypePrefix),
														Backend: networkingv1.IngressBackend{
															Service: &networkingv1.IngressServiceBackend{
																Name: "non-existent-service",
																Port: networkingv1.ServiceBackendPort{
																	Number: 80,
																},
															},
														},
													},
												},
											},
										},
									},
								},
								TLS: []networkingv1.IngressTLS{
									{
										Hosts:      []string{"example.com"},
										SecretName: "non-existent-secret",
									},
								},
							},
						},
					),
				},
				Context:   context.Background(),
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/multiple-issues",
					failuresCount: 3, // Three failures: ingress class, service, and TLS secret
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			analyzer := IngressAnalyzer{}
			results, err := analyzer.Analyze(tt.config)
			require.NoError(t, err)
			require.Len(t, results, len(tt.expectations))

			// Sort results by name for consistent comparison
			sort.Slice(results, func(i, j int) bool {
				return results[i].Name < results[j].Name
			})

			for i, expectation := range tt.expectations {
				require.Equal(t, expectation.name, results[i].Name)
				require.Len(t, results[i].Error, expectation.failuresCount)
			}
		})
	}
}
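Because every scenario runs under t.Run, a single case can be targeted from the command line; Go replaces spaces in subtest names with underscores. Assuming the analyzer tests live under pkg/analyzer as in the k8sgpt layout:

	go test ./pkg/analyzer/ -v -run 'TestIngressAnalyzer/Multiple_issues'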
TestIngressAnalyzerLabelSelectorFiltering (which reused validIgClassName and expected default/Ingress1 via LabelSelector "app=ingress") is renamed and simplified:

func TestIngressAnalyzerLabelSelector(t *testing.T) {
	clientSet := fake.NewSimpleClientset(
		&networkingv1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "ingress-with-label",
				Namespace: "default",
				Labels: map[string]string{
					"app": "test",
				},
			},
			Spec: networkingv1.IngressSpec{
				// Missing ingress class to trigger a failure
			},
		},
		&networkingv1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "ingress-without-label",
				Namespace: "default",
			},
			Spec: networkingv1.IngressSpec{
				// Missing ingress class to trigger a failure
			},
		},
	)

	// Test with label selector
	config := common.Analyzer{
		Client: &kubernetes.Client{
			Client: clientSet,
		},
		Context:       context.Background(),
		Namespace:     "default",
		LabelSelector: "app=test",
	}

	analyzer := IngressAnalyzer{}
	results, err := analyzer.Analyze(config)
	require.NoError(t, err)
	require.Equal(t, 1, len(results))
	require.Equal(t, "default/ingress-with-label", results[0].Name)
}

// Helper functions
func strPtr(s string) *string {
	return &s
}

func pathTypePtr(p networkingv1.PathType) *networkingv1.PathType {
	return &p
}
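The per-type helpers introduced across these files (boolPtr, int64Ptr, strPtr, pathTypePtr) all do the same thing; on Go 1.18+ they could be collapsed into one generic helper, and k8s.io/utils/ptr already ships an equivalent ptr.To. A sketch, not part of this commit:

// ptr returns a pointer to any value, subsuming the per-type helpers.
func ptr[T any](v T) *T {
	return &v
}

// Usage in the fixtures above would read:
//   Suspend:                 ptr(true),
//   StartingDeadlineSeconds: ptr(int64(-60)),
//   IngressClassName:        ptr("non-existent"),
//   PathType:                ptr(networkingv1.PathTypePrefix),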
@@ -24,145 +24,232 @@ import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

The previous TestServiceAnalyzer built one clientset in namespace "test" (Endpoint1 with not-ready addresses, Endpoint2 with a leader-election record annotation, a non-existent-service Endpoints, Service1 with matching Endpoints, Service2 in "default", and Service3 with no selector) and asserted test/Endpoint1 with 1 failure and test/Service1 with 2 failures. It is replaced by table-driven subtests:

func TestServiceAnalyzer(t *testing.T) {
	tests := []struct {
		name         string
		config       common.Analyzer
		expectations []struct {
			name          string
			failuresCount int
		}
	}{
		{
			name: "Service with no endpoints",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&v1.Endpoints{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Subsets: []v1.EndpointSubset{}, // Empty subsets
						},
						&v1.Service{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Spec: v1.ServiceSpec{
								Selector: map[string]string{
									"app": "test",
								},
							},
						},
					),
				},
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/test-service",
					failuresCount: 1, // One failure for no endpoints
				},
			},
		},
		{
			name: "Service with not ready endpoints",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&v1.Endpoints{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Subsets: []v1.EndpointSubset{
								{
									NotReadyAddresses: []v1.EndpointAddress{
										{
											TargetRef: &v1.ObjectReference{
												Kind: "Pod",
												Name: "test-pod",
											},
										},
									},
								},
							},
						},
						&v1.Service{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Spec: v1.ServiceSpec{
								Selector: map[string]string{
									"app": "test",
								},
							},
						},
					),
				},
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/test-service",
					failuresCount: 1, // One failure for not ready endpoints
				},
			},
		},
		{
			name: "Service with warning events",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&v1.Endpoints{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Subsets: []v1.EndpointSubset{}, // Empty subsets
						},
						&v1.Service{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Spec: v1.ServiceSpec{
								Selector: map[string]string{
									"app": "test",
								},
							},
						},
						&v1.Event{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-event",
								Namespace: "default",
							},
							InvolvedObject: v1.ObjectReference{
								Kind:      "Service",
								Name:      "test-service",
								Namespace: "default",
							},
							Type:    "Warning",
							Reason:  "TestReason",
							Message: "Test warning message",
						},
					),
				},
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				{
					name:          "default/test-service",
					failuresCount: 2, // One failure for no endpoints, one for warning event
				},
			},
		},
		{
			name: "Service with leader election annotation",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&v1.Endpoints{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
								Annotations: map[string]string{
									"control-plane.alpha.kubernetes.io/leader": "test-leader",
								},
							},
							Subsets: []v1.EndpointSubset{}, // Empty subsets
						},
						&v1.Service{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Spec: v1.ServiceSpec{
								Selector: map[string]string{
									"app": "test",
								},
							},
						},
					),
				},
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				// No expectations for leader election endpoints
			},
		},
		{
			name: "Service with non-existent service",
			config: common.Analyzer{
				Client: &kubernetes.Client{
					Client: fake.NewSimpleClientset(
						&v1.Endpoints{
							ObjectMeta: metav1.ObjectMeta{
								Name:      "test-service",
								Namespace: "default",
							},
							Subsets: []v1.EndpointSubset{}, // Empty subsets
						},
					),
				},
				Namespace: "default",
			},
			expectations: []struct {
				name          string
				failuresCount int
			}{
				// No expectations for non-existent service
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			analyzer := ServiceAnalyzer{}
			results, err := analyzer.Analyze(tt.config)
			require.NoError(t, err)
			require.Len(t, results, len(tt.expectations))

			// Sort results by name for consistent comparison
			sort.Slice(results, func(i, j int) bool {
				return results[i].Name < results[j].Name
			})

			for i, expectation := range tt.expectations {
				require.Equal(t, expectation.name, results[i].Name)
				require.Len(t, results[i].Error, expectation.failuresCount)
			}
		})
	}
}
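The literal annotation key "control-plane.alpha.kubernetes.io/leader" in the leader-election case is the value of resourcelock.LeaderElectionRecordAnnotationKey, which the removed fixture referenced by name. A sketch of the guard the expectations imply, inferred from the test rather than lifted from the analyzer:

import "k8s.io/client-go/tools/leaderelection/resourcelock"

// hasLeaderElectionRecord reports whether an Endpoints object exists only
// to hold a leader-election record; for such objects empty Subsets are
// expected and should not be reported as a failure.
func hasLeaderElectionRecord(ep *v1.Endpoints) bool {
	_, ok := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]
	return ok
}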