Mirror of https://github.com/k8sgpt-ai/k8sgpt.git, synced 2025-09-07 02:01:39 +00:00
feat: add keda integration (#1058)
* refactor: move FetchLatestEvent inside util package
  Signed-off-by: DragonAlex98 <a.antinori@reply.it>
* feat: add Keda integration and ScaledObject analyzer
  Signed-off-by: DragonAlex98 <a.antinori@reply.it>
---------
Signed-off-by: DragonAlex98 <a.antinori@reply.it>
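Taken together, the two changes mean every analyzer now reaches the shared event helper through the util package. A minimal sketch of the resulting call-site pattern, using only names visible in the diff below:

    import "github.com/k8sgpt-ai/k8sgpt/pkg/util"

    // inside an analyzer loop; namespace and name identify the object under analysis
    evt, err := util.FetchLatestEvent(a.Context, a.Client, namespace, name)
    if err != nil || evt == nil {
        continue // no usable event; the analyzers skip to the next object
    }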
go.mod (3 changes)
@@ -5,6 +5,7 @@ go 1.21
require (
	github.com/aquasecurity/trivy-operator v0.17.1
	github.com/fatih/color v1.16.0
	github.com/kedacore/keda/v2 v2.11.2
	github.com/magiconair/properties v1.8.7
	github.com/mittwald/go-helm-client v0.12.5
	github.com/sashabaranov/go-openai v1.20.4
@@ -92,11 +93,13 @@ require (
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
	go.opentelemetry.io/otel/metric v1.24.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect
	gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	knative.dev/pkg v0.0.0-20230616134650-eb63a40adfb0 // indirect
)

require (
go.sum (4 changes)
@@ -1799,6 +1799,8 @@ github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kedacore/keda/v2 v2.11.2 h1:UgPww0NREqUkM1PGERUz+eb5PlO5oU8V/sT9Hh+ZD60=
github.com/kedacore/keda/v2 v2.11.2/go.mod h1:eutYX+QXTi3QH90F7JvY3tYtV5Jq10o5f56Chk5IVF8=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -3048,6 +3050,8 @@ k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ=
k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/pkg v0.0.0-20230616134650-eb63a40adfb0 h1:weQWWxEEbNOPuL4qtGiBZuMSFhcjF/Cu163uktd/xFE=
knative.dev/pkg v0.0.0-20230616134650-eb63a40adfb0/go.mod h1:dqC6IrvyBE7E+oZocs5PkVhq1G59pDTA7r8U17EAKMk=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
(deleted file in pkg/analyzer, 50 lines; FetchLatestEvent moved to pkg/util)
@@ -1,50 +0,0 @@
/*
Copyright 2023 The K8sGPT Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package analyzer

import (
	"context"

	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func FetchLatestEvent(ctx context.Context, kubernetesClient *kubernetes.Client, namespace string, name string) (*v1.Event, error) {

	// get the list of events
	events, err := kubernetesClient.GetClient().CoreV1().Events(namespace).List(ctx,
		metav1.ListOptions{
			FieldSelector: "involvedObject.name=" + name,
		})
	if err != nil {
		return nil, err
	}
	// find most recent event
	var latestEvent *v1.Event
	for _, event := range events.Items {
		if latestEvent == nil {
			// this is required, as a pointer to a loop variable would always yield the latest value in the range
			e := event
			latestEvent = &e
		}
		if event.LastTimestamp.After(latestEvent.LastTimestamp.Time) {
			// this is required, as a pointer to a loop variable would always yield the latest value in the range
			e := event
			latestEvent = &e
		}
	}
	return latestEvent, nil
}
pkg/analyzer/pod.go
@@ -97,7 +97,7 @@ func analyzeContainerStatusFailures(a common.Analyzer, statuses []v1.ContainerSt
	if containerStatus.State.Waiting.Reason == "ContainerCreating" && statusPhase == "Pending" {
		// This represents a container that is still being created or blocked due to conditions such as OOMKilled
		// parse the event log and append details
-		evt, err := FetchLatestEvent(a.Context, a.Client, namespace, name)
+		evt, err := util.FetchLatestEvent(a.Context, a.Client, namespace, name)
		if err != nil || evt == nil {
			continue
		}
@@ -123,7 +123,7 @@ func analyzeContainerStatusFailures(a common.Analyzer, statuses []v1.ContainerSt
	// when pod is Running but its ReadinessProbe fails
	if !containerStatus.Ready && statusPhase == "Running" {
		// parse the event log and append details
-		evt, err := FetchLatestEvent(a.Context, a.Client, namespace, name)
+		evt, err := util.FetchLatestEvent(a.Context, a.Client, namespace, name)
		if err != nil || evt == nil {
			continue
		}
pkg/analyzer/pvc.go
@@ -47,7 +47,7 @@ func (PvcAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	if pvc.Status.Phase == appsv1.ClaimPending {

		// parse the event log and append details
-		evt, err := FetchLatestEvent(a.Context, a.Client, pvc.Namespace, pvc.Name)
+		evt, err := util.FetchLatestEvent(a.Context, a.Client, pvc.Namespace, pvc.Name)
		if err != nil || evt == nil {
			continue
		}
pkg/common/types.go
@@ -20,6 +20,7 @@ import (
	openapi_v2 "github.com/google/gnostic/openapiv2"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	keda "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
	regv1 "k8s.io/api/admissionregistration/v1"
	appsv1 "k8s.io/api/apps/v1"
	autov1 "k8s.io/api/autoscaling/v1"
@@ -62,6 +63,7 @@ type PreAnalysis struct {
	Gateway                  gtwapi.Gateway
	HTTPRoute                gtwapi.HTTPRoute
	// Integrations
	ScaledObject             keda.ScaledObject
	TrivyVulnerabilityReport trivy.VulnerabilityReport
	TrivyConfigAuditReport   trivy.ConfigAuditReport
}
pkg/integration/integration.go
@@ -16,9 +16,11 @@ package integration
import (
	"errors"
	"fmt"

	"github.com/k8sgpt-ai/k8sgpt/pkg/integration/aws"

	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration/keda"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration/prometheus"
	"github.com/k8sgpt-ai/k8sgpt/pkg/integration/trivy"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
@@ -49,6 +51,7 @@ var integrations = map[string]IIntegration{
	"trivy":      trivy.NewTrivy(),
	"prometheus": prometheus.NewPrometheus(),
	"aws":        aws.NewAWS(),
	"keda":       keda.NewKeda(),
}

func NewIntegration() *Integration {
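Registration is just a map entry: the keda package satisfies the same IIntegration surface the other entries do. A hedged lookup sketch, assuming IsActivate belongs to the IIntegration contract as the keda type's method set suggests:

    // hypothetical caller inside the integration package
    if integ, ok := integrations["keda"]; ok && integ.IsActivate() {
        // the analysis run can include this integration's analyzers
    }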
pkg/integration/keda/keda.go (new file, 229 lines)
@@ -0,0 +1,229 @@
package keda

import (
	"context"
	"fmt"
	"os"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/kedacore/keda/v2/pkg/generated/clientset/versioned/typed/keda/v1alpha1"
	helmclient "github.com/mittwald/go-helm-client"
	"github.com/spf13/viper"
	"helm.sh/helm/v3/pkg/repo"
)

var (
	Repo          = getEnv("KEDA_REPO", "https://kedacore.github.io/charts")
	Version       = getEnv("KEDA_VERSION", "2.11.2")
	ChartName     = getEnv("KEDA_CHART_NAME", "keda")
	RepoShortName = getEnv("KEDA_REPO_SHORT_NAME", "keda")
	ReleaseName   = getEnv("KEDA_RELEASE_NAME", "keda-k8sgpt")
)

type Keda struct {
	helm helmclient.Client
}

func getEnv(key, defaultValue string) string {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	return value
}

func NewKeda() *Keda {
	helmClient, err := helmclient.New(&helmclient.Options{})
	if err != nil {
		panic(err)
	}
	return &Keda{
		helm: helmClient,
	}
}

func (k *Keda) Deploy(namespace string) error {
	// Add the repository
	chartRepo := repo.Entry{
		Name: RepoShortName,
		URL:  Repo,
	}
	// Add a chart-repository to the client.
	if err := k.helm.AddOrUpdateChartRepo(chartRepo); err != nil {
		panic(err)
	}

	chartSpec := helmclient.ChartSpec{
		ReleaseName: ReleaseName,
		ChartName:   fmt.Sprintf("%s/%s", RepoShortName, ChartName),
		Namespace:   namespace,

		//TODO: All of this should be configurable
		UpgradeCRDs:     true,
		Wait:            false,
		Timeout:         300,
		CreateNamespace: true,
	}

	// Install a chart release.
	// Note that helmclient.Options.Namespace should ideally match the namespace in chartSpec.Namespace.
	if _, err := k.helm.InstallOrUpgradeChart(context.Background(), &chartSpec, nil); err != nil {
		return err
	}

	return nil
}

func (k *Keda) UnDeploy(namespace string) error {
	kubecontext := viper.GetString("kubecontext")
	kubeconfig := viper.GetString("kubeconfig")
	client, err := kubernetes.NewClient(kubecontext, kubeconfig)
	if err != nil {
		// TODO: better error handling
		color.Red("Error initialising kubernetes client: %v", err)
		os.Exit(1)
	}

	kedaNamespace, _ := k.GetNamespace()
	color.Blue(fmt.Sprintf("Keda namespace: %s\n", kedaNamespace))

	kClient, _ := v1alpha1.NewForConfig(client.Config)

	scaledObjectList, _ := kClient.ScaledObjects("").List(context.Background(), v1.ListOptions{})
	scaledJobList, _ := kClient.ScaledJobs("").List(context.Background(), v1.ListOptions{})
	triggerAuthenticationList, _ := kClient.TriggerAuthentications("").List(context.Background(), v1.ListOptions{})
	clusterTriggerAuthenticationsList, _ := kClient.ClusterTriggerAuthentications().List(context.Background(), v1.ListOptions{})

	// Before uninstalling the Helm chart, we need to delete Keda resources
	for _, scaledObject := range scaledObjectList.Items {
		err := kClient.ScaledObjects(scaledObject.Namespace).Delete(context.Background(), scaledObject.Name, v1.DeleteOptions{})
		if err != nil {
			fmt.Printf("Error deleting scaledObject %s: %v\n", scaledObject.Name, err)
		} else {
			fmt.Printf("Deleted scaledObject %s in namespace %s\n", scaledObject.Name, scaledObject.Namespace)
		}
	}

	for _, scaledJob := range scaledJobList.Items {
		err := kClient.ScaledJobs(scaledJob.Namespace).Delete(context.Background(), scaledJob.Name, v1.DeleteOptions{})
		if err != nil {
			fmt.Printf("Error deleting scaledJob %s: %v\n", scaledJob.Name, err)
		} else {
			fmt.Printf("Deleted scaledJob %s in namespace %s\n", scaledJob.Name, scaledJob.Namespace)
		}
	}

	for _, triggerAuthentication := range triggerAuthenticationList.Items {
		err := kClient.TriggerAuthentications(triggerAuthentication.Namespace).Delete(context.Background(), triggerAuthentication.Name, v1.DeleteOptions{})
		if err != nil {
			fmt.Printf("Error deleting triggerAuthentication %s: %v\n", triggerAuthentication.Name, err)
		} else {
			fmt.Printf("Deleted triggerAuthentication %s in namespace %s\n", triggerAuthentication.Name, triggerAuthentication.Namespace)
		}
	}

	for _, clusterTriggerAuthentication := range clusterTriggerAuthenticationsList.Items {
		err := kClient.ClusterTriggerAuthentications().Delete(context.Background(), clusterTriggerAuthentication.Name, v1.DeleteOptions{})
		if err != nil {
			fmt.Printf("Error deleting clusterTriggerAuthentication %s: %v\n", clusterTriggerAuthentication.Name, err)
		} else {
			fmt.Printf("Deleted clusterTriggerAuthentication %s\n", clusterTriggerAuthentication.Name)
		}
	}

	chartSpec := helmclient.ChartSpec{
		ReleaseName: ReleaseName,
		ChartName:   fmt.Sprintf("%s/%s", RepoShortName, ChartName),
		Namespace:   namespace,
		UpgradeCRDs: true,
		Wait:        false,
		Timeout:     300,
	}
	// Uninstall the chart release.
	// Note that helmclient.Options.Namespace should ideally match the namespace in chartSpec.Namespace.
	if err := k.helm.UninstallRelease(&chartSpec); err != nil {
		return err
	}
	return nil
}

func (k *Keda) AddAnalyzer(mergedMap *map[string]common.IAnalyzer) {
	(*mergedMap)["ScaledObject"] = &ScaledObjectAnalyzer{}
}

func (k *Keda) GetAnalyzerName() []string {
	return []string{
		"ScaledObject",
	}
}

func (k *Keda) GetNamespace() (string, error) {
	releases, err := k.helm.ListDeployedReleases()
	if err != nil {
		return "", err
	}
	for _, rel := range releases {
		if rel.Name == ReleaseName {
			return rel.Namespace, nil
		}
	}
	return "", status.Error(codes.NotFound, "keda release not found")
}

func (k *Keda) OwnsAnalyzer(analyzer string) bool {
	for _, a := range k.GetAnalyzerName() {
		if analyzer == a {
			return true
		}
	}
	return false
}

func (k *Keda) isFilterActive() bool {
	activeFilters := viper.GetStringSlice("active_filters")

	for _, filter := range k.GetAnalyzerName() {
		for _, af := range activeFilters {
			if af == filter {
				return true
			}
		}
	}

	return false
}

func (k *Keda) isDeployed() bool {
	kubecontext := viper.GetString("kubecontext")
	kubeconfig := viper.GetString("kubeconfig")
	client, err := kubernetes.NewClient(kubecontext, kubeconfig)
	if err != nil {
		// TODO: better error handling
		color.Red("Error initialising kubernetes client: %v", err)
		os.Exit(1)
	}
	groups, _, err := client.Client.Discovery().ServerGroupsAndResources()
	if err != nil {
		// TODO: better error handling
		color.Red("Error initialising discovery client: %v", err)
		os.Exit(1)
	}

	for _, group := range groups {
		if group.Name == "keda.sh" {
			return true
		}
	}

	return false
}

func (k *Keda) IsActivate() bool {
	return k.isFilterActive() && k.isDeployed()
}
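For orientation, a hedged sketch of driving the type above directly rather than through the k8sgpt CLI; the namespace value and logging are illustrative assumptions, not anything the diff prescribes:

    package main

    import (
        "log"

        "github.com/k8sgpt-ai/k8sgpt/pkg/integration/keda"
    )

    func main() {
        k := keda.NewKeda() // panics internally if a Helm client cannot be constructed
        if err := k.Deploy("keda"); err != nil { // installs the keda chart, creating the namespace if needed
            log.Fatal(err)
        }
        if ns, err := k.GetNamespace(); err == nil {
            log.Printf("keda release deployed in namespace %s", ns)
        }
        if err := k.UnDeploy("keda"); err != nil { // deletes Keda CRs first, then uninstalls the release
            log.Fatal(err)
        }
    }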
pkg/integration/keda/scaledobject_analyzer.go (new file, 193 lines)
@@ -0,0 +1,193 @@
package keda

import (
	"fmt"

	"github.com/k8sgpt-ai/k8sgpt/pkg/common"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/k8sgpt-ai/k8sgpt/pkg/util"
	kedaSchema "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
	"github.com/kedacore/keda/v2/pkg/generated/clientset/versioned/typed/keda/v1alpha1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type ScaledObjectAnalyzer struct{}

func (s *ScaledObjectAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	kClient, _ := v1alpha1.NewForConfig(a.Client.GetConfig())
	kind := "ScaledObject"

	apiDoc := kubernetes.K8sApiReference{
		Kind:          kind,
		ApiVersion:    kedaSchema.GroupVersion,
		OpenapiSchema: a.OpenapiSchema,
	}

	list, err := kClient.ScaledObjects(a.Namespace).List(a.Context, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	var preAnalysis = map[string]common.PreAnalysis{}

	for _, so := range list.Items {
		var failures []common.Failure

		scaleTargetRef := so.Spec.ScaleTargetRef
		if scaleTargetRef.Kind == "" {
			scaleTargetRef.Kind = "Deployment"
		}

		var podInfo PodInfo

		switch scaleTargetRef.Kind {
		case "Deployment":
			deployment, err := a.Client.GetClient().AppsV1().Deployments(so.Namespace).Get(a.Context, scaleTargetRef.Name, metav1.GetOptions{})
			if err == nil {
				podInfo = DeploymentInfo{deployment}
			}
		case "ReplicationController":
			rc, err := a.Client.GetClient().CoreV1().ReplicationControllers(so.Namespace).Get(a.Context, scaleTargetRef.Name, metav1.GetOptions{})
			if err == nil {
				podInfo = ReplicationControllerInfo{rc}
			}
		case "ReplicaSet":
			rs, err := a.Client.GetClient().AppsV1().ReplicaSets(so.Namespace).Get(a.Context, scaleTargetRef.Name, metav1.GetOptions{})
			if err == nil {
				podInfo = ReplicaSetInfo{rs}
			}
		case "StatefulSet":
			ss, err := a.Client.GetClient().AppsV1().StatefulSets(so.Namespace).Get(a.Context, scaleTargetRef.Name, metav1.GetOptions{})
			if err == nil {
				podInfo = StatefulSetInfo{ss}
			}
		default:
			failures = append(failures, common.Failure{
				Text:      fmt.Sprintf("ScaledObject uses %s as ScaleTargetRef which is not an option.", scaleTargetRef.Kind),
				Sensitive: []common.Sensitive{},
			})
		}

		if podInfo == nil {
			doc := apiDoc.GetApiDocV2("spec.scaleTargetRef")

			failures = append(failures, common.Failure{
				Text:          fmt.Sprintf("ScaledObject uses %s/%s as ScaleTargetRef which does not exist.", scaleTargetRef.Kind, scaleTargetRef.Name),
				KubernetesDoc: doc,
				Sensitive: []common.Sensitive{
					{
						Unmasked: scaleTargetRef.Name,
						Masked:   util.MaskString(scaleTargetRef.Name),
					},
				},
			})
		} else {
			containers := len(podInfo.GetPodSpec().Containers)
			for _, container := range podInfo.GetPodSpec().Containers {
				for _, trigger := range so.Spec.Triggers {
					if trigger.Type == "cpu" || trigger.Type == "memory" {
						if container.Resources.Requests == nil || container.Resources.Limits == nil {
							containers--
							break
						}
					}
				}
			}

			if containers <= 0 {
				doc := apiDoc.GetApiDocV2("spec.scaleTargetRef.kind")

				failures = append(failures, common.Failure{
					Text:          fmt.Sprintf("%s %s/%s does not have resource configured.", scaleTargetRef.Kind, so.Namespace, scaleTargetRef.Name),
					KubernetesDoc: doc,
					Sensitive: []common.Sensitive{
						{
							Unmasked: scaleTargetRef.Name,
							Masked:   util.MaskString(scaleTargetRef.Name),
						},
					},
				})
			}

			evt, err := util.FetchLatestEvent(a.Context, a.Client, so.Namespace, so.Name)
			if err != nil || evt == nil {
				continue
			}

			if evt.Type != "Normal" {
				failures = append(failures, common.Failure{
					Text: evt.Message,
					Sensitive: []common.Sensitive{
						{
							Unmasked: scaleTargetRef.Name,
							Masked:   util.MaskString(scaleTargetRef.Name),
						},
					},
				})
			}
		}

		if len(failures) > 0 {
			preAnalysis[fmt.Sprintf("%s/%s", so.Namespace, so.Name)] = common.PreAnalysis{
				ScaledObject:   so,
				FailureDetails: failures,
			}
		}
	}

	for key, value := range preAnalysis {
		var currentAnalysis = common.Result{
			Kind:  kind,
			Name:  key,
			Error: value.FailureDetails,
		}

		parent, _ := util.GetParent(a.Client, value.ScaledObject.ObjectMeta)
		currentAnalysis.ParentObject = parent
		a.Results = append(a.Results, currentAnalysis)
	}

	return a.Results, nil
}

type PodInfo interface {
	GetPodSpec() corev1.PodSpec
}

type DeploymentInfo struct {
	*appsv1.Deployment
}

func (d DeploymentInfo) GetPodSpec() corev1.PodSpec {
	return d.Spec.Template.Spec
}

// define a structure for ReplicationController
type ReplicationControllerInfo struct {
	*corev1.ReplicationController
}

func (rc ReplicationControllerInfo) GetPodSpec() corev1.PodSpec {
	return rc.Spec.Template.Spec
}

// define a structure for ReplicaSet
type ReplicaSetInfo struct {
	*appsv1.ReplicaSet
}

func (rs ReplicaSetInfo) GetPodSpec() corev1.PodSpec {
	return rs.Spec.Template.Spec
}

// define a structure for StatefulSet
type StatefulSetInfo struct {
	*appsv1.StatefulSet
}

// implement PodInfo for StatefulSetInfo
func (ss StatefulSetInfo) GetPodSpec() corev1.PodSpec {
	return ss.Spec.Template.Spec
}
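The PodInfo interface and its four wrappers let the trigger check read one corev1.PodSpec regardless of the scale target's concrete kind. A small sketch of the pattern in isolation; the helper name is hypothetical:

    // missingResources is hypothetical; it mirrors the cpu/memory-trigger check in Analyze above.
    func missingResources(target PodInfo) bool {
        for _, c := range target.GetPodSpec().Containers {
            // KEDA cpu/memory triggers delegate to HPA resource metrics, so the
            // analyzer flags containers lacking both requests and limits.
            if c.Resources.Requests == nil || c.Resources.Limits == nil {
                return true
            }
        }
        return false
    }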
pkg/util/util.go
@@ -233,3 +233,31 @@ func LabelsIncludeAny(predefinedSelector, Labels map[string]string) bool {

	return false
}

func FetchLatestEvent(ctx context.Context, kubernetesClient *kubernetes.Client, namespace string, name string) (*v1.Event, error) {

	// get the list of events
	events, err := kubernetesClient.GetClient().CoreV1().Events(namespace).List(ctx,
		metav1.ListOptions{
			FieldSelector: "involvedObject.name=" + name,
		})
	if err != nil {
		return nil, err
	}
	// find most recent event
	var latestEvent *v1.Event
	for _, event := range events.Items {
		if latestEvent == nil {
			// this is required, as a pointer to a loop variable would always yield the latest value in the range
			e := event
			latestEvent = &e
		}
		if event.LastTimestamp.After(latestEvent.LastTimestamp.Time) {
			// this is required, as a pointer to a loop variable would always yield the latest value in the range
			e := event
			latestEvent = &e
		}
	}
	return latestEvent, nil
}
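A note on the repeated `e := event` copy: this module declares go 1.21, and up to that version a range loop reuses a single loop variable, so taking its address directly would leave every stored pointer aliasing the last element (Go 1.22 later made the variable per-iteration). A standalone sketch of the pitfall the comments guard against:

    package main

    import "fmt"

    func main() {
        nums := []int{1, 2, 3}
        var ptrs []*int
        for _, n := range nums {
            ptrs = append(ptrs, &n) // under go <= 1.21 semantics, &n is the same address every iteration
        }
        fmt.Println(*ptrs[0], *ptrs[1], *ptrs[2]) // prints "3 3 3" with go 1.21 semantics
    }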