Mirror of https://github.com/kubeshark/kubeshark.git, synced 2025-06-24 07:14:15 +00:00

🔥 Remove config map and image pull checks

Commit f128ae3993 (parent 38da25ecc8). The diff deletes the in-cluster image pull check wholesale and strips every ConfigMap reference from the resource checks, the pod builders, and the cleanup path.
@@ -1,123 +0,0 @@
-package check
-
-import (
-	"context"
-	"fmt"
-	"regexp"
-	"time"
-
-	"github.com/kubeshark/kubeshark/docker"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "image-pull-in-cluster").Msg("Checking:")
-
-	namespace := "default"
-	podName := fmt.Sprintf("%s-test", misc.Program)
-
-	defer func() {
-		if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil {
-			log.Error().
-				Str("namespace", namespace).
-				Str("pod", podName).
-				Err(err).
-				Msg("While removing test pod!")
-		}
-	}()
-
-	if err := createImagePullInClusterPod(ctx, kubernetesProvider, namespace, podName); err != nil {
-		log.Error().
-			Str("namespace", namespace).
-			Str("pod", podName).
-			Err(err).
-			Msg("While creating test pod!")
-		return false
-	}
-
-	if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil {
-		log.Printf("%v cluster is not able to pull %s containers from docker hub, err: %v", fmt.Sprintf(utils.Red, "✗"), misc.Program, err)
-		log.Error().
-			Str("namespace", namespace).
-			Str("pod", podName).
-			Err(err).
-			Msg("Unable to pull images from Docker Hub!")
-		return false
-	}
-
-	log.Info().
-		Str("namespace", namespace).
-		Str("pod", podName).
-		Msg("Pulling images from Docker Hub is passed.")
-	return true
-}
-
-func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
-	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
-	podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{namespace}, podWatchHelper)
-
-	timeAfter := time.After(30 * time.Second)
-
-	for {
-		select {
-		case wEvent, ok := <-eventChan:
-			if !ok {
-				eventChan = nil
-				continue
-			}
-
-			pod, err := wEvent.ToPod()
-			if err != nil {
-				return err
-			}
-
-			if pod.Status.Phase == core.PodRunning {
-				return nil
-			}
-		case err, ok := <-errorChan:
-			if !ok {
-				errorChan = nil
-				continue
-			}
-
-			return err
-		case <-timeAfter:
-			return fmt.Errorf("image not pulled in time")
-		}
-	}
-}
-
-func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
-	image := docker.GetWorkerImage()
-	log.Info().Str("image", image).Msg("Testing image pull:")
-
-	var zero int64
-	pod := &core.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: podName,
-		},
-		Spec: core.PodSpec{
-			Containers: []core.Container{
-				{
-					Name:            "probe",
-					Image:           image,
-					ImagePullPolicy: "Always",
-					Command:         []string{"cat"},
-					Stdin:           true,
-				},
-			},
-			TerminationGracePeriodSeconds: &zero,
-		},
-	}
-
-	if _, err := kubernetesProvider.CreatePod(ctx, namespace, pod); err != nil {
-		return err
-	}
-
-	return nil
-}
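The deleted check worked by scheduling a short-lived probe pod and watching it until it reached the Running phase, which can only happen once the worker image has been pulled. The watch plumbing above (NewPodWatchHelper, FilteredWatch) is internal to kubeshark; a minimal sketch of the same wait-for-Running loop using plain client-go might look like this (waitForPodRunning and its parameters are illustrative, not part of the original code):

package probe

import (
	"context"
	"fmt"
	"time"

	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForPodRunning blocks until the named pod reports the Running phase,
// the watch channel closes, or the timeout elapses.
func waitForPodRunning(ctx context.Context, clientset kubernetes.Interface, namespace string, podName string, timeout time.Duration) error {
	watcher, err := clientset.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{
		FieldSelector: "metadata.name=" + podName,
	})
	if err != nil {
		return err
	}
	defer watcher.Stop()

	timeAfter := time.After(timeout)
	for {
		select {
		case event, ok := <-watcher.ResultChan():
			if !ok {
				return fmt.Errorf("watch closed before pod was running")
			}
			pod, ok := event.Object.(*core.Pod)
			if !ok {
				continue // ignore non-pod events such as watch status errors
			}
			if pod.Status.Phase == core.PodRunning {
				return nil // image pulled and container started
			}
		case <-timeAfter:
			return fmt.Errorf("image not pulled in time")
		}
	}
}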
@@ -15,9 +15,6 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
 	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.SelfNamespace)
 	allResourcesExist := checkResourceExist(config.Config.SelfNamespace, "namespace", exist, err)
 
-	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.SelfNamespace, kubernetes.ConfigMapName)
-	allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
-
 	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.SelfNamespace, kubernetes.ServiceAccountName)
 	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
 
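A subtle point in the surviving lines: each update is written as checkResourceExist(...) && allResourcesExist, with the call on the left. Go's && short-circuits left to right, so the reversed order would stop checking (and stop reporting) after the first missing resource. A toy sketch of the difference, with a stand-in checkResourceExist:

package main

import "fmt"

// checkResourceExist stands in for the real helper; it reports every check.
func checkResourceExist(name string) bool {
	fmt.Println("checking", name)
	return name != "missing"
}

func main() {
	all := checkResourceExist("namespace")
	all = checkResourceExist("missing") && all         // runs, reports, fails
	all = checkResourceExist("service-account") && all // still runs and reports
	fmt.Println("all resources exist:", all)
}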
@@ -33,9 +33,6 @@ func runCheck() {
 		checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
 	}
 
-	if checkPassed {
-		checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
-	}
 	if checkPassed {
 		checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
 	}
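The runCheck sequence gates each step on everything before it, so removing the image pull step simply lets a passing permissions check fall straight through to the resource check. The same chaining can be expressed generically (an illustrative rewrite, not kubeshark code):

package sketch

// runChecks runs each check only while all previous checks have passed,
// mirroring the if checkPassed { ... } chain above.
func runChecks(checks ...func() bool) bool {
	passed := true
	for _, check := range checks {
		if !passed {
			break
		}
		passed = check()
	}
	return passed
}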
@@ -14,7 +14,6 @@ const (
 	ServiceAccountName         = SelfResourcesPrefix + "service-account"
 	WorkerDaemonSetName        = SelfResourcesPrefix + "worker-daemon-set"
 	WorkerPodName              = SelfResourcesPrefix + "worker"
-	ConfigMapName              = SelfResourcesPrefix + "config"
 	MinKubernetesServerVersion = "1.16.0"
 )
 
@@ -182,9 +182,6 @@ type PodOptions struct {
 }
 
 func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
-	configMapVolume := &core.ConfigMapVolumeSource{}
-	configMapVolume.Name = ConfigMapName
-
 	cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
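The removed configMapVolume lines built a core.ConfigMapVolumeSource whose Name field comes from its embedded LocalObjectReference; on its own the source does nothing until it is attached to the pod as a volume and mounted by a container. BuildFrontPod in the next hunk drops the identical pair of lines. A sketch of that wiring (the volume name and mount path are illustrative assumptions):

package sketch

import core "k8s.io/api/core/v1"

// configMapVolume returns a pod volume backed by the named ConfigMap and a
// matching mount that a container in the same pod spec can reference.
func configMapVolume(configMapName string) (core.Volume, core.VolumeMount) {
	vol := core.Volume{
		Name: "config",
		VolumeSource: core.VolumeSource{
			ConfigMap: &core.ConfigMapVolumeSource{
				LocalObjectReference: core.LocalObjectReference{Name: configMapName},
			},
		},
	}
	mount := core.VolumeMount{
		Name:      "config",      // must match the volume name above
		MountPath: "/etc/config", // illustrative mount path
		ReadOnly:  true,
	}
	return vol, mount
}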
@@ -264,9 +261,6 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
 }
 
 func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
-	configMapVolume := &core.ConfigMapVolumeSource{}
-	configMapVolume.Name = ConfigMapName
-
 	cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
@@ -419,11 +413,6 @@ func (provider *Provider) DoesNamespaceExist(ctx context.Context, name string) (bool, error) {
 	return provider.doesResourceExist(namespaceResource, err)
 }
 
-func (provider *Provider) DoesConfigMapExist(ctx context.Context, namespace string, name string) (bool, error) {
-	configMapResource, err := provider.clientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
-	return provider.doesResourceExist(configMapResource, err)
-}
-
 func (provider *Provider) DoesServiceAccountExist(ctx context.Context, namespace string, name string) (bool, error) {
 	serviceAccountResource, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
 	return provider.doesResourceExist(serviceAccountResource, err)
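Both existence checks funnel their Get result through provider.doesResourceExist, which is not shown in this diff. A plausible shape, assumed for illustration rather than taken from kubeshark, is to map a NotFound error to (false, nil) and propagate anything else:

package sketch

import k8serrors "k8s.io/apimachinery/pkg/api/errors"

// doesResourceExist distinguishes "the resource is absent" from "the query
// itself failed" (RBAC denial, connectivity, and so on).
func doesResourceExist(err error) (bool, error) {
	if err != nil {
		if k8serrors.IsNotFound(err) {
			return false, nil // absent, but the API call succeeded
		}
		return false, err // a real API error
	}
	return true, nil
}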
@@ -113,11 +113,6 @@ func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider) {
 		handleDeletionError(err, resourceDesc, &leftoverResources)
 	}
 
-	if err := kubernetesProvider.RemoveConfigMap(ctx, selfResourcesNamespace, kubernetes.ConfigMapName); err != nil {
-		resourceDesc := fmt.Sprintf("ConfigMap %s in namespace %s", kubernetes.ConfigMapName, selfResourcesNamespace)
-		handleDeletionError(err, resourceDesc, &leftoverResources)
-	}
-
 	if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil {
 		resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace)
 		handleDeletionError(err, resourceDesc, &leftoverResources)
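Note that the cleanup keeps deleting after individual failures, handing each error to handleDeletionError so every leftover resource is recorded rather than aborting on the first problem. That helper is outside this diff; a minimal version consistent with its call sites (an assumption, not the original) could be:

package sketch

import "log"

// handleDeletionError logs a failed deletion and records the resource so the
// caller can report everything that was left behind at the end.
func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
	log.Printf("Error while removing %s: %v", resourceDesc, err)
	*leftoverResources = append(*leftoverResources, resourceDesc)
}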