From 5dafc015bb5be7745f6a06dc31e9da0094721546 Mon Sep 17 00:00:00 2001
From: "M. Mert Yildiran"
Date: Tue, 11 Apr 2023 01:54:06 +0300
Subject: [PATCH] :sparkles: Add `manifests` command to generate Kubernetes
 manifests

---
 cmd/cleanRunner.go           |   2 +-
 cmd/common.go                |   6 +-
 cmd/logs.go                  |   2 +-
 cmd/manifests.go             | 178 +++++++++++++++++++++++++++++++++
 cmd/proxyRunner.go           |   2 +-
 cmd/tapRunner.go             |   2 +-
 config/config.go             |  24 ++++-
 config/configStruct.go       |   5 +
 kubernetes/provider.go       | 189 ++++++++++++++++++-----------------
 resources/createResources.go |  39 ++------
 utils/pretty.go              |  15 +++
 11 files changed, 331 insertions(+), 133 deletions(-)
 create mode 100644 cmd/manifests.go

diff --git a/cmd/cleanRunner.go b/cmd/cleanRunner.go
index fea6cb846..cb019e980 100644
--- a/cmd/cleanRunner.go
+++ b/cmd/cleanRunner.go
@@ -5,7 +5,7 @@ import (
 )
 
 func performCleanCommand() {
- kubernetesProvider, err := getKubernetesProviderForCli()
+ kubernetesProvider, err := getKubernetesProviderForCli(false)
 if err != nil {
 return
 }
diff --git a/cmd/common.go b/cmd/common.go
index ecc9a94b6..796c95991 100644
--- a/cmd/common.go
+++ b/cmd/common.go
@@ -58,7 +58,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 }
 }
 
-func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
+func getKubernetesProviderForCli(silent bool) (*kubernetes.Provider, error) {
 kubeConfigPath := config.Config.KubeConfigPath()
 kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
 if err != nil {
@@ -66,7 +66,9 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
 return nil, err
 }
 
- log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
+ if !silent {
+ log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
+ }
 
 if err := kubernetesProvider.ValidateNotProxy(); err != nil {
 handleKubernetesProviderError(err)
diff --git a/cmd/logs.go b/cmd/logs.go
index 2b0bacfd6..3ac774c0e 100644
--- a/cmd/logs.go
+++ b/cmd/logs.go
@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
 Use: "logs",
 Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
 RunE: func(cmd *cobra.Command, args []string) error {
- kubernetesProvider, err := getKubernetesProviderForCli()
+ kubernetesProvider, err := getKubernetesProviderForCli(false)
 if err != nil {
 return nil
 }
diff --git a/cmd/manifests.go b/cmd/manifests.go
new file mode 100644
index 000000000..b1037bbb0
--- /dev/null
+++ b/cmd/manifests.go
@@ -0,0 +1,178 @@
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/creasty/defaults"
+ "github.com/kubeshark/kubeshark/config"
+ "github.com/kubeshark/kubeshark/docker"
+ "github.com/kubeshark/kubeshark/kubernetes"
+ "github.com/kubeshark/kubeshark/utils"
+ "github.com/rs/zerolog/log"
+ "github.com/spf13/cobra"
+)
+
+const manifestSeparator = "---"
+
+var manifestsCmd = &cobra.Command{
+ Use:   "manifests",
+ Short: "Generate Kubernetes manifests of Kubeshark",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ runManifests()
+ return nil
+ },
+}
+
+func init() {
+ rootCmd.AddCommand(manifestsCmd)
+
+ defaultManifestsConfig := config.ManifestsConfig{}
+ if err := defaults.Set(&defaultManifestsConfig); err != nil {
+ log.Debug().Err(err).Send()
+ }
+
+ manifestsCmd.Flags().Bool("dump", defaultManifestsConfig.Dump, "Dump the manifests into the ./manifests directory instead of printing them")
+}
+
+func runManifests() {
+ kubernetesProvider, err := getKubernetesProviderForCli(true)
+ if err != nil {
+ log.Error().Err(err).Send()
+ return
+ }
+
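+ // Each object below is built with the same provider Build* helpers that
+ // the tap flow uses in resources/createResources.go, so the generated
+ // manifests mirror what a live `kubeshark tap` deployment would create.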
+ namespace := kubernetesProvider.BuildNamespace(config.Config.Tap.SelfNamespace)
+
+ serviceAccount := kubernetesProvider.BuildServiceAccount()
+
+ clusterRole := kubernetesProvider.BuildClusterRole()
+
+ clusterRoleBinding := kubernetesProvider.BuildClusterRoleBinding()
+
+ hubPod, err := kubernetesProvider.BuildHubPod(&kubernetes.PodOptions{
+ Namespace: config.Config.Tap.SelfNamespace,
+ PodName: kubernetes.HubPodName,
+ PodImage: docker.GetHubImage(),
+ ServiceAccountName: kubernetes.ServiceAccountName,
+ Resources: config.Config.Tap.Resources.Hub,
+ ImagePullPolicy: config.Config.ImagePullPolicy(),
+ ImagePullSecrets: config.Config.ImagePullSecrets(),
+ Debug: config.Config.Tap.Debug,
+ })
+ if err != nil {
+ log.Error().Err(err).Send()
+ return
+ }
+
+ hubService := kubernetesProvider.BuildHubService(config.Config.Tap.SelfNamespace)
+
+ frontPod, err := kubernetesProvider.BuildFrontPod(&kubernetes.PodOptions{
+ Namespace: config.Config.Tap.SelfNamespace,
+ PodName: kubernetes.FrontPodName,
+ PodImage: docker.GetWorkerImage(),
+ ServiceAccountName: kubernetes.ServiceAccountName,
+ Resources: config.Config.Tap.Resources.Hub,
+ ImagePullPolicy: config.Config.ImagePullPolicy(),
+ ImagePullSecrets: config.Config.ImagePullSecrets(),
+ Debug: config.Config.Tap.Debug,
+ }, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
+ if err != nil {
+ log.Error().Err(err).Send()
+ return
+ }
+
+ frontService := kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
+
+ workerDaemonSet, err := kubernetesProvider.BuildWorkerDaemonSet(
+ docker.GetWorkerImage(),
+ kubernetes.WorkerPodName,
+ kubernetes.ServiceAccountName,
+ config.Config.Tap.Resources.Worker,
+ config.Config.ImagePullPolicy(),
+ config.Config.ImagePullSecrets(),
+ config.Config.Tap.ServiceMesh,
+ config.Config.Tap.Tls,
+ config.Config.Tap.Debug,
+ )
+ if err != nil {
+ log.Error().Err(err).Send()
+ return
+ }
+
+ if config.Config.Manifests.Dump {
+ err = dumpManifests(map[string]interface{}{
+ "00-namespace.yaml": namespace,
+ "01-service-account.yaml": serviceAccount,
+ "02-cluster-role.yaml": clusterRole,
+ "03-cluster-role-binding.yaml": clusterRoleBinding,
+ "04-hub-pod.yaml": hubPod,
+ "05-hub-service.yaml": hubService,
+ "06-front-pod.yaml": frontPod,
+ "07-front-service.yaml": frontService,
+ "08-worker-daemon-set.yaml": workerDaemonSet,
+ })
+ } else {
+ err = printManifests([]interface{}{
+ namespace,
+ serviceAccount,
+ clusterRole,
+ clusterRoleBinding,
+ hubPod,
+ hubService,
+ frontPod,
+ frontService,
+ workerDaemonSet,
+ })
+ }
+ if err != nil {
+ log.Error().Err(err).Send()
+ return
+ }
+}
+
+func dumpManifests(objects map[string]interface{}) error {
+ folder := filepath.Join(".", "manifests")
+ err := os.MkdirAll(folder, os.ModePerm)
+ if err != nil {
+ return err
+ }
+
+ // Sort by filenames
+ filenames := make([]string, 0)
+ for filename := range objects {
+ filenames = append(filenames, filename)
+ }
+ sort.Strings(filenames)
+
+ for _, filename := range filenames {
+ manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
+ if err != nil {
+ return err
+ }
+
+ path := filepath.Join(folder, filename)
+ err = os.WriteFile(path, []byte(manifest), 0644)
+ if err != nil {
+ return err
+ }
+ log.Info().Msgf("Manifest generated: %s", path)
+ }
+
+ return nil
+}
+
+func printManifests(objects []interface{}) error {
+ for _, object := range objects {
+ manifest, err := utils.PrettyYamlOmitEmpty(object)
+ if err != nil {
+ return err
+ }
+ fmt.Println(manifestSeparator)
+ fmt.Println(manifest)
+ }
+
+ return nil
+}
diff --git a/cmd/proxyRunner.go b/cmd/proxyRunner.go
index 099e2ee23..c5d55a2a9 100644
--- a/cmd/proxyRunner.go
+++ b/cmd/proxyRunner.go
@@ -15,7 +15,7 @@ import (
 )
 
 func runProxy(block bool, noBrowser bool) {
- kubernetesProvider, err := getKubernetesProviderForCli()
+ kubernetesProvider, err := getKubernetesProviderForCli(false)
 if err != nil {
 return
 }
diff --git a/cmd/tapRunner.go b/cmd/tapRunner.go
index 21118bbd8..500e08048 100644
--- a/cmd/tapRunner.go
+++ b/cmd/tapRunner.go
@@ -62,7 +62,7 @@ func tap() {
 connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
 
- kubernetesProvider, err := getKubernetesProviderForCli()
+ kubernetesProvider, err := getKubernetesProviderForCli(false)
 if err != nil {
 return
 }
diff --git a/config/config.go b/config/config.go
index 1fb2de7a0..59d1eabaf 100644
--- a/config/config.go
+++ b/config/config.go
@@ -52,13 +52,23 @@ func InitConfig(cmd *cobra.Command) error {
 return nil
 }
 
- if cmd.Use != "console" && cmd.Use != "pro" {
+ if !utils.Contains([]string{
+ "console",
+ "pro",
+ "manifests",
+ }, cmd.Use) {
 go version.CheckNewerVersion()
 }
 
 Config = CreateDefaultConfig()
 cmdName = cmd.Name()
- if utils.Contains([]string{"clean", "console", "pro", "proxy", "scripts"}, cmdName) {
+ if utils.Contains([]string{
+ "clean",
+ "console",
+ "pro",
+ "proxy",
+ "scripts",
+ }, cmdName) {
 cmdName = "tap"
 }
 
@@ -67,7 +77,9 @@ func InitConfig(cmd *cobra.Command) error {
 }
 
 ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
- if err := loadConfigFile(&Config); err != nil {
+ if err := loadConfigFile(&Config, utils.Contains([]string{
+ "manifests",
+ }, cmd.Use)); err != nil {
 if !os.IsNotExist(err) {
 return fmt.Errorf("invalid config, %w\n"+
 "you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, ConfigFilePath)
@@ -115,7 +127,7 @@ func WriteConfig(config *ConfigStruct) error {
 return nil
 }
 
-func loadConfigFile(config *ConfigStruct) error {
+func loadConfigFile(config *ConfigStruct, silent bool) error {
 cwd, err := os.Getwd()
 if err != nil {
 return err
@@ -141,7 +153,9 @@ func loadConfigFile(config *ConfigStruct, silent bool) error {
 return err
 }
 
- log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
+ if !silent {
+ log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
+ }
 
 return nil
 }
diff --git a/config/configStruct.go b/config/configStruct.go
index 3a23dd94d..3c5bfde78 100644
--- a/config/configStruct.go
+++ b/config/configStruct.go
@@ -23,6 +23,10 @@ type KubeConfig struct {
 Context string `yaml:"context"`
 }
 
+type ManifestsConfig struct {
+ Dump bool `yaml:"dump"`
+}
+
 type ConfigStruct struct {
 Tap configStructs.TapConfig `yaml:"tap"`
 Logs configStructs.LogsConfig `yaml:"logs"`
@@ -34,6 +38,7 @@ type ConfigStruct struct {
 Scripting configStructs.ScriptingConfig `yaml:"scripting"`
 ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
 NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
+ Manifests ManifestsConfig `yaml:"manifests,omitempty"`
 }
 
 func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
diff --git a/kubernetes/provider.go b/kubernetes/provider.go
index 6326776eb..175a9ae10 100644
--- a/kubernetes/provider.go
+++ b/kubernetes/provider.go
@@ -156,14 +156,20 @@ func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name str
 return err
 }
 
-func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*core.Namespace, error) {
- namespaceSpec := &core.Namespace{
+func (provider *Provider) BuildNamespace(name string) *core.Namespace {
+ return &core.Namespace{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Namespace",
+ APIVersion: "v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
 Name: name,
 Labels: buildWithDefaultLabels(map[string]string{}, provider),
 },
 }
- return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
+}
+
+func (provider *Provider) CreateNamespace(ctx context.Context, namespace *core.Namespace) (*core.Namespace, error) {
+ return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{})
 }
 
 type PodOptions struct {
@@ -223,6 +229,9 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
 }
 
 pod := &core.Pod{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Pod",
+ APIVersion: "v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
 Name: opts.PodName,
 Labels: buildWithDefaultLabels(map[string]string{
@@ -230,6 +239,7 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
 }, provider),
 },
 Spec: core.PodSpec{
+ ServiceAccountName: opts.ServiceAccountName,
 Containers: containers,
 DNSPolicy: core.DNSClusterFirstWithHostNet,
 TerminationGracePeriodSeconds: new(int64),
@@ -257,10 +267,6 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
 }
 }
 
- //define the service account only when it exists to prevent pod crash
- if opts.ServiceAccountName != "" {
- pod.Spec.ServiceAccountName = opts.ServiceAccountName
- }
 return pod, nil
 }
@@ -330,6 +336,9 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
 }
 
 pod := &core.Pod{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Pod",
+ APIVersion: "v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
 Name: opts.PodName,
 Labels: buildWithDefaultLabels(map[string]string{
@@ -337,6 +346,7 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
 }, provider),
 },
 Spec: core.PodSpec{
+ ServiceAccountName: opts.ServiceAccountName,
 Containers: containers,
 Volumes: volumes,
 DNSPolicy: core.DNSClusterFirstWithHostNet,
@@ -365,10 +375,6 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
 }
 }
 
- //define the service account only when it exists to prevent pod crash
- if opts.ServiceAccountName != "" {
- pod.Spec.ServiceAccountName = opts.ServiceAccountName
- }
 return pod, nil
 }
@@ -376,25 +382,54 @@ func (provider *Provider) CreatePod(ctx context.Context, namespace string, podSp
 return provider.clientSet.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
 }
 
-func (provider *Provider) CreateService(ctx context.Context, namespace string, serviceName string, appLabelValue string, targetPort int, port int32) (*core.Service, error) {
- service := core.Service{
+func (provider *Provider) BuildHubService(namespace string) *core.Service {
+ return &core.Service{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Service",
+ APIVersion: "v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
+ Name: HubServiceName,
 Labels: buildWithDefaultLabels(map[string]string{}, provider),
 },
 Spec: core.ServiceSpec{
 Ports: []core.ServicePort{
 {
- Name: serviceName,
- TargetPort: intstr.FromInt(targetPort),
- Port: port,
+ Name: HubServiceName,
+ TargetPort: intstr.FromInt(80),
+ Port: 80,
 },
 },
 Type: core.ServiceTypeClusterIP,
- Selector: map[string]string{"app": appLabelValue},
+ Selector: map[string]string{"app": HubServiceName},
 },
 }
- return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
+}
+
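+// BuildFrontService builds the ClusterIP Service in front of the front pod.
+// The container always serves on port 80, while the Service port comes from
+// the front proxy DstPort in the config.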
+func (provider *Provider) BuildFrontService(namespace string) *core.Service {
+ return &core.Service{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Service",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: FrontServiceName,
+ Labels: buildWithDefaultLabels(map[string]string{}, provider),
+ },
+ Spec: core.ServiceSpec{
+ Ports: []core.ServicePort{
+ {
+ Name: FrontServiceName,
+ TargetPort: intstr.FromInt(80),
+ Port: int32(config.Config.Tap.Proxy.Front.DstPort),
+ },
+ },
+ Type: core.ServiceTypeClusterIP,
+ Selector: map[string]string{"app": FrontServiceName},
+ },
+ }
+}
+
+func (provider *Provider) CreateService(ctx context.Context, namespace string, service *core.Service) (*core.Service, error) {
+ return provider.clientSet.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
 }
 
 func (provider *Provider) CanI(ctx context.Context, namespace string, resource string, verb string, group string) (bool, error) {
@@ -465,79 +500,29 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
 return resource != nil, nil
 }
 
-func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error {
- serviceAccount := &core.ServiceAccount{
+func (provider *Provider) BuildServiceAccount() *core.ServiceAccount {
+ return &core.ServiceAccount{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ServiceAccount",
+ APIVersion: "v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
- Name: serviceAccountName,
+ Name: ServiceAccountName,
 Labels: buildWithDefaultLabels(map[string]string{
- fmt.Sprintf("%s-cli-version", misc.Program): version,
+ fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
 }, provider),
 },
 }
- clusterRole := &rbac.ClusterRole{
- ObjectMeta: metav1.ObjectMeta{
- Name: clusterRoleName,
- Labels: buildWithDefaultLabels(map[string]string{
- fmt.Sprintf("%s-cli-version", misc.Program): version,
- }, provider),
- },
- Rules: []rbac.PolicyRule{
- {
- APIGroups: []string{"", "extensions", "apps"},
- Resources: resources,
- Verbs: []string{"list", "get", "watch"},
- },
- },
- }
- clusterRoleBinding := &rbac.ClusterRoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Name: clusterRoleBindingName,
- Labels: buildWithDefaultLabels(map[string]string{
- fmt.Sprintf("%s-cli-version", misc.Program): version,
- }, provider),
- },
- RoleRef: rbac.RoleRef{
- Name: clusterRoleName,
- Kind: "ClusterRole",
- APIGroup: "rbac.authorization.k8s.io",
- },
- Subjects: []rbac.Subject{
- {
- Kind: "ServiceAccount",
- Name: serviceAccountName,
- Namespace: namespace,
- },
- },
- }
- _, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
- if err != nil && !k8serrors.IsAlreadyExists(err) {
- return err
- }
- _, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
- if err != nil && !k8serrors.IsAlreadyExists(err) {
- return err
- }
- _, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
- if err != nil && !k8serrors.IsAlreadyExists(err) {
- return err
- }
- return nil
 }
 
-func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error {
- serviceAccount := &core.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: serviceAccountName,
- Labels: buildWithDefaultLabels(map[string]string{
- fmt.Sprintf("%s-cli-version", misc.Program): version,
- }, provider),
- },
- }
- role := &rbac.Role{
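+// BuildClusterRole builds the ClusterRole that both the tap and manifests
+// flows now share; the policy rules themselves (list/get/watch) are carried
+// over unchanged from the previous RBAC code.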
+func (provider *Provider) BuildClusterRole() *rbac.ClusterRole {
+ return &rbac.ClusterRole{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterRole",
+ APIVersion: "rbac.authorization.k8s.io/v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
- Name: roleName,
+ Name: ClusterRoleName,
 Labels: buildWithDefaultLabels(map[string]string{
- fmt.Sprintf("%s-cli-version", misc.Program): version,
+ fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
 }, provider),
 },
 Rules: []rbac.PolicyRule{
@@ -548,35 +533,48 @@ func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context,
 },
 },
 }
- roleBinding := &rbac.RoleBinding{
+}
+
+func (provider *Provider) BuildClusterRoleBinding() *rbac.ClusterRoleBinding {
+ return &rbac.ClusterRoleBinding{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterRoleBinding",
+ APIVersion: "rbac.authorization.k8s.io/v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
- Name: roleBindingName,
+ Name: ClusterRoleBindingName,
 Labels: buildWithDefaultLabels(map[string]string{
- fmt.Sprintf("%s-cli-version", misc.Program): version,
+ fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
 }, provider),
 },
 RoleRef: rbac.RoleRef{
- Name: roleName,
- Kind: "Role",
+ Name: ClusterRoleName,
+ Kind: "ClusterRole",
 APIGroup: "rbac.authorization.k8s.io",
 },
 Subjects: []rbac.Subject{
 {
 Kind: "ServiceAccount",
- Name: serviceAccountName,
- Namespace: namespace,
+ Name: ServiceAccountName,
+ Namespace: config.Config.Tap.SelfNamespace,
 },
 },
 }
+}
+
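+// CreateSelfRBAC creates the ServiceAccount, ClusterRole and
+// ClusterRoleBinding built above, tolerating AlreadyExists errors so that
+// repeated runs stay idempotent.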
+func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string) error {
+ serviceAccount := provider.BuildServiceAccount()
+ clusterRole := provider.BuildClusterRole()
+ clusterRoleBinding := provider.BuildClusterRoleBinding()
+
 _, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
 if err != nil && !k8serrors.IsAlreadyExists(err) {
 return err
 }
- _, err = provider.clientSet.RbacV1().Roles(namespace).Create(ctx, role, metav1.CreateOptions{})
+ _, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
 if err != nil && !k8serrors.IsAlreadyExists(err) {
 return err
 }
- _, err = provider.clientSet.RbacV1().RoleBindings(namespace).Create(ctx, roleBinding, metav1.CreateOptions{})
+ _, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
 if err != nil && !k8serrors.IsAlreadyExists(err) {
 return err
 }
@@ -644,7 +642,6 @@ func (provider *Provider) handleRemovalError(err error) error {
 }
 
 func (provider *Provider) BuildWorkerDaemonSet(
- ctx context.Context,
 podImage string,
 podName string,
 serviceAccountName string,
@@ -798,6 +795,9 @@ func (provider *Provider) BuildWorkerDaemonSet(
 // Pod
 pod := core.Pod{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Pod",
+ APIVersion: "v1",
+ },
 ObjectMeta: metav1.ObjectMeta{
 Name: podName,
 Labels: buildWithDefaultLabels(map[string]string{
@@ -805,6 +805,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
 }, provider),
 },
 Spec: core.PodSpec{
+ ServiceAccountName: serviceAccountName,
 HostNetwork: true,
 Containers: containers,
 Volumes: []core.Volume{procfsVolume, sysfsVolume},
@@ -826,6 +827,9 @@ func (provider *Provider) BuildWorkerDaemonSet(
 }
 
 return &DaemonSet{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "DaemonSet",
+ APIVersion: "apps/v1",
+ },
 Spec: DaemonSetSpec{
 Selector: metav1.LabelSelector{
 MatchLabels: buildWithDefaultLabels(map[string]string{
@@ -859,7 +863,6 @@ func (provider *Provider) ApplyWorkerDaemonSet(
 Msg("Applying worker DaemonSets.")
 
 daemonSet, err := provider.BuildWorkerDaemonSet(
- ctx,
 podImage,
 podName,
 serviceAccountName,
diff --git a/resources/createResources.go b/resources/createResources.go
index 29bf88730..09e5d74a8 100644
--- a/resources/createResources.go
+++ b/resources/createResources.go
@@ -21,23 +21,18 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
 }
 }
 
- selfServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"})
+ err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace)
+ selfServiceAccountExists := err == nil
 if err != nil {
 log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
 }
 
- var serviceAccountName string
- if selfServiceAccountExists {
- serviceAccountName = kubernetes.ServiceAccountName
- } else {
- serviceAccountName = ""
- }
-
- opts := &kubernetes.PodOptions{
+ hubOpts := &kubernetes.PodOptions{
 Namespace: selfNamespace,
 PodName: kubernetes.HubPodName,
 PodImage: docker.GetHubImage(),
- ServiceAccountName: serviceAccountName,
+ ServiceAccountName: kubernetes.ServiceAccountName,
 Resources: hubResources,
 ImagePullPolicy: imagePullPolicy,
 ImagePullSecrets: imagePullSecrets,
@@ -48,14 +43,14 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
 Namespace: selfNamespace,
 PodName: kubernetes.FrontPodName,
 PodImage: docker.GetWorkerImage(),
- ServiceAccountName: serviceAccountName,
+ ServiceAccountName: kubernetes.ServiceAccountName,
 Resources: hubResources,
 ImagePullPolicy: imagePullPolicy,
 ImagePullSecrets: imagePullSecrets,
 Debug: debug,
 }
 
- if err := createSelfHubPod(ctx, kubernetesProvider, opts); err != nil {
+ if err := createSelfHubPod(ctx, kubernetesProvider, hubOpts); err != nil {
 return selfServiceAccountExists, err
 }
 
@@ -64,14 +59,14 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
 }
 
 // TODO: Why the port values need to be 80?
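+ // Both Services are now built by the shared Build*Service helpers, so
+ // `tap` and `manifests` emit identical Service objects.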
- _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80) + _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace)) if err != nil { return selfServiceAccountExists, err } log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.") - _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort)) + _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace)) if err != nil { return selfServiceAccountExists, err } @@ -82,24 +77,10 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov } func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error { - _, err := kubernetesProvider.CreateNamespace(ctx, selfNamespace) + _, err := kubernetesProvider.CreateNamespace(ctx, kubernetesProvider.BuildNamespace(selfNamespace)) return err } -func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) { - if !isNsRestrictedMode { - if err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil { - return false, err - } - } else { - if err := kubernetesProvider.CreateSelfRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err != nil { - return false, err - } - } - - return true, nil -} - func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error { pod, err := kubernetesProvider.BuildHubPod(opts) if err != nil { diff --git a/utils/pretty.go b/utils/pretty.go index fde61e3e0..109033871 100644 --- a/utils/pretty.go +++ b/utils/pretty.go @@ -35,3 +35,18 @@ func PrettyYaml(data interface{}) (string, error) { } return buffer.String(), nil } + +func PrettyYamlOmitEmpty(data interface{}) (string, error) { + d, err := json.Marshal(data) + if err != nil { + return empty, err + } + + var cleanData map[string]interface{} + err = json.Unmarshal(d, &cleanData) + if err != nil { + return empty, err + } + + return PrettyYaml(cleanData) +}