Add manifests command to generate Kubernetes manifests

This commit is contained in:
M. Mert Yildiran 2023-04-11 01:54:06 +03:00
parent d1b17d4534
commit 5dafc015bb
11 changed files with 331 additions and 133 deletions
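The new manifests command reuses the tap-time resource builders to emit everything Kubeshark would otherwise create in the cluster: by default it prints all nine objects (Namespace, ServiceAccount, ClusterRole, ClusterRoleBinding, Hub Pod and Service, Front Pod and Service, worker DaemonSet) to stdout as a single ---separated YAML stream; with --dump it writes them as numbered files under ./manifests instead.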

View File

@@ -5,7 +5,7 @@ import (
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false)
if err != nil {
return
}

View File

@@ -58,7 +58,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
}
func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
func getKubernetesProviderForCli(silent bool) (*kubernetes.Provider, error) {
kubeConfigPath := config.Config.KubeConfigPath()
kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
if err != nil {
@@ -66,7 +66,9 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
return nil, err
}
log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
if !silent {
log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
}
if err := kubernetesProvider.ValidateNotProxy(); err != nil {
handleKubernetesProviderError(err)
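Every existing caller (clean, logs, proxy, tap) passes false here; only the new manifests command passes true, presumably so that informational log lines stay out of its otherwise pure-YAML output.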

View File

@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
Use: "logs",
Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
RunE: func(cmd *cobra.Command, args []string) error {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false)
if err != nil {
return nil
}

cmd/manifests.go (new file, 178 lines)
View File

@@ -0,0 +1,178 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"sort"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
const manifestSeparator = "---"
var manifestsCmd = &cobra.Command{
Use: "manifests",
Short: "Generate Kubernetes manifests of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runManifests()
return nil
},
}
func init() {
rootCmd.AddCommand(manifestsCmd)
defaultManifestsConfig := config.ManifestsConfig{}
if err := defaults.Set(&defaultManifestsConfig); err != nil {
log.Debug().Err(err).Send()
}
manifestsCmd.Flags().Bool("dump", defaultManifestsConfig.Dump, "Dump the manifests into a folder instead of printing them")
}
func runManifests() {
kubernetesProvider, err := getKubernetesProviderForCli(true)
if err != nil {
log.Error().Err(err).Send()
return
}
namespace := kubernetesProvider.BuildNamespace(config.Config.Tap.SelfNamespace)
serviceAccount := kubernetesProvider.BuildServiceAccount()
clusterRole := kubernetesProvider.BuildClusterRole()
clusterRoleBinding := kubernetesProvider.BuildClusterRoleBinding()
hubPod, err := kubernetesProvider.BuildHubPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
})
if err != nil {
log.Error().Err(err).Send()
return
}
hubService := kubernetesProvider.BuildHubService(config.Config.Tap.SelfNamespace)
frontPod, err := kubernetesProvider.BuildFrontPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
if err != nil {
log.Error().Err(err).Send()
return
}
frontService := kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
workerDaemonSet, err := kubernetesProvider.BuildWorkerDaemonSet(
kubernetes.WorkerDaemonSetName,
kubernetes.WorkerPodName,
kubernetes.ServiceAccountName,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
log.Error().Err(err).Send()
return
}
if config.Config.Manifests.Dump {
err = dumpManifests(map[string]interface{}{
"00-namespace.yaml": namespace,
"01-service-account.yaml": serviceAccount,
"02-cluster-role.yaml": clusterRole,
"03-cluster-role-binding.yaml": clusterRoleBinding,
"04-hub-pod.yaml": hubPod,
"05-hub-service.yaml": hubService,
"06-front-pod.yaml": frontPod,
"07-front-service.yaml": frontService,
"08-worker-daemon-set.yaml": workerDaemonSet,
})
} else {
err = printManifests([]interface{}{
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
workerDaemonSet,
})
}
if err != nil {
log.Error().Err(err).Send()
return
}
}
func dumpManifests(objects map[string]interface{}) error {
folder := filepath.Join(".", "manifests")
err := os.MkdirAll(folder, os.ModePerm)
if err != nil {
return err
}
// Sort by filename so the numeric prefixes (00-, 01-, ...) give a deterministic apply order
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
path := filepath.Join(folder, filename)
err = os.WriteFile(path, []byte(manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Manifest generated: %s", path)
}
return nil
}
func printManifests(objects []interface{}) error {
for _, object := range objects {
manifest, err := utils.PrettyYamlOmitEmpty(object)
if err != nil {
return err
}
fmt.Println(manifestSeparator)
fmt.Println(manifest)
}
return nil
}
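Because every object is preceded by the --- document separator, the printed output is one multi-document YAML stream that can be piped straight into kubectl apply -f -. A minimal, self-contained sketch of the same print loop (gopkg.in/yaml.v3 stands in for the utils helper, and the two map literals are stand-in objects; both are assumptions made for the sake of a runnable example):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Stand-ins for the built Kubernetes objects; any marshalable value works.
	objects := []interface{}{
		map[string]interface{}{"kind": "Namespace", "metadata": map[string]string{"name": "kubeshark"}},
		map[string]interface{}{"kind": "ServiceAccount", "metadata": map[string]string{"name": "example"}},
	}
	for _, object := range objects {
		out, err := yaml.Marshal(object)
		if err != nil {
			panic(err)
		}
		fmt.Println("---") // YAML document separator, same role as manifestSeparator
		fmt.Print(string(out))
	}
}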

View File

@@ -15,7 +15,7 @@ import (
)
func runProxy(block bool, noBrowser bool) {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false)
if err != nil {
return
}

View File

@@ -62,7 +62,7 @@ func tap() {
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false)
if err != nil {
return
}

View File

@@ -52,13 +52,23 @@ func InitConfig(cmd *cobra.Command) error {
return nil
}
if cmd.Use != "console" && cmd.Use != "pro" {
if !utils.Contains([]string{
"console",
"pro",
"manifests",
}, cmd.Use) {
go version.CheckNewerVersion()
}
Config = CreateDefaultConfig()
cmdName = cmd.Name()
if utils.Contains([]string{"clean", "console", "pro", "proxy", "scripts"}, cmdName) {
if utils.Contains([]string{
"clean",
"console",
"pro",
"proxy",
"scripts",
}, cmdName) {
cmdName = "tap"
}
@@ -67,7 +77,9 @@ func InitConfig(cmd *cobra.Command) error {
}
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
if err := loadConfigFile(&Config); err != nil {
if err := loadConfigFile(&Config, utils.Contains([]string{
"manifests",
}, cmd.Use)); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("invalid config, %w\n"+
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, ConfigFilePath)
@@ -115,7 +127,7 @@ func WriteConfig(config *ConfigStruct) error {
return nil
}
func loadConfigFile(config *ConfigStruct) error {
func loadConfigFile(config *ConfigStruct, silent bool) error {
cwd, err := os.Getwd()
if err != nil {
return err
@@ -141,7 +153,9 @@ func loadConfigFile(config *ConfigStruct) error {
return err
}
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
if !silent {
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
}
return nil
}

View File

@@ -23,6 +23,10 @@ type KubeConfig struct {
Context string `yaml:"context"`
}
type ManifestsConfig struct {
Dump bool `yaml:"dump"`
}
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
@@ -34,6 +38,7 @@ type ConfigStruct struct {
Scripting configStructs.ScriptingConfig `yaml:"scripting"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
Manifests ManifestsConfig `yaml:"manifests,omitempty"`
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
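The --dump flag registered in cmd/manifests.go takes its default from this struct, and runManifests reads config.Config.Manifests.Dump to choose between printing the stream and dumping numbered files, so the behavior can be set from the flag or, since the field loads with the rest of the config, from config.yaml.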

View File

@@ -156,14 +156,20 @@ func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name str
return err
}
func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*core.Namespace, error) {
namespaceSpec := &core.Namespace{
func (provider *Provider) BuildNamespace(name string) *core.Namespace {
return &core.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
}
return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
}
func (provider *Provider) CreateNamespace(ctx context.Context, namespace *core.Namespace) (*core.Namespace, error) {
return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{})
}
type PodOptions struct {
@@ -223,6 +229,9 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
}
pod := &core.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Labels: buildWithDefaultLabels(map[string]string{
@@ -230,6 +239,7 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
Containers: containers,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
@@ -257,10 +267,6 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
}
}
//define the service account only when it exists to prevent pod crash
if opts.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = opts.ServiceAccountName
}
return pod, nil
}
@@ -330,6 +336,9 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
}
pod := &core.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Labels: buildWithDefaultLabels(map[string]string{
@@ -337,6 +346,7 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
Containers: containers,
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
@@ -365,10 +375,6 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
}
}
//define the service account only when it exists to prevent pod crash
if opts.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = opts.ServiceAccountName
}
return pod, nil
}
@@ -376,25 +382,54 @@ func (provider *Provider) CreatePod(ctx context.Context, namespace string, podSp
return provider.clientSet.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
}
func (provider *Provider) CreateService(ctx context.Context, namespace string, serviceName string, appLabelValue string, targetPort int, port int32) (*core.Service, error) {
service := core.Service{
func (provider *Provider) BuildHubService(namespace string) *core.Service {
return &core.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Name: HubServiceName,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: serviceName,
TargetPort: intstr.FromInt(targetPort),
Port: port,
Name: HubServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
},
},
Type: core.ServiceTypeClusterIP,
Selector: map[string]string{"app": appLabelValue},
Selector: map[string]string{"app": HubServiceName},
},
}
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
}
func (provider *Provider) BuildFrontService(namespace string) *core.Service {
return &core.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Name: FrontServiceName,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: FrontServiceName,
TargetPort: intstr.FromInt(80),
Port: int32(config.Config.Tap.Proxy.Front.DstPort),
},
},
Type: core.ServiceTypeClusterIP,
Selector: map[string]string{"app": FrontServiceName},
},
}
}
func (provider *Provider) CreateService(ctx context.Context, namespace string, service *core.Service) (*core.Service, error) {
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
}
func (provider *Provider) CanI(ctx context.Context, namespace string, resource string, verb string, group string) (bool, error) {
@@ -465,79 +500,29 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
return resource != nil, nil
}
func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error {
serviceAccount := &core.ServiceAccount{
func (provider *Provider) BuildServiceAccount() *core.ServiceAccount {
return &core.ServiceAccount{
TypeMeta: metav1.TypeMeta{
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Name: ServiceAccountName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
}
clusterRole := &rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: resources,
Verbs: []string{"list", "get", "watch"},
},
},
}
clusterRoleBinding := &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleBindingName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
RoleRef: rbac.RoleRef{
Name: clusterRoleName,
Kind: "ClusterRole",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
return nil
}
func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error {
serviceAccount := &core.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
func (provider *Provider) BuildClusterRole() *rbac.ClusterRole {
return &rbac.ClusterRole{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterRole",
},
}
role := &rbac.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Name: ClusterRoleName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
Rules: []rbac.PolicyRule{
@@ -548,35 +533,48 @@ func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context,
},
},
}
roleBinding := &rbac.RoleBinding{
}
func (provider *Provider) BuildClusterRoleBinding() *rbac.ClusterRoleBinding {
return &rbac.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: roleBindingName,
Name: ServiceAccountName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
RoleRef: rbac.RoleRef{
Name: roleName,
Kind: "Role",
Name: ClusterRoleName,
Kind: "ClusterRole",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
Name: ServiceAccountName,
Namespace: config.Config.Tap.SelfNamespace,
},
},
}
}
func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string) error {
serviceAccount := provider.BuildServiceAccount()
clusterRole := provider.BuildClusterRole()
clusterRoleBinding := provider.BuildClusterRoleBinding()
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().Roles(namespace).Create(ctx, role, metav1.CreateOptions{})
_, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().RoleBindings(namespace).Create(ctx, roleBinding, metav1.CreateOptions{})
_, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
@@ -644,7 +642,6 @@ func (provider *Provider) handleRemovalError(err error) error {
}
func (provider *Provider) BuildWorkerDaemonSet(
ctx context.Context,
podImage string,
podName string,
serviceAccountName string,
@@ -798,6 +795,9 @@ func (provider *Provider) BuildWorkerDaemonSet(
// Pod
pod := core.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: buildWithDefaultLabels(map[string]string{
@@ -805,6 +805,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
HostNetwork: true,
Containers: containers,
Volumes: []core.Volume{procfsVolume, sysfsVolume},
@@ -826,6 +827,9 @@ func (provider *Provider) BuildWorkerDaemonSet(
}
return &DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
},
Spec: DaemonSetSpec{
Selector: metav1.LabelSelector{
MatchLabels: buildWithDefaultLabels(map[string]string{
@@ -859,7 +863,6 @@ func (provider *Provider) ApplyWorkerDaemonSet(
Msg("Applying worker DaemonSets.")
daemonSet, err := provider.BuildWorkerDaemonSet(
ctx,
podImage,
podName,
serviceAccountName,
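The thread running through this file: each resource now has a pure Build* method that only constructs the typed object (with an explicit TypeMeta Kind, so the serialized manifest is valid on its own) and a thin Create* wrapper that submits a prebuilt object to the API server. A sketch of how the two halves compose; ensureNamespace is a hypothetical helper, and the imports of the repo's kubernetes and core packages are assumed:

// Hypothetical helper showing the build/apply split from this diff.
func ensureNamespace(ctx context.Context, provider *kubernetes.Provider, name string) error {
	ns := provider.BuildNamespace(name)         // pure construction: *core.Namespace with Kind and default labels
	_, err := provider.CreateNamespace(ctx, ns) // API call: only this half needs a cluster
	return err
}

The manifests command calls only the Build half and serializes the result; tap keeps calling both.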

View File

@@ -21,23 +21,18 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
}
}
selfServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"})
err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace)
var selfServiceAccountExists bool
if err != nil {
selfServiceAccountExists = true
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
}
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = kubernetes.ServiceAccountName
} else {
serviceAccountName = ""
}
opts := &kubernetes.PodOptions{
hubOpts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: serviceAccountName,
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
@@ -48,14 +43,14 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
Namespace: selfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetWorkerImage(),
ServiceAccountName: serviceAccountName,
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
if err := createSelfHubPod(ctx, kubernetesProvider, opts); err != nil {
if err := createSelfHubPod(ctx, kubernetesProvider, hubOpts); err != nil {
return selfServiceAccountExists, err
}
@@ -64,14 +59,14 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
}
// TODO: Why do the port values need to be 80?
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80)
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort))
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
@@ -82,24 +77,10 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
}
func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
_, err := kubernetesProvider.CreateNamespace(ctx, selfNamespace)
_, err := kubernetesProvider.CreateNamespace(ctx, kubernetesProvider.BuildNamespace(selfNamespace))
return err
}
func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) {
if !isNsRestrictedMode {
if err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil {
return false, err
}
} else {
if err := kubernetesProvider.CreateSelfRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err != nil {
return false, err
}
}
return true, nil
}
func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildHubPod(opts)
if err != nil {
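Note the behavioral consolidation here: createRBACIfNecessary and the namespace-restricted Role/RoleBinding path are gone, so CreateSelfRBAC now always provisions the cluster-wide ServiceAccount, ClusterRole, and ClusterRoleBinding, and both pods unconditionally reference kubernetes.ServiceAccountName rather than falling back to an empty service account name.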

View File

@@ -35,3 +35,18 @@ func PrettyYaml(data interface{}) (string, error) {
}
return buffer.String(), nil
}
func PrettyYamlOmitEmpty(data interface{}) (string, error) {
d, err := json.Marshal(data)
if err != nil {
return empty, err
}
var cleanData map[string]interface{}
err = json.Unmarshal(d, &cleanData)
if err != nil {
return empty, err
}
return PrettyYaml(cleanData)
}