Mirror of https://github.com/kubeshark/kubeshark.git (synced 2025-09-17 07:16:09 +00:00)
✨ Use the Helm chart in `tap` command to install Kubeshark (#1362)
* ✨ Use the Helm chart in `tap` command to install Kubeshark
* ⬆️ Set Go version to `1.19` in `go.mod` file
* ✨ Add `Helm` struct, `NewHelm` and `NewHelmDefault` methods
* ⚡ Better logging and error return
* ⚡ Pass the config as `values.yaml` to Helm install
* 🔥 Remove `helm-chart`, `manifests` and `check` commands
* ➖ Run `go mod tidy`
* 🎨 Move `helm` package into `kubernetes` package
* 🔥 Remove `# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!` notice from the manifests and Helm templates
* 🔥 Remove the unused `GenerateApplyConfiguration` and `buildWithDefaultLabels` methods
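Editor's note: the new `kubernetes/helm` package itself is not part of this diff. Its call sites below (`helm.NewHelmDefault().Install()` in `tap`, `helm.NewHelmDefault().Uninstall()` in `clean`) and the `helm.sh/helm/v3 v3.12.0` dependency added to `go.mod` suggest roughly the following shape. This is a sketch built on the public `helm.sh/helm/v3` action API; the struct fields, chart path, and default names are assumptions, not the actual implementation.

// Package helm: a minimal sketch, assuming the action-based Helm SDK.
package helm

import (
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
	"helm.sh/helm/v3/pkg/release"
)

// Helm wraps the Helm SDK with the release coordinates used by the CLI.
type Helm struct {
	chartPath   string // local chart location; an assumption in this sketch
	releaseName string
	namespace   string
}

func NewHelm(chartPath string, releaseName string, namespace string) *Helm {
	return &Helm{chartPath: chartPath, releaseName: releaseName, namespace: namespace}
}

// NewHelmDefault returns a Helm value with project defaults (names assumed).
func NewHelmDefault() *Helm {
	return NewHelm("./helm-chart", "kubeshark", "kubeshark")
}

func (h *Helm) config() (*action.Configuration, error) {
	settings := cli.New()
	actionConfig := new(action.Configuration)
	// "secret" is Helm's default release-storage driver.
	if err := actionConfig.Init(settings.RESTClientGetter(), h.namespace, "secret", func(format string, v ...interface{}) {}); err != nil {
		return nil, err
	}
	return actionConfig, nil
}

// Install loads the chart and installs it. Per the commit message, the CLI
// config would be marshaled and passed here as the values map.
func (h *Helm) Install() (*release.Release, error) {
	actionConfig, err := h.config()
	if err != nil {
		return nil, err
	}
	chrt, err := loader.Load(h.chartPath)
	if err != nil {
		return nil, err
	}
	install := action.NewInstall(actionConfig)
	install.ReleaseName = h.releaseName
	install.Namespace = h.namespace
	install.CreateNamespace = true
	return install.Run(chrt, map[string]interface{}{}) // values elided in this sketch
}

// Uninstall deletes the release; resp.Release.Name is what clean.go logs.
func (h *Helm) Uninstall() (*release.UninstallReleaseResponse, error) {
	actionConfig, err := h.config()
	if err != nil {
		return nil, err
	}
	return action.NewUninstall(actionConfig).Run(h.releaseName)
}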
cmd/check.go (21 lines removed)
@@ -1,21 +0,0 @@
-package cmd
-
-import (
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/spf13/cobra"
-)
-
-var checkCmd = &cobra.Command{
-	Use:   "check",
-	Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		runCheck()
-		return nil
-	},
-}
-
-func init() {
-	rootCmd.AddCommand(checkCmd)
-}
@@ -1,28 +0,0 @@
-package check
-
-import (
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/semver"
-	"github.com/rs/zerolog/log"
-)
-
-func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
-	log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")
-
-	kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.Kube.Context)
-	if err != nil {
-		log.Error().Err(err).Msg("Can't initialize the client!")
-		return nil, nil, false
-	}
-	log.Info().Msg("Initialization of the client is passed.")
-
-	kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
-	if err != nil {
-		log.Error().Err(err).Msg("Can't query the Kubernetes API!")
-		return nil, nil, false
-	}
-	log.Info().Msg("Querying the Kubernetes API is passed.")
-
-	return kubernetesProvider, kubernetesVersion, true
-}
@@ -1,59 +0,0 @@
-package check
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/rs/zerolog/log"
-	rbac "k8s.io/api/rbac/v1"
-)
-
-func KubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
-	return checkRulesPermissions(ctx, kubernetesProvider, kubernetesProvider.BuildClusterRole().Rules, "")
-}
-
-func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
-	permissionsExist := true
-
-	for _, rule := range rules {
-		for _, group := range rule.APIGroups {
-			for _, resource := range rule.Resources {
-				for _, verb := range rule.Verbs {
-					exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
-					permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
-				}
-			}
-		}
-	}
-
-	return permissionsExist
-}
-
-func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
-	var groupAndNamespace string
-	if group != "" && namespace != "" {
-		groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
-	} else if group != "" {
-		groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
-	} else if namespace != "" {
-		groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
-	}
-
-	if err != nil {
-		log.Error().
-			Str("verb", verb).
-			Str("resource", resource).
-			Str("group-and-namespace", groupAndNamespace).
-			Err(err).
-			Msg("While checking Kubernetes permissions!")
-		return false
-	} else if !exist {
-		log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace))
-		return false
-	}
-
-	log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace))
-	return true
-}
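Editor's note: `CanI` is called by the deleted `checkRulesPermissions` above, but its body is not part of this diff. With client-go this kind of check is typically a SelfSubjectAccessReview; a sketch under that assumption (function and parameter names are hypothetical):

package kubernetes

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sclient "k8s.io/client-go/kubernetes"
)

// canI asks the API server whether the current identity may perform
// verb on resource in the given namespace and API group.
func canI(ctx context.Context, clientset k8sclient.Interface, namespace, resource, verb, group string) (bool, error) {
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: namespace,
				Resource:  resource,
				Verb:      verb,
				Group:     group,
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}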
@@ -1,118 +0,0 @@
-package check
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/rs/zerolog/log"
-)
-
-func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "k8s-components").Msg("Checking:")
-
-	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.Tap.SelfNamespace)
-	allResourcesExist := checkResourceExist(config.Config.Tap.SelfNamespace, "namespace", exist, err)
-
-	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.ServiceAccountName)
-	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
-
-	if config.Config.IsNsRestrictedMode() {
-		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleName)
-		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
-
-		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleBindingName)
-		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
-	} else {
-		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
-		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
-
-		exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
-		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
-	}
-
-	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
-	allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
-
-	allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
-
-	return allResourcesExist
-}
-
-func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubPodName); err != nil {
-		log.Error().
-			Str("name", kubernetes.HubPodName).
-			Err(err).
-			Msg("While checking if pod is running!")
-		return false
-	} else if len(pods) == 0 {
-		log.Error().
-			Str("name", kubernetes.HubPodName).
-			Msg("Pod doesn't exist!")
-		return false
-	} else if !kubernetes.IsPodRunning(&pods[0]) {
-		log.Error().
-			Str("name", kubernetes.HubPodName).
-			Msg("Pod is not running!")
-		return false
-	}
-
-	log.Info().
-		Str("name", kubernetes.HubPodName).
-		Msg("Pod is running.")
-
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.WorkerPodName); err != nil {
-		log.Error().
-			Str("name", kubernetes.WorkerPodName).
-			Err(err).
-			Msg("While checking if pods are running!")
-		return false
-	} else {
-		workers := 0
-		notRunningWorkers := 0
-
-		for _, pod := range pods {
-			workers += 1
-			if !kubernetes.IsPodRunning(&pod) {
-				notRunningWorkers += 1
-			}
-		}
-
-		if notRunningWorkers > 0 {
-			log.Error().
-				Str("name", kubernetes.WorkerPodName).
-				Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
-			return false
-		}
-
-		log.Info().
-			Str("name", kubernetes.WorkerPodName).
-			Msg(fmt.Sprintf("All %d pods are running.", workers))
-		return true
-	}
-}
-
-func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
-	if err != nil {
-		log.Error().
-			Str("name", resourceName).
-			Str("type", resourceType).
-			Err(err).
-			Msg("Checking if resource exists!")
-		return false
-	} else if !exist {
-		log.Error().
-			Str("name", resourceName).
-			Str("type", resourceType).
-			Msg("Resource doesn't exist!")
-		return false
-	}
-
-	log.Info().
-		Str("name", resourceName).
-		Str("type", resourceType).
-		Msg("Resource exist.")
-	return true
-}
@@ -1,22 +0,0 @@
-package check
-
-import (
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/semver"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-)
-
-func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
-	log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")
-
-	if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
-		log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
-		return false
-	}
-
-	log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
-	return true
-}
@@ -1,40 +0,0 @@
-package check
-
-import (
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/internal/connect"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/rs/zerolog/log"
-)
-
-func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "server-connectivity").Msg("Checking:")
-
-	var connectedToHub, connectedToFront bool
-
-	if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), "/echo", kubernetesProvider); err != nil {
-		log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
-	} else {
-		connectedToHub = true
-		log.Info().Msg("Connected successfully to Hub using proxy.")
-	}
-
-	if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port), "", kubernetesProvider); err != nil {
-		log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
-	} else {
-		connectedToFront = true
-		log.Info().Msg("Connected successfully to Front using proxy.")
-	}
-
-	return connectedToHub && connectedToFront
-}
-
-func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
-	log.Info().Str("url", serverUrl).Msg("Connecting:")
-	connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
-	if err := connector.TestConnection(path); err != nil {
-		return err
-	}
-
-	return nil
-}
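Editor's note: `connect.NewConnector` and `TestConnection` are internal and not shown in this diff; a connectivity probe of this kind is usually just a retried HTTP GET. A sketch under stated assumptions (the retry count, timeout, and field names are hypothetical):

package connect

import (
	"fmt"
	"net/http"
	"time"
)

const (
	DefaultRetries = 3               // assumed value
	DefaultTimeout = 2 * time.Second // assumed value
)

type Connector struct {
	url     string
	retries int
	client  *http.Client
}

func NewConnector(url string, retries int, timeout time.Duration) *Connector {
	return &Connector{url: url, retries: retries, client: &http.Client{Timeout: timeout}}
}

// TestConnection polls url+path until a 200 response or the retries run out.
func (c *Connector) TestConnection(path string) error {
	var lastErr error
	for i := 0; i < c.retries; i++ {
		resp, err := c.client.Get(c.url + path)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
			lastErr = fmt.Errorf("unexpected status: %s", resp.Status)
		} else {
			lastErr = err
		}
		time.Sleep(time.Second)
	}
	return lastErr
}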
@@ -1,47 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"fmt"
-	"os"
-
-	"github.com/kubeshark/kubeshark/cmd/check"
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-)
-
-func runCheck() {
-	log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel() // cancel will be called when this function exits
-
-	kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
-
-	if checkPassed {
-		checkPassed = check.KubernetesVersion(kubernetesVersion)
-	}
-
-	if checkPassed {
-		checkPassed = check.KubernetesPermissions(ctx, kubernetesProvider)
-	}
-
-	if checkPassed {
-		checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
-	}
-
-	if checkPassed {
-		checkPassed = check.ServerConnection(kubernetesProvider)
-	}
-
-	if checkPassed {
-		log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
-	} else {
-		log.Error().
-			Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)).
-			Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
-			Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software)))
-		os.Exit(1)
-	}
-}
@@ -5,6 +5,7 @@ import (

 	"github.com/creasty/defaults"
 	"github.com/kubeshark/kubeshark/config/configStructs"
+	"github.com/kubeshark/kubeshark/kubernetes/helm"
 	"github.com/kubeshark/kubeshark/misc"
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
@@ -14,7 +15,12 @@ var cleanCmd = &cobra.Command{
 	Use:   "clean",
 	Short: fmt.Sprintf("Removes all %s resources", misc.Software),
 	RunE: func(cmd *cobra.Command, args []string) error {
-		performCleanCommand()
+		resp, err := helm.NewHelmDefault().Uninstall()
+		if err != nil {
+			log.Error().Err(err).Send()
+		} else {
+			log.Info().Msgf("Uninstalled the Helm release: %s", resp.Release.Name)
+		}
 		return nil
 	},
 }
@@ -1,14 +0,0 @@
-package cmd
-
-import (
-	"github.com/kubeshark/kubeshark/config"
-)
-
-func performCleanCommand() {
-	kubernetesProvider, err := getKubernetesProviderForCli(false, false)
-	if err != nil {
-		return
-	}
-
-	finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, false)
-}
@@ -14,7 +14,6 @@ import (
 	"github.com/kubeshark/kubeshark/kubernetes"
 	"github.com/kubeshark/kubeshark/misc"
 	"github.com/kubeshark/kubeshark/misc/fsUtils"
-	"github.com/kubeshark/kubeshark/resources"
 	"github.com/rs/zerolog/log"
 )

@@ -100,13 +99,10 @@ func handleKubernetesProviderError(err error) {
 	}
 }

-func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, withoutCleanup bool) {
+func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string) {
 	removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
 	defer cancel()
 	dumpLogsIfNeeded(removalCtx, kubernetesProvider)
-	if !withoutCleanup {
-		resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace)
-	}
 }

 func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {
cmd/helmChart.go (488 lines removed)
@@ -1,488 +0,0 @@
-package cmd
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/kubeshark/kubeshark/misc/fsUtils"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/ohler55/ojg/jp"
-	"github.com/ohler55/ojg/oj"
-	"github.com/otiai10/copy"
-	"github.com/rs/zerolog/log"
-	"github.com/spf13/cobra"
-)
-
-var helmChartCmd = &cobra.Command{
-	Use:   "helm-chart",
-	Short: "Generate Helm chart of Kubeshark",
-	RunE: func(cmd *cobra.Command, args []string) error {
-		runHelmChart()
-		return nil
-	},
-}
-
-// Maintainer describes a Chart maintainer.
-type Maintainer struct {
-	// Name is a user name or organization name
-	Name string `json:"name,omitempty"`
-	// Email is an optional email address to contact the named maintainer
-	Email string `json:"email,omitempty"`
-	// URL is an optional URL to an address for the named maintainer
-	URL string `json:"url,omitempty"`
-}
-
-// Metadata for a Chart file. This models the structure of a Chart.yaml file.
-type Metadata struct {
-	// The name of the chart. Required.
-	Name string `json:"name,omitempty"`
-	// The URL to a relevant project page, git repo, or contact person
-	Home string `json:"home,omitempty"`
-	// Source is the URL to the source code of this chart
-	Sources []string `json:"sources,omitempty"`
-	// A SemVer 2 conformant version string of the chart. Required.
-	Version string `json:"version,omitempty"`
-	// A one-sentence description of the chart
-	Description string `json:"description,omitempty"`
-	// A list of string keywords
-	Keywords []string `json:"keywords,omitempty"`
-	// A list of name and URL/email address combinations for the maintainer(s)
-	Maintainers []*Maintainer `json:"maintainers,omitempty"`
-	// The URL to an icon file.
-	Icon string `json:"icon,omitempty"`
-	// The API Version of this chart. Required.
-	APIVersion string `json:"apiVersion,omitempty"`
-	// The condition to check to enable chart
-	Condition string `json:"condition,omitempty"`
-	// The tags to check to enable chart
-	Tags string `json:"tags,omitempty"`
-	// The version of the application enclosed inside of this chart.
-	AppVersion string `json:"appVersion,omitempty"`
-	// Whether or not this chart is deprecated
-	Deprecated bool `json:"deprecated,omitempty"`
-	// Annotations are additional mappings uninterpreted by Helm,
-	// made available for inspection by other applications.
-	Annotations map[string]string `json:"annotations,omitempty"`
-	// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
-	KubeVersion string `json:"kubeVersion,omitempty"`
-	// Dependencies are a list of dependencies for a chart.
-	Dependencies []*Dependency `json:"dependencies,omitempty"`
-	// Specifies the chart type: application or library
-	Type string `json:"type,omitempty"`
-}
-
-// Dependency describes a chart upon which another chart depends.
-//
-// Dependencies can be used to express developer intent, or to capture the state
-// of a chart.
-type Dependency struct {
-	// Name is the name of the dependency.
-	//
-	// This must mach the name in the dependency's Chart.yaml.
-	Name string `json:"name"`
-	// Version is the version (range) of this chart.
-	//
-	// A lock file will always produce a single version, while a dependency
-	// may contain a semantic version range.
-	Version string `json:"version,omitempty"`
-	// The URL to the repository.
-	//
-	// Appending `index.yaml` to this string should result in a URL that can be
-	// used to fetch the repository index.
-	Repository string `json:"repository"`
-	// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
-	Condition string `json:"condition,omitempty"`
-	// Tags can be used to group charts for enabling/disabling together
-	Tags []string `json:"tags,omitempty"`
-	// Enabled bool determines if chart should be loaded
-	Enabled bool `json:"enabled,omitempty"`
-	// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
-	// string or pair of child/parent sublist items.
-	ImportValues []interface{} `json:"import-values,omitempty"`
-	// Alias usable alias to be used for the chart
-	Alias string `json:"alias,omitempty"`
-}
-
-var namespaceMappings = map[string]interface{}{
-	"metadata.name": "{{ .Values.tap.selfnamespace }}",
-}
-var serviceAccountMappings = map[string]interface{}{
-	"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
-}
-var clusterRoleMappings = serviceAccountMappings
-var clusterRoleBindingMappings = map[string]interface{}{
-	"metadata.namespace":    "{{ .Values.tap.selfnamespace }}",
-	"subjects[0].namespace": "{{ .Values.tap.selfnamespace }}",
-}
-var hubPodMappings = map[string]interface{}{
-	"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
-	"spec.containers[0].env": []map[string]interface{}{
-		{
-			"name":  "POD_REGEX",
-			"value": "{{ .Values.tap.regex }}",
-		},
-		{
-			"name":  "NAMESPACES",
-			"value": "{{ gt (len .Values.tap.namespaces) 0 | ternary (join \",\" .Values.tap.namespaces) \"\" }}",
-		},
-		{
-			"name":  "LICENSE",
-			"value": "{{ .Values.license }}",
-		},
-		{
-			"name":  "SCRIPTING_ENV",
-			"value": "{}",
-		},
-		{
-			"name":  "SCRIPTING_SCRIPTS",
-			"value": "[]",
-		},
-		{
-			"name":  "AUTH_APPROVED_DOMAINS",
-			"value": "{{ gt (len .Values.tap.ingress.auth.approvedDomains) 0 | ternary (join \",\" .Values.tap.ingress.auth.approvedDomains) \"\" }}",
-		},
-	},
-	"spec.containers[0].image":                     "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
-	"spec.containers[0].imagePullPolicy":           "{{ .Values.tap.docker.imagepullpolicy }}",
-	"spec.containers[0].resources.limits.cpu":      "{{ .Values.tap.resources.hub.limits.cpu }}",
-	"spec.containers[0].resources.limits.memory":   "{{ .Values.tap.resources.hub.limits.memory }}",
-	"spec.containers[0].resources.requests.cpu":    "{{ .Values.tap.resources.hub.requests.cpu }}",
-	"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.requests.memory }}",
-	"spec.containers[0].command[0]":                "{{ .Values.tap.debug | ternary \"./hub -debug\" \"./hub\" }}",
-}
-var hubServiceMappings = serviceAccountMappings
-var frontPodMappings = map[string]interface{}{
-	"metadata.namespace":                 "{{ .Values.tap.selfnamespace }}",
-	"spec.containers[0].image":           "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
-	"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
-	"spec.containers[0].env": []map[string]interface{}{
-		{
-			"name":  "REACT_APP_DEFAULT_FILTER",
-			"value": " ",
-		},
-		{
-			"name":  "REACT_APP_HUB_HOST",
-			"value": " ",
-		},
-		{
-			"name":  "REACT_APP_HUB_PORT",
-			"value": "{{ .Values.tap.ingress.enabled | ternary \"80/api\" \"8898\" }}",
-		},
-	},
-}
-var frontServiceMappings = serviceAccountMappings
-var persistentVolumeMappings = map[string]interface{}{
-	"metadata.namespace":              "{{ .Values.tap.selfnamespace }}",
-	"spec.resources.requests.storage": "{{ .Values.tap.storagelimit }}",
-	"spec.storageClassName":           "{{ .Values.tap.storageclass }}",
-}
-var workerDaemonSetMappings = map[string]interface{}{
-	"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
-	"spec.template.spec.containers[0].image":                      "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
-	"spec.template.spec.containers[0].imagePullPolicy":            "{{ .Values.tap.docker.imagepullpolicy }}",
-	"spec.template.spec.containers[0].resources.limits.cpu":       "{{ .Values.tap.resources.worker.limits.cpu }}",
-	"spec.template.spec.containers[0].resources.limits.memory":    "{{ .Values.tap.resources.worker.limits.memory }}",
-	"spec.template.spec.containers[0].resources.requests.cpu":     "{{ .Values.tap.resources.worker.requests.cpu }}",
-	"spec.template.spec.containers[0].resources.requests.memory":  "{{ .Values.tap.resources.worker.requests.memory }}",
-	"spec.template.spec.containers[0].command[0]":                 "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
-	"spec.template.spec.containers[0].command[4]":                 "{{ .Values.tap.proxy.worker.srvport }}",
-	"spec.template.spec.containers[0].command[6]":                 "{{ .Values.tap.packetcapture }}",
-}
-var ingressClassMappings = serviceAccountMappings
-var ingressMappings = map[string]interface{}{
-	"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
-	"metadata.annotations[\"certmanager.k8s.io/cluster-issuer\"]": "{{ .Values.tap.ingress.certManager }}",
-	"spec.rules[0].host": "{{ .Values.tap.ingress.host }}",
-	"spec.tls":           "{{ .Values.tap.ingress.tls | toYaml }}",
-}
-
-func init() {
-	rootCmd.AddCommand(helmChartCmd)
-}
-
-func runHelmChart() {
-	namespace,
-		serviceAccount,
-		clusterRole,
-		clusterRoleBinding,
-		hubPod,
-		hubService,
-		frontPod,
-		frontService,
-		persistentVolume,
-		workerDaemonSet,
-		ingressClass,
-		ingress,
-		err := generateManifests()
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-
-	err = dumpHelmChart(map[string]interface{}{
-		"00-namespace.yaml":               template(namespace, namespaceMappings),
-		"01-service-account.yaml":         template(serviceAccount, serviceAccountMappings),
-		"02-cluster-role.yaml":            template(clusterRole, clusterRoleMappings),
-		"03-cluster-role-binding.yaml":    template(clusterRoleBinding, clusterRoleBindingMappings),
-		"04-hub-pod.yaml":                 template(hubPod, hubPodMappings),
-		"05-hub-service.yaml":             template(hubService, hubServiceMappings),
-		"06-front-pod.yaml":               template(frontPod, frontPodMappings),
-		"07-front-service.yaml":           template(frontService, frontServiceMappings),
-		"08-persistent-volume-claim.yaml": template(persistentVolume, persistentVolumeMappings),
-		"09-worker-daemon-set.yaml":       template(workerDaemonSet, workerDaemonSetMappings),
-		"10-ingress-class.yaml":           template(ingressClass, ingressClassMappings),
-		"11-ingress.yaml":                 template(ingress, ingressMappings),
-	})
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-}
-
-func template(object interface{}, mappings map[string]interface{}) (template interface{}) {
-	var err error
-	var data []byte
-	data, err = json.Marshal(object)
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-
-	var obj interface{}
-	obj, err = oj.Parse(data)
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-
-	for path, value := range mappings {
-		var x jp.Expr
-		x, err = jp.ParseString(path)
-		if err != nil {
-			log.Error().Err(err).Send()
-			return
-		}
-
-		err = x.Set(obj, value)
-		if err != nil {
-			log.Error().Err(err).Send()
-			return
-		}
-	}
-
-	newJson := oj.JSON(obj)
-
-	err = json.Unmarshal([]byte(newJson), &template)
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-
-	return
-}
-
-func handleHubPod(manifest string) string {
-	lines := strings.Split(manifest, "\n")
-
-	for i, line := range lines {
-		if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
-			lines[i] = " hostPort: {{ .Values.tap.proxy.hub.srvport }}"
-		}
-	}
-
-	return strings.Join(lines, "\n")
-}
-
-func handleFrontPod(manifest string) string {
-	lines := strings.Split(manifest, "\n")
-
-	for i, line := range lines {
-		if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
-			lines[i] = " hostPort: {{ .Values.tap.proxy.front.srvport }}"
-		}
-	}
-
-	return strings.Join(lines, "\n")
-}
-
-func handlePVCManifest(manifest string) string {
-	return fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s{{- end }}\n", manifest)
-}
-
-func handleDaemonSetManifest(manifest string) string {
-	lines := strings.Split(manifest, "\n")
-
-	for i, line := range lines {
-		if strings.TrimSpace(line) == "- mountPath: /app/data" {
-			lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
-		}
-
-		if strings.TrimSpace(line) == "name: kubeshark-persistent-volume" {
-			lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
-		}
-
-		if strings.TrimSpace(line) == "- name: kubeshark-persistent-volume" {
-			lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
-		}
-
-		if strings.TrimSpace(line) == "claimName: kubeshark-persistent-volume-claim" {
-			lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
-		}
-
-		if strings.HasPrefix(strings.TrimSpace(line), "- containerPort:") {
-			lines[i] = " - containerPort: {{ .Values.tap.proxy.worker.srvport }}"
-		}
-
-		if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
-			lines[i] = " hostPort: {{ .Values.tap.proxy.worker.srvport }}"
-		}
-	}
-
-	return strings.Join(lines, "\n")
-}
-
-func handleIngressClass(manifest string) string {
-	return fmt.Sprintf("{{- if .Values.tap.ingress.enabled }}\n%s{{- end }}\n", manifest)
-}
-
-func handleIngress(manifest string) string {
-	manifest = strings.Replace(manifest, "'{{ .Values.tap.ingress.tls | toYaml }}'", "{{ .Values.tap.ingress.tls | toYaml }}", 1)
-
-	return handleIngressClass(manifest)
-}
-
-func dumpHelmChart(objects map[string]interface{}) error {
-	folder := filepath.Join(".", "helm-chart")
-	templatesFolder := filepath.Join(folder, "templates")
-
-	err := fsUtils.RemoveFilesByExtension(templatesFolder, "yaml")
-	if err != nil {
-		return err
-	}
-
-	err = os.MkdirAll(templatesFolder, os.ModePerm)
-	if err != nil {
-		return err
-	}
-
-	// Sort by filenames
-	filenames := make([]string, 0)
-	for filename := range objects {
-		filenames = append(filenames, filename)
-	}
-	sort.Strings(filenames)
-
-	// Generate templates
-	for _, filename := range filenames {
-		manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
-		if err != nil {
-			return err
-		}
-
-		if filename == "04-hub-pod.yaml" {
-			manifest = handleHubPod(manifest)
-		}
-
-		if filename == "06-front-pod.yaml" {
-			manifest = handleFrontPod(manifest)
-		}
-
-		if filename == "08-persistent-volume-claim.yaml" {
-			manifest = handlePVCManifest(manifest)
-		}
-
-		if filename == "09-worker-daemon-set.yaml" {
-			manifest = handleDaemonSetManifest(manifest)
-		}
-
-		if filename == "10-ingress-class.yaml" {
-			manifest = handleIngressClass(manifest)
-		}
-
-		if filename == "11-ingress.yaml" {
-			manifest = handleIngress(manifest)
-		}
-
-		path := filepath.Join(templatesFolder, filename)
-		err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
-		if err != nil {
-			return err
-		}
-		log.Info().Msgf("Helm chart template generated: %s", path)
-	}
-
-	// Copy LICENSE
-	licenseSrcPath := filepath.Join(".", "LICENSE")
-	licenseDstPath := filepath.Join(folder, "LICENSE")
-	err = copy.Copy(licenseSrcPath, licenseDstPath)
-	if err != nil {
-		log.Warn().Err(err).Str("path", licenseSrcPath).Msg("Couldn't find the license:")
-	} else {
-		log.Info().Msgf("Helm chart license copied: %s", licenseDstPath)
-	}
-
-	// Generate Chart.yaml
-	chartMetadata := Metadata{
-		APIVersion:  "v2",
-		Name:        misc.Program,
-		Description: misc.Description,
-		Home:        misc.Website,
-		Sources:     []string{"https://github.com/kubeshark/kubeshark/tree/master/helm-chart"},
-		Keywords: []string{
-			"kubeshark",
-			"packet capture",
-			"traffic capture",
-			"traffic analyzer",
-			"network sniffer",
-			"observability",
-			"devops",
-			"microservice",
-			"forensics",
-			"api",
-		},
-		Maintainers: []*Maintainer{
-			{
-				Name:  misc.Software,
-				Email: misc.Email,
-				URL:   misc.Website,
-			},
-		},
-		Version:     misc.Ver,
-		AppVersion:  misc.Ver,
-		KubeVersion: fmt.Sprintf(">= %s-0", kubernetes.MinKubernetesServerVersion),
-		Type:        "application",
-	}
-
-	chart, err := utils.PrettyYamlOmitEmpty(chartMetadata)
-	if err != nil {
-		return err
-	}
-
-	path := filepath.Join(folder, "Chart.yaml")
-	err = os.WriteFile(path, []byte(chart), 0644)
-	if err != nil {
-		return err
-	}
-	log.Info().Msgf("Helm chart Chart.yaml generated: %s", path)
-
-	// Generate values.yaml
-	values, err := utils.PrettyYaml(config.Config)
-	if err != nil {
-		return err
-	}
-	path = filepath.Join(folder, "values.yaml")
-	err = os.WriteFile(path, []byte(values), 0644)
-	if err != nil {
-		return err
-	}
-	log.Info().Msgf("Helm chart values.yaml generated: %s", path)
-
-	return nil
-}
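Editor's note: the deleted `template()` function above turns live Kubernetes objects into Helm templates by parsing them as generic JSON and overwriting selected JSONPaths with `{{ ... }}` strings, using the `ohler55/ojg` library that this file imports. A minimal standalone demonstration of that mechanism (the manifest literal is invented for illustration):

package main

import (
	"fmt"

	"github.com/ohler55/ojg/jp"
	"github.com/ohler55/ojg/oj"
)

func main() {
	manifest := []byte(`{"metadata":{"name":"kubeshark","namespace":"default"}}`)

	// Parse the manifest into a generic interface{} tree.
	obj, err := oj.Parse(manifest)
	if err != nil {
		panic(err)
	}

	// Same mechanism as namespaceMappings above: a JSONPath on the left,
	// a Helm template expression on the right.
	x, err := jp.ParseString("metadata.namespace")
	if err != nil {
		panic(err)
	}
	if err := x.Set(obj, "{{ .Values.tap.selfnamespace }}"); err != nil {
		panic(err)
	}

	fmt.Println(oj.JSON(obj))
	// {"metadata":{"name":"kubeshark","namespace":"{{ .Values.tap.selfnamespace }}"}}
}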
cmd/manifests.go (239 lines removed)
@@ -1,239 +0,0 @@
-package cmd
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-
-	"github.com/creasty/defaults"
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/docker"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/misc/fsUtils"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
-	networking "k8s.io/api/networking/v1"
-	rbac "k8s.io/api/rbac/v1"
-)
-
-const manifestSeperator = "---"
-const manifestHeader = "# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!\n" + manifestSeperator + "\n"
-
-var manifestsCmd = &cobra.Command{
-	Use:   "manifests",
-	Short: "Generate Kubernetes manifests of Kubeshark",
-	RunE: func(cmd *cobra.Command, args []string) error {
-		runManifests()
-		return nil
-	},
-}
-
-func init() {
-	rootCmd.AddCommand(manifestsCmd)
-
-	defaultManifestsConfig := config.ManifestsConfig{}
-	if err := defaults.Set(&defaultManifestsConfig); err != nil {
-		log.Debug().Err(err).Send()
-	}
-
-	manifestsCmd.Flags().Bool("dump", defaultManifestsConfig.Dump, "Enable the debug mode")
-}
-
-func runManifests() {
-	namespace,
-		serviceAccount,
-		clusterRole,
-		clusterRoleBinding,
-		hubPod,
-		hubService,
-		frontPod,
-		frontService,
-		persistentVolume,
-		workerDaemonSet,
-		ingressClass,
-		ingress,
-		err := generateManifests()
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-
-	if config.Config.Manifests.Dump {
-		err = dumpManifests(map[string]interface{}{
-			"00-namespace.yaml":               namespace,
-			"01-service-account.yaml":         serviceAccount,
-			"02-cluster-role.yaml":            clusterRole,
-			"03-cluster-role-binding.yaml":    clusterRoleBinding,
-			"04-hub-pod.yaml":                 hubPod,
-			"05-hub-service.yaml":             hubService,
-			"06-front-pod.yaml":               frontPod,
-			"07-front-service.yaml":           frontService,
-			"08-persistent-volume-claim.yaml": persistentVolume,
-			"09-worker-daemon-set.yaml":       workerDaemonSet,
-			"10-ingress-class.yaml":           ingressClass,
-			"11-ingress.yaml":                 ingress,
-		})
-	} else {
-		err = printManifests([]interface{}{
-			namespace,
-			serviceAccount,
-			clusterRole,
-			clusterRoleBinding,
-			hubPod,
-			hubService,
-			frontPod,
-			frontService,
-			workerDaemonSet,
-		})
-	}
-	if err != nil {
-		log.Error().Err(err).Send()
-		return
-	}
-}
-
-func generateManifests() (
-	namespace *v1.Namespace,
-	serviceAccount *v1.ServiceAccount,
-	clusterRole *rbac.ClusterRole,
-	clusterRoleBinding *rbac.ClusterRoleBinding,
-	hubPod *v1.Pod,
-	hubService *v1.Service,
-	frontPod *v1.Pod,
-	frontService *v1.Service,
-	persistentVolumeClaim *v1.PersistentVolumeClaim,
-	workerDaemonSet *kubernetes.DaemonSet,
-	ingressClass *networking.IngressClass,
-	ingress *networking.Ingress,
-	err error,
-) {
-	config.Config.License = ""
-	persistentStorage := config.Config.Tap.PersistentStorage
-	config.Config.Tap.PersistentStorage = true
-
-	var kubernetesProvider *kubernetes.Provider
-	kubernetesProvider, err = getKubernetesProviderForCli(true, true)
-	if err != nil {
-		return
-	}
-
-	namespace = kubernetesProvider.BuildNamespace(config.Config.Tap.SelfNamespace)
-
-	serviceAccount = kubernetesProvider.BuildServiceAccount()
-
-	clusterRole = kubernetesProvider.BuildClusterRole()
-
-	clusterRoleBinding = kubernetesProvider.BuildClusterRoleBinding()
-
-	hubPod, err = kubernetesProvider.BuildHubPod(&kubernetes.PodOptions{
-		Namespace:          config.Config.Tap.SelfNamespace,
-		PodName:            kubernetes.HubPodName,
-		PodImage:           docker.GetHubImage(),
-		ServiceAccountName: kubernetes.ServiceAccountName,
-		Resources:          config.Config.Tap.Resources.Hub,
-		ImagePullPolicy:    config.Config.ImagePullPolicy(),
-		ImagePullSecrets:   config.Config.ImagePullSecrets(),
-		Debug:              config.Config.Tap.Debug,
-	})
-	if err != nil {
-		return
-	}
-
-	hubService = kubernetesProvider.BuildHubService(config.Config.Tap.SelfNamespace)
-
-	frontPod, err = kubernetesProvider.BuildFrontPod(&kubernetes.PodOptions{
-		Namespace:          config.Config.Tap.SelfNamespace,
-		PodName:            kubernetes.FrontPodName,
-		PodImage:           docker.GetHubImage(),
-		ServiceAccountName: kubernetes.ServiceAccountName,
-		Resources:          config.Config.Tap.Resources.Hub,
-		ImagePullPolicy:    config.Config.ImagePullPolicy(),
-		ImagePullSecrets:   config.Config.ImagePullSecrets(),
-		Debug:              config.Config.Tap.Debug,
-	}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
-	if err != nil {
-		return
-	}
-
-	frontService = kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
-
-	persistentVolumeClaim, err = kubernetesProvider.BuildPersistentVolumeClaim()
-	if err != nil {
-		return
-	}
-
-	workerDaemonSet, err = kubernetesProvider.BuildWorkerDaemonSet(
-		docker.GetWorkerImage(),
-		kubernetes.WorkerDaemonSetName,
-		kubernetes.ServiceAccountName,
-		config.Config.Tap.Resources.Worker,
-		config.Config.ImagePullPolicy(),
-		config.Config.ImagePullSecrets(),
-		config.Config.Tap.ServiceMesh,
-		config.Config.Tap.Tls,
-		config.Config.Tap.Debug,
-	)
-	if err != nil {
-		return
-	}
-
-	ingressClass = kubernetesProvider.BuildIngressClass()
-	ingress = kubernetesProvider.BuildIngress()
-
-	config.Config.Tap.PersistentStorage = persistentStorage
-
-	return
-}
-
-func dumpManifests(objects map[string]interface{}) error {
-	folder := filepath.Join(".", "manifests")
-
-	err := fsUtils.RemoveFilesByExtension(folder, "yaml")
-	if err != nil {
-		return err
-	}
-
-	err = os.MkdirAll(folder, os.ModePerm)
-	if err != nil {
-		return err
-	}
-
-	// Sort by filenames
-	filenames := make([]string, 0)
-	for filename := range objects {
-		filenames = append(filenames, filename)
-	}
-	sort.Strings(filenames)
-
-	for _, filename := range filenames {
-		manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
-		if err != nil {
-			return err
-		}
-
-		path := filepath.Join(folder, filename)
-		err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
-		if err != nil {
-			return err
-		}
-		log.Info().Msgf("Manifest generated: %s", path)
-	}
-
-	return nil
-}
-
-func printManifests(objects []interface{}) error {
-	for _, object := range objects {
-		manifest, err := utils.PrettyYamlOmitEmpty(object)
-		if err != nil {
-			return err
-		}
-		fmt.Println(manifestSeperator)
-		fmt.Println(manifest)
-	}
-
-	return nil
-}
@@ -2,7 +2,7 @@ package cmd

 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"os"
 	"time"
@@ -98,7 +98,7 @@ func runLicenseRecieverServer() {
 	})

 	ginApp.POST("/", func(c *gin.Context) {
-		data, err := ioutil.ReadAll(c.Request.Body)
+		data, err := io.ReadAll(c.Request.Body)
 		if err != nil {
 			log.Error().Err(err).Send()
 			c.AbortWithStatus(http.StatusBadRequest)
@@ -2,7 +2,6 @@ package cmd

 import (
 	"context"
-	"errors"
 	"fmt"
 	"regexp"
 	"sync"
@@ -10,13 +9,11 @@ import (

 	"github.com/kubeshark/kubeshark/docker"
 	"github.com/kubeshark/kubeshark/internal/connect"
+	"github.com/kubeshark/kubeshark/kubernetes/helm"
 	"github.com/kubeshark/kubeshark/misc"
-	"github.com/kubeshark/kubeshark/resources"
 	"github.com/kubeshark/kubeshark/utils"

 	core "k8s.io/api/core/v1"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/kubeshark/kubeshark/config"
 	"github.com/kubeshark/kubeshark/config/configStructs"
@@ -28,9 +25,8 @@ import (
 const cleanupTimeout = time.Minute

 type tapState struct {
-	startTime                time.Time
-	targetNamespaces         []string
-	selfServiceAccountExists bool
+	startTime        time.Time
+	targetNamespaces []string
 }

 var state tapState
@@ -90,19 +86,12 @@ func tap() {
 	}

 	log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
-	if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
-		var statusError *k8serrors.StatusError
-		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
-			log.Info().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance.", misc.Software, misc.Program, misc.Software))
-			postHubStarted(ctx, kubernetesProvider, cancel, true)
-			log.Info().Msg("Updated Hub about the changes in the config. Exiting.")
-			printProxyCommandSuggestion()
-		} else {
-			defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
-			log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
-		}
-
-		return
+	rel, err := helm.NewHelmDefault().Install()
+	if err != nil {
+		log.Error().Err(err).Send()
+	} else {
+		log.Info().Msgf("Installed the Helm release: %s", rel.Name)
 	}

 	defer finishTapExecution(kubernetesProvider)
@@ -126,7 +115,7 @@ func printProxyCommandSuggestion() {
 }

 func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
-	finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, true)
+	finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
 }

 /*
@@ -416,24 +405,7 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
 		"/echo",
 	)

-	if !update {
-		// Create workers
-		err := kubernetes.CreateWorkers(
-			kubernetesProvider,
-			state.selfServiceAccountExists,
-			ctx,
-			config.Config.Tap.SelfNamespace,
-			config.Config.Tap.Resources.Worker,
-			config.Config.ImagePullPolicy(),
-			config.Config.ImagePullSecrets(),
-			config.Config.Tap.ServiceMesh,
-			config.Config.Tap.Tls,
-			config.Config.Tap.Debug,
-		)
-		if err != nil {
-			log.Error().Err(err).Send()
-		}
-	} else {
+	if update {
 		// Pod regex
 		connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)

@@ -19,24 +19,24 @@ func CreateDefaultConfig() ConfigStruct {
 }

 type KubeConfig struct {
-	ConfigPathStr string `yaml:"configpath"`
-	Context       string `yaml:"context"`
+	ConfigPathStr string `yaml:"configpath" json:"configpath"`
+	Context       string `yaml:"context" json:"context"`
 }

 type ManifestsConfig struct {
-	Dump bool `yaml:"dump"`
+	Dump bool `yaml:"dump" json:"dump"`
 }

 type ConfigStruct struct {
-	Tap          configStructs.TapConfig       `yaml:"tap"`
-	Logs         configStructs.LogsConfig      `yaml:"logs"`
-	Config       configStructs.ConfigConfig    `yaml:"config,omitempty"`
-	Kube         KubeConfig                    `yaml:"kube"`
-	DumpLogs     bool                          `yaml:"dumplogs" default:"false"`
-	HeadlessMode bool                          `yaml:"headless" default:"false"`
-	License      string                        `yaml:"license" default:""`
-	Scripting    configStructs.ScriptingConfig `yaml:"scripting"`
-	Manifests    ManifestsConfig               `yaml:"manifests,omitempty"`
+	Tap          configStructs.TapConfig       `yaml:"tap" json:"tap"`
+	Logs         configStructs.LogsConfig      `yaml:"logs" json:"logs"`
+	Config       configStructs.ConfigConfig    `yaml:"config,omitempty" json:"config,omitempty"`
+	Kube         KubeConfig                    `yaml:"kube" json:"kube"`
+	DumpLogs     bool                          `yaml:"dumplogs" json:"dumplogs" default:"false"`
+	HeadlessMode bool                          `yaml:"headless" json:"headless" default:"false"`
+	License      string                        `yaml:"license" json:"license" default:""`
+	Scripting    configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
+	Manifests    ManifestsConfig               `yaml:"manifests,omitempty" json:"manifests,omitempty"`
 }

 func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
@@ -5,5 +5,5 @@ const (
 )

 type ConfigConfig struct {
-	Regenerate bool `yaml:"regenerate,omitempty" default:"false" readonly:""`
+	Regenerate bool `yaml:"regenerate,omitempty" json:"regenerate,omitempty" default:"false" readonly:""`
 }
@@ -13,7 +13,7 @@ const (
 )

 type LogsConfig struct {
-	FileStr string `yaml:"file"`
+	FileStr string `yaml:"file" json:"file"`
 }

 func (config *LogsConfig) Validate() error {
@@ -2,7 +2,7 @@ package configStructs

 import (
 	"io/fs"
-	"io/ioutil"
+	"os"
 	"path/filepath"

 	"github.com/kubeshark/kubeshark/misc"
@@ -10,9 +10,9 @@
 )

 type ScriptingConfig struct {
-	Env          map[string]interface{} `yaml:"env"`
-	Source       string                 `yaml:"source" default:""`
-	WatchScripts bool                   `yaml:"watchScripts" default:"true"`
+	Env          map[string]interface{} `yaml:"env" json:"env"`
+	Source       string                 `yaml:"source" json:"source" default:""`
+	WatchScripts bool                   `yaml:"watchScripts" json:"watchScripts" default:"true"`
 }

 func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
@@ -20,8 +20,8 @@ func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error)
 		return
 	}

-	var files []fs.FileInfo
-	files, err = ioutil.ReadDir(config.Source)
+	var files []fs.DirEntry
+	files, err = os.ReadDir(config.Source)
 	if err != nil {
 		return
 	}
@@ -33,46 +33,46 @@ const (
 )

 type ResourceLimits struct {
-	CPU    string `yaml:"cpu" default:"750m"`
-	Memory string `yaml:"memory" default:"1Gi"`
+	CPU    string `yaml:"cpu" json:"cpu" default:"750m"`
+	Memory string `yaml:"memory" json:"memory" default:"1Gi"`
 }

 type ResourceRequests struct {
-	CPU    string `yaml:"cpu" default:"50m"`
-	Memory string `yaml:"memory" default:"50Mi"`
+	CPU    string `yaml:"cpu" json:"cpu" default:"50m"`
+	Memory string `yaml:"memory" json:"memory" default:"50Mi"`
 }

 type ResourceRequirements struct {
-	Limits   ResourceLimits   `json:"limits"`
-	Requests ResourceRequests `json:"requests"`
+	Limits   ResourceLimits   `yaml:"limits" json:"limits"`
+	Requests ResourceRequests `yaml:"requests" json:"requests"`
 }

 type WorkerConfig struct {
-	SrvPort uint16 `yaml:"srvport" default:"8897"`
+	SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8897"`
 }

 type HubConfig struct {
-	Port    uint16 `yaml:"port" default:"8898"`
-	SrvPort uint16 `yaml:"srvport" default:"8898"`
+	Port    uint16 `yaml:"port" json:"port" default:"8898"`
+	SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8898"`
 }

 type FrontConfig struct {
-	Port    uint16 `yaml:"port" default:"8899"`
-	SrvPort uint16 `yaml:"srvport" default:"8899"`
+	Port    uint16 `yaml:"port" json:"port" default:"8899"`
+	SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8899"`
 }

 type ProxyConfig struct {
-	Worker WorkerConfig `yaml:"worker"`
-	Hub    HubConfig    `yaml:"hub"`
-	Front  FrontConfig  `yaml:"front"`
-	Host   string       `yaml:"host" default:"127.0.0.1"`
+	Worker WorkerConfig `yaml:"worker" json:"worker"`
+	Hub    HubConfig    `yaml:"hub" json:"hub"`
+	Front  FrontConfig  `yaml:"front" json:"front"`
+	Host   string       `yaml:"host" json:"host" default:"127.0.0.1"`
 }

 type DockerConfig struct {
-	Registry         string   `yaml:"registry" default:"docker.io/kubeshark"`
-	Tag              string   `yaml:"tag" default:"latest"`
-	ImagePullPolicy  string   `yaml:"imagepullpolicy" default:"Always"`
-	ImagePullSecrets []string `yaml:"imagepullsecrets"`
+	Registry         string   `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
+	Tag              string   `yaml:"tag" json:"tag" default:"latest"`
+	ImagePullPolicy  string   `yaml:"imagepullpolicy" json:"imagepullpolicy" default:"Always"`
+	ImagePullSecrets []string `yaml:"imagepullsecrets" json:"imagepullsecrets"`
 }

 type ResourcesConfig struct {
@@ -81,37 +81,37 @@ type ResourcesConfig struct {
 }

 type AuthConfig struct {
-	ApprovedDomains []string `yaml:"approvedDomains"`
+	ApprovedDomains []string `yaml:"approvedDomains" json:"approvedDomains"`
 }

 type IngressConfig struct {
-	Enabled     bool                    `yaml:"enabled" default:"false"`
-	Host        string                  `yaml:"host" default:"ks.svc.cluster.local"`
-	TLS         []networking.IngressTLS `yaml:"tls"`
-	Auth        AuthConfig              `yaml:"auth"`
-	CertManager string                  `yaml:"certManager" default:"letsencrypt-prod"`
+	Enabled     bool                    `yaml:"enabled" json:"enabled" default:"false"`
+	Host        string                  `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
+	TLS         []networking.IngressTLS `yaml:"tls" json:"tls"`
+	Auth        AuthConfig              `yaml:"auth" json:"auth"`
+	CertManager string                  `yaml:"certManager" json:"certManager" default:"letsencrypt-prod"`
 }

 type TapConfig struct {
-	Docker            DockerConfig          `yaml:"docker"`
-	Proxy             ProxyConfig           `yaml:"proxy"`
-	PodRegexStr       string                `yaml:"regex" default:".*"`
-	Namespaces        []string              `yaml:"namespaces"`
-	SelfNamespace     string                `yaml:"selfnamespace" default:"kubeshark"`
-	PersistentStorage bool                  `yaml:"persistentstorage" default:"false"`
-	StorageLimit      string                `yaml:"storagelimit" default:"200Mi"`
-	StorageClass      string                `yaml:"storageclass" default:"standard"`
-	DryRun            bool                  `yaml:"dryrun" default:"false"`
-	Pcap              string                `yaml:"pcap" default:""`
-	Resources         ResourcesConfig       `yaml:"resources"`
-	ServiceMesh       bool                  `yaml:"servicemesh" default:"true"`
-	Tls               bool                  `yaml:"tls" default:"true"`
-	PacketCapture     string                `yaml:"packetcapture" default:"libpcap"`
-	IgnoreTainted     bool                  `yaml:"ignoreTainted" default:"false"`
-	ResourceLabels    map[string]string     `yaml:"resourceLabels" default:"{}"`
-	NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
-	Ingress           IngressConfig         `yaml:"ingress"`
-	Debug             bool                  `yaml:"debug" default:"false"`
+	Docker            DockerConfig          `yaml:"docker" json:"docker"`
+	Proxy             ProxyConfig           `yaml:"proxy" json:"proxy"`
+	PodRegexStr       string                `yaml:"regex" json:"regex" default:".*"`
+	Namespaces        []string              `yaml:"namespaces" json:"namespaces"`
+	SelfNamespace     string                `yaml:"selfnamespace" json:"selfnamespace" default:"kubeshark"`
+	PersistentStorage bool                  `yaml:"persistentstorage" json:"persistentstorage" default:"false"`
+	StorageLimit      string                `yaml:"storagelimit" json:"storagelimit" default:"200Mi"`
+	StorageClass      string                `yaml:"storageclass" json:"storageclass" default:"standard"`
+	DryRun            bool                  `yaml:"dryrun" json:"dryrun" default:"false"`
+	Pcap              string                `yaml:"pcap" json:"pcap" default:""`
+	Resources         ResourcesConfig       `yaml:"resources" json:"resources"`
+	ServiceMesh       bool                  `yaml:"servicemesh" json:"servicemesh" default:"true"`
+	Tls               bool                  `yaml:"tls" json:"tls" default:"true"`
+	PacketCapture     string                `yaml:"packetcapture" json:"packetcapture" default:"libpcap"`
+	IgnoreTainted     bool                  `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"`
+	ResourceLabels    map[string]string     `yaml:"resourceLabels" json:"resourceLabels" default:"{}"`
+	NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"[]"`
+	Ingress           IngressConfig         `yaml:"ingress" json:"ingress"`
+	Debug             bool                  `yaml:"debug" json:"debug" default:"false"`
 }

 func (config *TapConfig) PodRegex() *regexp.Regexp {
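Editor's note: a likely motivation for adding `json` tags beside every `yaml` tag above is the commit's "Pass the config as `values.yaml` to Helm install" item. Helm's values handling, like `sigs.k8s.io/yaml`, round-trips Go structs through `encoding/json`, which ignores `yaml` tags entirely. A minimal illustration of the difference:

package main

import (
	"encoding/json"
	"fmt"
)

type HubConfig struct {
	Port    uint16 `yaml:"port" json:"port"`
	SrvPort uint16 `yaml:"srvport" json:"srvport"`
}

func main() {
	data, _ := json.Marshal(HubConfig{Port: 8898, SrvPort: 8898})
	fmt.Println(string(data)) // {"port":8898,"srvport":8898}
	// Without the json tags, encoding/json would emit {"Port":8898,"SrvPort":8898},
	// which would not match the lowercase keys the Helm chart's values.yaml uses.
}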
go.mod (155 lines changed)
@@ -1,118 +1,157 @@
module github.com/kubeshark/kubeshark

go 1.17
go 1.19

require (
    github.com/creasty/defaults v1.5.2
    github.com/docker/docker v20.10.24+incompatible
    github.com/docker/go-connections v0.4.0
    github.com/fsnotify/fsnotify v1.5.1
    github.com/fsnotify/fsnotify v1.6.0
    github.com/gin-gonic/gin v1.7.7
    github.com/google/go-github/v37 v37.0.0
    github.com/gorilla/websocket v1.4.2
    github.com/ohler55/ojg v1.14.5
    github.com/otiai10/copy v1.10.0
    github.com/pkg/errors v0.9.1
    github.com/robertkrimen/otto v0.2.1
    github.com/rs/zerolog v1.28.0
    github.com/spf13/cobra v1.3.0
    github.com/spf13/cobra v1.6.1
    github.com/spf13/pflag v1.0.5
    gopkg.in/yaml.v3 v3.0.1
    k8s.io/api v0.23.3
    k8s.io/apimachinery v0.23.3
    k8s.io/client-go v0.23.3
    k8s.io/kubectl v0.23.3
    helm.sh/helm/v3 v3.12.0
    k8s.io/api v0.27.1
    k8s.io/apimachinery v0.27.1
    k8s.io/client-go v0.27.1
    k8s.io/kubectl v0.27.1
)

require (
    cloud.google.com/go/compute v1.2.0 // indirect
    github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
    github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
    github.com/Azure/go-autorest v14.2.0+incompatible // indirect
    github.com/Azure/go-autorest/autorest v0.11.24 // indirect
    github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
    github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
    github.com/Azure/go-autorest/logger v0.2.1 // indirect
    github.com/Azure/go-autorest/tracing v0.6.0 // indirect
    github.com/BurntSushi/toml v1.2.1 // indirect
    github.com/MakeNowJust/heredoc v1.0.0 // indirect
    github.com/Masterminds/goutils v1.1.1 // indirect
    github.com/Masterminds/semver/v3 v3.2.0 // indirect
    github.com/Masterminds/sprig/v3 v3.2.3 // indirect
    github.com/Masterminds/squirrel v1.5.3 // indirect
    github.com/Microsoft/go-winio v0.6.0 // indirect
    github.com/PuerkitoBio/purell v1.1.1 // indirect
    github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
    github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
    github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.2.0 // indirect
    github.com/chai2010/gettext-go v1.0.2 // indirect
    github.com/containerd/containerd v1.7.0 // indirect
    github.com/cyphar/filepath-securejoin v0.2.3 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/docker/distribution v2.8.0+incompatible // indirect
    github.com/docker/go-units v0.4.0 // indirect
    github.com/docker/cli v20.10.21+incompatible // indirect
    github.com/docker/distribution v2.8.1+incompatible // indirect
    github.com/docker/docker-credential-helpers v0.7.0 // indirect
    github.com/docker/go-metrics v0.0.1 // indirect
    github.com/docker/go-units v0.5.0 // indirect
    github.com/emicklei/go-restful/v3 v3.10.1 // indirect
    github.com/evanphx/json-patch v5.6.0+incompatible // indirect
    github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
    github.com/fvbommel/sortorder v1.0.2 // indirect
    github.com/fatih/color v1.13.0 // indirect
    github.com/gin-contrib/sse v0.1.0 // indirect
    github.com/go-errors/errors v1.4.2 // indirect
    github.com/go-gorp/gorp/v3 v3.0.5 // indirect
    github.com/go-logr/logr v1.2.3 // indirect
    github.com/go-openapi/jsonpointer v0.19.5 // indirect
    github.com/go-openapi/jsonreference v0.19.6 // indirect
    github.com/go-openapi/swag v0.21.1 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-openapi/jsonpointer v0.19.6 // indirect
    github.com/go-openapi/jsonreference v0.20.1 // indirect
    github.com/go-openapi/swag v0.22.3 // indirect
    github.com/go-playground/locales v0.13.0 // indirect
    github.com/go-playground/universal-translator v0.17.0 // indirect
    github.com/go-playground/validator/v10 v10.4.1 // indirect
    github.com/gobwas/glob v0.2.3 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/golang/protobuf v1.5.3 // indirect
    github.com/google/btree v1.0.1 // indirect
    github.com/google/go-cmp v0.5.7 // indirect
    github.com/google/gnostic v0.5.7-v3refs // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/google/go-querystring v1.1.0 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
    github.com/google/uuid v1.3.0 // indirect
    github.com/googleapis/gnostic v0.5.5 // indirect
    github.com/gorilla/mux v1.8.0 // indirect
    github.com/gosuri/uitable v0.0.4 // indirect
    github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
    github.com/imdario/mergo v0.3.12 // indirect
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/huandu/xstrings v1.4.0 // indirect
    github.com/imdario/mergo v0.3.13 // indirect
    github.com/inconshreveable/mousetrap v1.0.1 // indirect
    github.com/jmoiron/sqlx v1.3.5 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/compress v1.16.0 // indirect
    github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
    github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
    github.com/leodido/go-urn v1.2.0 // indirect
    github.com/lib/pq v1.10.7 // indirect
    github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
    github.com/mailru/easyjson v0.7.7 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-isatty v0.0.16 // indirect
    github.com/mattn/go-isatty v0.0.17 // indirect
    github.com/mattn/go-runewidth v0.0.9 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
    github.com/mitchellh/copystructure v1.2.0 // indirect
    github.com/mitchellh/go-wordwrap v1.0.1 // indirect
    github.com/mitchellh/reflectwalk v1.0.2 // indirect
    github.com/moby/locker v1.0.1 // indirect
    github.com/moby/spdystream v0.2.0 // indirect
    github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
    github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
    github.com/morikuni/aec v1.0.0 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
    github.com/opencontainers/go-digest v1.0.0 // indirect
    github.com/opencontainers/image-spec v1.0.2 // indirect
    github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
    github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/russross/blackfriday v1.6.0 // indirect
    github.com/sirupsen/logrus v1.7.0 // indirect
    github.com/stretchr/testify v1.8.1 // indirect
    github.com/prometheus/client_golang v1.14.0 // indirect
    github.com/prometheus/client_model v0.3.0 // indirect
    github.com/prometheus/common v0.37.0 // indirect
    github.com/prometheus/procfs v0.8.0 // indirect
    github.com/rubenv/sql-migrate v1.3.1 // indirect
    github.com/russross/blackfriday/v2 v2.1.0 // indirect
    github.com/shopspring/decimal v1.3.1 // indirect
    github.com/sirupsen/logrus v1.9.0 // indirect
    github.com/spf13/cast v1.5.0 // indirect
    github.com/ugorji/go/codec v1.1.7 // indirect
    github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
    github.com/xeipuuv/gojsonschema v1.2.0 // indirect
    github.com/xlab/treeprint v1.1.0 // indirect
    go.opentelemetry.io/otel v1.14.0 // indirect
    go.opentelemetry.io/otel/trace v1.14.0 // indirect
    go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
    golang.org/x/crypto v0.1.0 // indirect
    golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
    golang.org/x/net v0.7.0 // indirect
    golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
    golang.org/x/sys v0.5.0 // indirect
    golang.org/x/term v0.5.0 // indirect
    golang.org/x/text v0.7.0 // indirect
    golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
    golang.org/x/tools v0.1.12 // indirect
    golang.org/x/crypto v0.5.0 // indirect
    golang.org/x/mod v0.9.0 // indirect
    golang.org/x/net v0.8.0 // indirect
    golang.org/x/oauth2 v0.4.0 // indirect
    golang.org/x/sync v0.1.0 // indirect
    golang.org/x/sys v0.6.0 // indirect
    golang.org/x/term v0.6.0 // indirect
    golang.org/x/text v0.9.0 // indirect
    golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
    golang.org/x/tools v0.7.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/protobuf v1.27.1 // indirect
    google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
    google.golang.org/grpc v1.53.0 // indirect
    google.golang.org/protobuf v1.28.1 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/sourcemap.v1 v1.0.5 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    k8s.io/cli-runtime v0.23.3 // indirect
    k8s.io/component-base v0.23.3 // indirect
    k8s.io/klog/v2 v2.40.1 // indirect
    k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
    k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
    sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
    sigs.k8s.io/kustomize/api v0.11.1 // indirect
    sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
    k8s.io/apiextensions-apiserver v0.27.1 // indirect
    k8s.io/apiserver v0.27.1 // indirect
    k8s.io/cli-runtime v0.27.1 // indirect
    k8s.io/component-base v0.27.1 // indirect
    k8s.io/klog/v2 v2.90.1 // indirect
    k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
    k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
    oras.land/oras-go v1.2.2 // indirect
    sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
    sigs.k8s.io/kustomize/api v0.13.2 // indirect
    sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
    sigs.k8s.io/yaml v1.3.0 // indirect
)
@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
187
kubernetes/helm/helm.go
Normal file
187
kubernetes/helm/helm.go
Normal file
@@ -0,0 +1,187 @@
package helm

import (
    "encoding/json"
    "os"
    "path/filepath"
    "regexp"

    "github.com/kubeshark/kubeshark/config"
    "github.com/pkg/errors"
    "github.com/rs/zerolog/log"
    "helm.sh/helm/v3/pkg/action"
    "helm.sh/helm/v3/pkg/chart"
    "helm.sh/helm/v3/pkg/chart/loader"
    "helm.sh/helm/v3/pkg/cli"
    "helm.sh/helm/v3/pkg/downloader"
    "helm.sh/helm/v3/pkg/getter"
    "helm.sh/helm/v3/pkg/kube"
    "helm.sh/helm/v3/pkg/registry"
    "helm.sh/helm/v3/pkg/release"
    "helm.sh/helm/v3/pkg/repo"
)

const ENV_HELM_DRIVER = "HELM_DRIVER"

var settings = cli.New()

// Helm holds the chart repository URL plus the release name and namespace to operate on.
type Helm struct {
    repo             string
    releaseName      string
    releaseNamespace string
}

func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
    return &Helm{
        repo:             repo,
        releaseName:      releaseName,
        releaseNamespace: releaseNamespace,
    }
}

func NewHelmDefault() *Helm {
    return &Helm{
        repo:             "https://helm.kubeshark.co",
        releaseName:      "kubeshark",
        releaseNamespace: "default",
    }
}

// parseOCIRef splits an oci:// chart reference into the chart reference and its tag.
func parseOCIRef(chartRef string) (string, string, error) {
    refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
    caps := refTagRegexp.FindStringSubmatch(chartRef)
    if len(caps) != 4 {
        return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
    }
    chartRef = caps[1]
    tag := caps[3]

    return chartRef, tag, nil
}

func (h *Helm) Install() (rel *release.Release, err error) {
    kubeConfigPath := config.Config.KubeConfigPath()
    actionConfig := new(action.Configuration)
    if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
        log.Info().Msgf(format, v...)
    }); err != nil {
        return
    }

    client := action.NewInstall(actionConfig)
    client.Namespace = h.releaseNamespace
    client.ReleaseName = h.releaseName

    // Resolve the chart URL by release name in the repository index.
    var chartURL string
    chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
    if err != nil {
        return
    }

    var cp string
    cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
    if err != nil {
        return
    }

    m := &downloader.Manager{
        Out:              os.Stdout,
        ChartPath:        cp,
        Keyring:          client.ChartPathOptions.Keyring,
        SkipUpdate:       false,
        Getters:          getter.All(settings),
        RepositoryConfig: settings.RepositoryConfig,
        RepositoryCache:  settings.RepositoryCache,
        Debug:            settings.Debug,
    }

    dl := downloader.ChartDownloader{
        Out:              m.Out,
        Verify:           m.Verify,
        Keyring:          m.Keyring,
        RepositoryConfig: m.RepositoryConfig,
        RepositoryCache:  m.RepositoryCache,
        RegistryClient:   m.RegistryClient,
        Getters:          m.Getters,
        Options: []getter.Option{
            getter.WithInsecureSkipVerifyTLS(false),
        },
    }

    repoPath := filepath.Dir(m.ChartPath)
    err = os.MkdirAll(repoPath, os.ModePerm)
    if err != nil {
        return
    }

    // OCI references carry the chart version as a tag; split it off before downloading.
    version := ""
    if registry.IsOCI(chartURL) {
        chartURL, version, err = parseOCIRef(chartURL)
        if err != nil {
            return
        }
        dl.Options = append(dl.Options,
            getter.WithRegistryClient(m.RegistryClient),
            getter.WithTagName(version))
    }

    log.Info().
        Str("url", chartURL).
        Str("repo-path", repoPath).
        Msg("Downloading Helm chart:")

    if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
        return
    }

    var chart *chart.Chart
    chart, err = loader.Load(m.ChartPath)
    if err != nil {
        return
    }

    log.Info().
        Str("release", chart.Metadata.Name).
        Str("version", chart.Metadata.Version).
        Strs("source", chart.Metadata.Sources).
        Str("kube-version", chart.Metadata.KubeVersion).
        Msg("Installing using Helm:")

    // Pass the CLI config as the chart values: marshal it to JSON, then
    // unmarshal into the generic map that the install client expects.
    var configMarshalled []byte
    configMarshalled, err = json.Marshal(config.Config)
    if err != nil {
        return
    }

    var configUnmarshalled map[string]interface{}
    err = json.Unmarshal(configMarshalled, &configUnmarshalled)
    if err != nil {
        return
    }

    rel, err = client.Run(chart, configUnmarshalled)
    if err != nil {
        return
    }

    return
}

func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
    kubeConfigPath := config.Config.KubeConfigPath()
    actionConfig := new(action.Configuration)
    if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
        log.Info().Msgf(format, v...)
    }); err != nil {
        return
    }

    client := action.NewUninstall(actionConfig)

    resp, err = client.Run(h.releaseName)
    if err != nil {
        return
    }

    return
}
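The tap command call site is not part of this excerpt; a minimal sketch of how the new package is meant to be driven, assuming only the NewHelm/NewHelmDefault, Install and Uninstall signatures shown above (the surrounding main function and error handling are illustrative):

package main

import (
    "github.com/kubeshark/kubeshark/kubernetes/helm"
    "github.com/rs/zerolog/log"
)

func main() {
    // The defaults shown above: repo https://helm.kubeshark.co,
    // release "kubeshark", namespace "default". A custom namespace would go
    // through helm.NewHelm(repo, releaseName, releaseNamespace) instead.
    h := helm.NewHelmDefault()

    rel, err := h.Install()
    if err != nil {
        log.Fatal().Err(err).Msg("Helm install failed!")
    }
    log.Info().Str("release", rel.Name).Str("namespace", rel.Namespace).Msg("Installed:")

    // Uninstall reuses the same action configuration and removes the release.
    if _, err := h.Uninstall(); err != nil {
        log.Fatal().Err(err).Msg("Helm uninstall failed!")
    }
}

Note that Install resolves the chart by release name in the repository index, so the repository must serve a chart named "kubeshark". OCI references are recognized via registry.IsOCI and split by parseOCIRef; for example, a hypothetical oci://registry.example.com:5000/charts/kubeshark:52.0.0 would split into the chart reference oci://registry.example.com:5000/charts/kubeshark and the tag 52.0.0.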
File diff suppressed because it is too large
@@ -1,147 +0,0 @@
package kubernetes

import (
    core "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
    applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
    v1 "k8s.io/client-go/applyconfigurations/core/v1"
    applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
)

type DaemonSetPod struct {
    metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    Spec              core.PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

type DaemonSetSpec struct {
    Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
    Template DaemonSetPod         `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}

type DaemonSet struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
    Spec              DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

func (d *DaemonSet) GenerateApplyConfiguration(name string, namespace string, podName string, provider *Provider) *applyconfapp.DaemonSetApplyConfiguration {
    // Pod
    p := d.Spec.Template.Spec
    podSpec := applyconfcore.PodSpec()
    podSpec.WithHostNetwork(p.HostNetwork)
    podSpec.WithDNSPolicy(p.DNSPolicy)
    podSpec.WithTerminationGracePeriodSeconds(*p.TerminationGracePeriodSeconds)
    podSpec.WithServiceAccountName(p.ServiceAccountName)

    // Containers
    for _, c := range d.Spec.Template.Spec.Containers {
        // Common
        container := applyconfcore.Container()
        container.WithName(c.Name)
        container.WithImage(c.Image)
        container.WithImagePullPolicy(c.ImagePullPolicy)
        container.WithCommand(c.Command...)

        // Linux capabilities
        caps := applyconfcore.Capabilities().WithAdd(c.SecurityContext.Capabilities.Add...).WithDrop(c.SecurityContext.Capabilities.Drop...)
        container.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))

        // Environment variables
        var envvars []*v1.EnvVarApplyConfiguration
        for _, e := range c.Env {
            envvars = append(envvars, applyconfcore.EnvVar().WithName(e.Name).WithValue(e.Value))
        }
        container.WithEnv(envvars...)

        // Resource limits
        resources := applyconfcore.ResourceRequirements().WithRequests(c.Resources.Requests).WithLimits(c.Resources.Limits)
        container.WithResources(resources)

        // Volume mounts
        for _, m := range c.VolumeMounts {
            volumeMount := applyconfcore.VolumeMount().WithName(m.Name).WithMountPath(m.MountPath).WithReadOnly(m.ReadOnly)
            container.WithVolumeMounts(volumeMount)
        }

        podSpec.WithContainers(container)
    }

    // Node affinity (RequiredDuringSchedulingIgnoredDuringExecution only)
    if p.Affinity != nil {
        nodeSelector := applyconfcore.NodeSelector()
        for _, term := range p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
            nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
            for _, selector := range term.MatchExpressions {
                nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
                nodeSelectorRequirement.WithKey(selector.Key)
                nodeSelectorRequirement.WithOperator(selector.Operator)
                nodeSelectorRequirement.WithValues(selector.Values...)
                nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
            }
            for _, selector := range term.MatchFields {
                nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
                nodeSelectorRequirement.WithKey(selector.Key)
                nodeSelectorRequirement.WithOperator(selector.Operator)
                nodeSelectorRequirement.WithValues(selector.Values...)
                nodeSelectorTerm.WithMatchFields(nodeSelectorRequirement)
            }
            nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
        }
        nodeAffinity := applyconfcore.NodeAffinity()
        nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
        affinity := applyconfcore.Affinity()
        affinity.WithNodeAffinity(nodeAffinity)
        podSpec.WithAffinity(affinity)
    }

    // Tolerations
    for _, t := range p.Tolerations {
        toleration := applyconfcore.Toleration()
        toleration.WithKey(t.Key)
        toleration.WithOperator(t.Operator)
        toleration.WithValue(t.Value)
        toleration.WithEffect(t.Effect)
        if t.TolerationSeconds != nil {
            toleration.WithTolerationSeconds(*t.TolerationSeconds)
        }
        podSpec.WithTolerations(toleration)
    }

    // Volumes
    for _, v := range p.Volumes {
        volume := applyconfcore.Volume()
        if v.HostPath != nil {
            volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
        }
        if v.PersistentVolumeClaim != nil {
            volume.WithName(v.Name).WithPersistentVolumeClaim(applyconfcore.PersistentVolumeClaimVolumeSource().WithClaimName(v.PersistentVolumeClaim.ClaimName))
        }
        podSpec.WithVolumes(volume)
    }

    // Image pull secrets
    if len(p.ImagePullSecrets) > 0 {
        localObjectReference := applyconfcore.LocalObjectReference()
        for _, o := range p.ImagePullSecrets {
            localObjectReference.WithName(o.Name)
        }
        podSpec.WithImagePullSecrets(localObjectReference)
    }

    podTemplate := applyconfcore.PodTemplateSpec()
    podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
        "app": podName,
    }, provider))
    podTemplate.WithSpec(podSpec)

    labelSelector := applyconfmeta.LabelSelector()
    labelSelector.WithMatchLabels(map[string]string{"app": podName})

    daemonSet := applyconfapp.DaemonSet(name, namespace)
    daemonSet.
        WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
        WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))

    return daemonSet
}
@@ -1,16 +0,0 @@
package kubernetes

import (
    "github.com/kubeshark/kubeshark/config"
)

func buildWithDefaultLabels(labels map[string]string, provider *Provider) map[string]string {
    labels[LabelManagedBy] = provider.managedBy
    labels[LabelCreatedBy] = provider.createdBy

    for k, v := range config.Config.Tap.ResourceLabels {
        labels[k] = v
    }

    return labels
}
@@ -1,71 +0,0 @@
package kubernetes

import (
    "context"

    "github.com/kubeshark/kubeshark/config"
    "github.com/kubeshark/kubeshark/config/configStructs"
    "github.com/kubeshark/kubeshark/docker"
    "github.com/rs/zerolog/log"
    core "k8s.io/api/core/v1"
)

func CreateWorkers(
    kubernetesProvider *Provider,
    selfServiceAccountExists bool,
    ctx context.Context,
    namespace string,
    resources configStructs.ResourceRequirements,
    imagePullPolicy core.PullPolicy,
    imagePullSecrets []core.LocalObjectReference,
    serviceMesh bool,
    tls bool,
    debug bool,
) error {
    if config.Config.Tap.PersistentStorage {
        persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
        if err != nil {
            return err
        }

        if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
            ctx,
            namespace,
            persistentVolumeClaim,
        ); err != nil {
            return err
        }
    }

    image := docker.GetWorkerImage()

    var serviceAccountName string
    if selfServiceAccountExists {
        serviceAccountName = ServiceAccountName
    } else {
        serviceAccountName = ""
    }

    log.Info().Msg("Creating the worker DaemonSet...")

    if err := kubernetesProvider.ApplyWorkerDaemonSet(
        ctx,
        namespace,
        WorkerDaemonSetName,
        image,
        WorkerPodName,
        serviceAccountName,
        resources,
        imagePullPolicy,
        imagePullSecrets,
        serviceMesh,
        tls,
        debug,
    ); err != nil {
        return err
    }

    log.Info().Msg("Successfully created the worker DaemonSet.")

    return nil
}
@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: PersistentVolumeClaim

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: networking.k8s.io/v1
kind: IngressClass

@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: networking.k8s.io/v1
kind: Ingress
@@ -1,178 +0,0 @@
package resources

import (
    "context"
    "fmt"

    "github.com/kubeshark/kubeshark/errormessage"
    "github.com/kubeshark/kubeshark/kubernetes"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/kubeshark/kubeshark/utils"
    "github.com/rs/zerolog/log"
    "k8s.io/apimachinery/pkg/util/wait"
)

func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfResourcesNamespace string) {
    log.Warn().Msg(fmt.Sprintf("Removing %s resources...", misc.Software))

    var leftoverResources []string

    if isNsRestrictedMode {
        leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, selfResourcesNamespace)
    } else {
        leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
    }

    if len(leftoverResources) > 0 {
        errMsg := "Failed to remove the following resources."
        for _, resource := range leftoverResources {
            errMsg += "\n- " + resource
        }
        log.Error().Msg(fmt.Sprintf(utils.Red, errMsg))
    }
}

func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
    leftoverResources := make([]string, 0)

    if err := kubernetesProvider.RemoveIngressClass(ctx, kubernetes.IngressClassName); err != nil {
        resourceDesc := kubernetes.IngressClassName
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
    }

    if resources, err := kubernetesProvider.ListManagedClusterRoles(ctx); err != nil {
        resourceDesc := "ClusterRoles"
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveClusterRole(ctx, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("ClusterRole %s", resource.Name)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if resources, err := kubernetesProvider.ListManagedClusterRoleBindings(ctx); err != nil {
        resourceDesc := "ClusterRoleBindings"
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", resource.Name)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    return leftoverResources
}

func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) {
    // Call cancel if a terminating signal was received. Allows user to skip the wait.
    go func() {
        utils.WaitForTermination(ctx, cancel)
    }()

    if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, selfResourcesNamespace); err != nil {
        switch {
        case ctx.Err() == context.Canceled:
            log.Printf("Do nothing. User interrupted the wait")
            log.Warn().
                Str("namespace", selfResourcesNamespace).
                Msg("Did nothing. User interrupted the wait.")
        case err == wait.ErrWaitTimeout:
            log.Warn().
                Str("namespace", selfResourcesNamespace).
                Msg("Timed out while deleting the namespace.")
        default:
            log.Warn().
                Err(errormessage.FormatError(err)).
                Str("namespace", selfResourcesNamespace).
                Msg("Unknown error while deleting the namespace.")
        }
    }
}

func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
    leftoverResources := make([]string, 0)

    if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.FrontServiceName); err != nil {
        resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.HubServiceName); err != nil {
        resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, selfResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
        resourceDesc := fmt.Sprintf("Persistent Volume %s in namespace %s", kubernetes.PersistentVolumeClaimName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
        resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveServiceAccount(ctx, selfResourcesNamespace, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, selfResourcesNamespace)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if resources, err := kubernetesProvider.ListManagedRoles(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("Roles in namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveRole(ctx, selfResourcesNamespace, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, selfResourcesNamespace)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveRoleBinding(ctx, selfResourcesNamespace, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, selfResourcesNamespace)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.HubPodName); err != nil {
        resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.FrontPodName); err != nil {
        resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    return leftoverResources
}

func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
    log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Error while removing %s", resourceDesc))
    *leftoverResources = append(*leftoverResources, resourceDesc)
}
@@ -1,117 +0,0 @@
package resources

import (
    "context"
    "fmt"

    "github.com/kubeshark/kubeshark/config"
    "github.com/kubeshark/kubeshark/config/configStructs"
    "github.com/kubeshark/kubeshark/docker"
    "github.com/kubeshark/kubeshark/errormessage"
    "github.com/kubeshark/kubeshark/kubernetes"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/rs/zerolog/log"
    core "k8s.io/api/core/v1"
)

func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.ResourceRequirements, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
    if !isNsRestrictedMode {
        if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
            log.Debug().Err(err).Send()
        }
    }

    err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace)
    var selfServiceAccountExists bool
    if err != nil {
        selfServiceAccountExists = true
        log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
    }

    hubOpts := &kubernetes.PodOptions{
        Namespace:          selfNamespace,
        PodName:            kubernetes.HubPodName,
        PodImage:           docker.GetHubImage(),
        ServiceAccountName: kubernetes.ServiceAccountName,
        Resources:          hubResources,
        ImagePullPolicy:    imagePullPolicy,
        ImagePullSecrets:   imagePullSecrets,
        Debug:              debug,
    }

    frontOpts := &kubernetes.PodOptions{
        Namespace:          selfNamespace,
        PodName:            kubernetes.FrontPodName,
        PodImage:           docker.GetWorkerImage(),
        ServiceAccountName: kubernetes.ServiceAccountName,
        Resources:          hubResources,
        ImagePullPolicy:    imagePullPolicy,
        ImagePullSecrets:   imagePullSecrets,
        Debug:              debug,
    }

    if err := createSelfHubPod(ctx, kubernetesProvider, hubOpts); err != nil {
        return selfServiceAccountExists, err
    }

    if err := createFrontPod(ctx, kubernetesProvider, frontOpts); err != nil {
        return selfServiceAccountExists, err
    }

    _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace))
    if err != nil {
        return selfServiceAccountExists, err
    }
    log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")

    _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace))
    if err != nil {
        return selfServiceAccountExists, err
    }
    log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")

    if config.Config.Tap.Ingress.Enabled {
        _, err = kubernetesProvider.CreateIngressClass(ctx, kubernetesProvider.BuildIngressClass())
        if err != nil {
            return selfServiceAccountExists, err
        }
        log.Info().Str("ingress-class", kubernetes.IngressClassName).Msg("Successfully created an ingress class.")

        _, err = kubernetesProvider.CreateIngress(ctx, selfNamespace, kubernetesProvider.BuildIngress())
        if err != nil {
            return selfServiceAccountExists, err
        }
        log.Info().Str("ingress", kubernetes.IngressName).Msg("Successfully created an ingress.")
    }

    return selfServiceAccountExists, nil
}

func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
    _, err := kubernetesProvider.CreateNamespace(ctx, kubernetesProvider.BuildNamespace(selfNamespace))
    return err
}

func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
    pod, err := kubernetesProvider.BuildHubPod(opts)
    if err != nil {
        return err
    }
    if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
        return err
    }
    log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
    return nil
}

func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
    pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
    if err != nil {
        return err
    }
    if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
        return err
    }
    log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
    return nil
}
@@ -2,7 +2,6 @@ package utils

import (
    "bytes"
    "encoding/json"

    "gopkg.in/yaml.v3"
)

@@ -12,18 +11,6 @@
    tab = "\t"
)

func PrettyJson(data interface{}) (string, error) {
    buffer := new(bytes.Buffer)
    encoder := json.NewEncoder(buffer)
    encoder.SetIndent(empty, tab)

    err := encoder.Encode(data)
    if err != nil {
        return empty, err
    }
    return buffer.String(), nil
}

func PrettyYaml(data interface{}) (string, error) {
    buffer := new(bytes.Buffer)
    encoder := yaml.NewEncoder(buffer)

@@ -35,18 +22,3 @@ func PrettyYaml(data interface{}) (string, error) {
    }
    return buffer.String(), nil
}

func PrettyYamlOmitEmpty(data interface{}) (string, error) {
    d, err := json.Marshal(data)
    if err != nil {
        return empty, err
    }

    var cleanData map[string]interface{}
    err = json.Unmarshal(d, &cleanData)
    if err != nil {
        return empty, err
    }

    return PrettyYaml(cleanData)
}
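After this change, PrettyJson and PrettyYamlOmitEmpty are gone and PrettyYaml is the only pretty-printer left in the package. A minimal sketch of a caller, assuming only the (interface{}) (string, error) signature kept above; the printConfig helper is hypothetical:

package main

import (
    "fmt"

    "github.com/kubeshark/kubeshark/utils"
)

// printConfig renders any value as indented YAML using the surviving helper.
func printConfig(data interface{}) error {
    out, err := utils.PrettyYaml(data)
    if err != nil {
        return err
    }
    fmt.Print(out)
    return nil
}

func main() {
    _ = printConfig(map[string]string{"selfnamespace": "kubeshark"})
}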