Mirror of https://github.com/kubeshark/kubeshark.git (synced 2025-09-15 22:31:44 +00:00)
✨ Use the Helm chart in tap command to install Kubeshark (#1362)
* ✨ Use the Helm chart in `tap` command to install Kubeshark
* ⬆️ Set Go version to `1.19` in `go.mod` file
* ✨ Add `Helm` struct, `NewHelm` and `NewHelmDefault` methods
* ⚡ Better logging and error return
* ⚡ Pass the config as `values.yaml` to Helm install
* 🔥 Remove `helm-chart`, `manifests` and `check` commands
* ➖ Run `go mod tidy`
* 🎨 Move `helm` package into `kubernetes` package
* 🔥 Remove `# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!` notice from the manifests and Helm templates
* 🔥 Remove the unused `GenerateApplyConfiguration` and `buildWithDefaultLabels` methods
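For context, a minimal sketch of how a caller such as the `tap` command can drive the new package after this change. Only `NewHelmDefault` and `Install` come from the code added in this diff; the surrounding wiring below is assumed, not copied from the commit:

package main

import (
	"github.com/kubeshark/kubeshark/kubernetes/helm"
	"github.com/rs/zerolog/log"
)

func main() {
	// NewHelmDefault targets https://helm.kubeshark.co, release "kubeshark",
	// namespace "default"; config.Config is serialized and passed as chart values.
	h := helm.NewHelmDefault()
	rel, err := h.Install()
	if err != nil {
		log.Fatal().Err(err).Msg("helm install failed")
	}
	log.Info().Str("release", rel.Name).Msg("installed")
}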
kubernetes/helm/helm.go (new file, 187 lines)
@@ -0,0 +1,187 @@
package helm

import (
	"encoding/json"
	"os"
	"path/filepath"
	"regexp"

	"github.com/kubeshark/kubeshark/config"
	"github.com/pkg/errors"
	"github.com/rs/zerolog/log"
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
	"helm.sh/helm/v3/pkg/downloader"
	"helm.sh/helm/v3/pkg/getter"
	"helm.sh/helm/v3/pkg/kube"
	"helm.sh/helm/v3/pkg/registry"
	"helm.sh/helm/v3/pkg/release"
	"helm.sh/helm/v3/pkg/repo"
)

const ENV_HELM_DRIVER = "HELM_DRIVER"

var settings = cli.New()

type Helm struct {
	repo             string
	releaseName      string
	releaseNamespace string
}

func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
	return &Helm{
		repo:             repo,
		releaseName:      releaseName,
		releaseNamespace: releaseNamespace,
	}
}

func NewHelmDefault() *Helm {
	return &Helm{
		repo:             "https://helm.kubeshark.co",
		releaseName:      "kubeshark",
		releaseNamespace: "default",
	}
}

func parseOCIRef(chartRef string) (string, string, error) {
	refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
	caps := refTagRegexp.FindStringSubmatch(chartRef)
	if len(caps) != 4 {
		return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
	}
	chartRef = caps[1]
	tag := caps[3]

	return chartRef, tag, nil
}

func (h *Helm) Install() (rel *release.Release, err error) {
	kubeConfigPath := config.Config.KubeConfigPath()
	actionConfig := new(action.Configuration)
	if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
		log.Info().Msgf(format, v...)
	}); err != nil {
		return
	}

	client := action.NewInstall(actionConfig)
	client.Namespace = h.releaseNamespace
	client.ReleaseName = h.releaseName

	var chartURL string
	chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
	if err != nil {
		return
	}

	var cp string
	cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
	if err != nil {
		return
	}

	m := &downloader.Manager{
		Out:              os.Stdout,
		ChartPath:        cp,
		Keyring:          client.ChartPathOptions.Keyring,
		SkipUpdate:       false,
		Getters:          getter.All(settings),
		RepositoryConfig: settings.RepositoryConfig,
		RepositoryCache:  settings.RepositoryCache,
		Debug:            settings.Debug,
	}

	dl := downloader.ChartDownloader{
		Out:              m.Out,
		Verify:           m.Verify,
		Keyring:          m.Keyring,
		RepositoryConfig: m.RepositoryConfig,
		RepositoryCache:  m.RepositoryCache,
		RegistryClient:   m.RegistryClient,
		Getters:          m.Getters,
		Options: []getter.Option{
			getter.WithInsecureSkipVerifyTLS(false),
		},
	}

	repoPath := filepath.Dir(m.ChartPath)
	err = os.MkdirAll(repoPath, os.ModePerm)
	if err != nil {
		return
	}

	version := ""
	if registry.IsOCI(chartURL) {
		chartURL, version, err = parseOCIRef(chartURL)
		if err != nil {
			return
		}
		dl.Options = append(dl.Options,
			getter.WithRegistryClient(m.RegistryClient),
			getter.WithTagName(version))
	}

	log.Info().
		Str("url", chartURL).
		Str("repo-path", repoPath).
		Msg("Downloading Helm chart:")

	if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
		return
	}

	var chart *chart.Chart
	chart, err = loader.Load(m.ChartPath)
	if err != nil {
		return
	}

	log.Info().
		Str("release", chart.Metadata.Name).
		Str("version", chart.Metadata.Version).
		Strs("source", chart.Metadata.Sources).
		Str("kube-version", chart.Metadata.KubeVersion).
		Msg("Installing using Helm:")

	var configMarshalled []byte
	configMarshalled, err = json.Marshal(config.Config)
	if err != nil {
		return
	}

	var configUnmarshalled map[string]interface{}
	err = json.Unmarshal(configMarshalled, &configUnmarshalled)
	if err != nil {
		return
	}

	rel, err = client.Run(chart, configUnmarshalled)
	if err != nil {
		return
	}

	return
}

func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
	kubeConfigPath := config.Config.KubeConfigPath()
	actionConfig := new(action.Configuration)
	if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
		log.Info().Msgf(format, v...)
	}); err != nil {
		return
	}

	client := action.NewUninstall(actionConfig)

	resp, err = client.Run(h.releaseName)
	if err != nil {
		return
	}

	return
}
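A side note on `parseOCIRef` above: the regexp captures the base reference in group 1 and the tag after the final colon in group 3, so a reference that also carries a registry port still splits correctly. An in-package illustration with a made-up reference:

// Hypothetical input; ref and tag are what the capture groups yield.
ref, tag, err := parseOCIRef("oci://registry.example.com:5000/charts/kubeshark:52.0")
// ref == "oci://registry.example.com:5000/charts/kubeshark"
// tag == "52.0"
// err == nil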
File diff suppressed because it is too large
@@ -1,147 +0,0 @@
package kubernetes

import (
	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
	applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
	v1 "k8s.io/client-go/applyconfigurations/core/v1"
	applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
)

type DaemonSetPod struct {
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	Spec              core.PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

type DaemonSetSpec struct {
	Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
	Template DaemonSetPod         `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}

type DaemonSet struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	Spec              DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

func (d *DaemonSet) GenerateApplyConfiguration(name string, namespace string, podName string, provider *Provider) *applyconfapp.DaemonSetApplyConfiguration {
	// Pod
	p := d.Spec.Template.Spec
	podSpec := applyconfcore.PodSpec()
	podSpec.WithHostNetwork(p.HostNetwork)
	podSpec.WithDNSPolicy(p.DNSPolicy)
	podSpec.WithTerminationGracePeriodSeconds(*p.TerminationGracePeriodSeconds)
	podSpec.WithServiceAccountName(p.ServiceAccountName)

	// Containers
	for _, c := range d.Spec.Template.Spec.Containers {
		// Common
		container := applyconfcore.Container()
		container.WithName(c.Name)
		container.WithImage(c.Image)
		container.WithImagePullPolicy(c.ImagePullPolicy)
		container.WithCommand(c.Command...)

		// Linux capabilities
		caps := applyconfcore.Capabilities().WithAdd(c.SecurityContext.Capabilities.Add...).WithDrop(c.SecurityContext.Capabilities.Drop...)
		container.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))

		// Environment variables
		var envvars []*v1.EnvVarApplyConfiguration
		for _, e := range c.Env {
			envvars = append(envvars, applyconfcore.EnvVar().WithName(e.Name).WithValue(e.Value))
		}
		container.WithEnv(envvars...)

		// Resource limits
		resources := applyconfcore.ResourceRequirements().WithRequests(c.Resources.Requests).WithLimits(c.Resources.Limits)
		container.WithResources(resources)

		// Volume mounts
		for _, m := range c.VolumeMounts {
			volumeMount := applyconfcore.VolumeMount().WithName(m.Name).WithMountPath(m.MountPath).WithReadOnly(m.ReadOnly)
			container.WithVolumeMounts(volumeMount)
		}

		podSpec.WithContainers(container)
	}

	// Node affinity (RequiredDuringSchedulingIgnoredDuringExecution only)
	if p.Affinity != nil {
		nodeSelector := applyconfcore.NodeSelector()
		for _, term := range p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
			nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
			for _, selector := range term.MatchExpressions {
				nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
				nodeSelectorRequirement.WithKey(selector.Key)
				nodeSelectorRequirement.WithOperator(selector.Operator)
				nodeSelectorRequirement.WithValues(selector.Values...)
				nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
			}
			for _, selector := range term.MatchFields {
				nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
				nodeSelectorRequirement.WithKey(selector.Key)
				nodeSelectorRequirement.WithOperator(selector.Operator)
				nodeSelectorRequirement.WithValues(selector.Values...)
				nodeSelectorTerm.WithMatchFields(nodeSelectorRequirement)
			}
			nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
		}
		nodeAffinity := applyconfcore.NodeAffinity()
		nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
		affinity := applyconfcore.Affinity()
		affinity.WithNodeAffinity(nodeAffinity)
		podSpec.WithAffinity(affinity)
	}

	// Tolerations
	for _, t := range p.Tolerations {
		toleration := applyconfcore.Toleration()
		toleration.WithKey(t.Key)
		toleration.WithOperator(t.Operator)
		toleration.WithValue(t.Value)
		toleration.WithEffect(t.Effect)
		if t.TolerationSeconds != nil {
			toleration.WithTolerationSeconds(*t.TolerationSeconds)
		}
		podSpec.WithTolerations(toleration)
	}

	// Volumes
	for _, v := range p.Volumes {
		volume := applyconfcore.Volume()
		if v.HostPath != nil {
			volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
		}
		if v.PersistentVolumeClaim != nil {
			volume.WithName(v.Name).WithPersistentVolumeClaim(applyconfcore.PersistentVolumeClaimVolumeSource().WithClaimName(v.PersistentVolumeClaim.ClaimName))
		}
		podSpec.WithVolumes(volume)
	}

	// Image pull secrets
	if len(p.ImagePullSecrets) > 0 {
		localObjectReference := applyconfcore.LocalObjectReference()
		for _, o := range p.ImagePullSecrets {
			localObjectReference.WithName(o.Name)
		}
		podSpec.WithImagePullSecrets(localObjectReference)
	}

	podTemplate := applyconfcore.PodTemplateSpec()
	podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
		"app": podName,
	}, provider))
	podTemplate.WithSpec(podSpec)

	labelSelector := applyconfmeta.LabelSelector()
	labelSelector.WithMatchLabels(map[string]string{"app": podName})

	daemonSet := applyconfapp.DaemonSet(name, namespace)
	daemonSet.
		WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
		WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))

	return daemonSet
}
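For readers unfamiliar with the pattern being deleted here: an apply configuration like the one built above is submitted to the cluster with client-go's server-side apply. A minimal sketch, assuming a standard typed clientset (the helper name and field manager string are illustrative, not from this repo):

package kubernetes

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
	k8sclient "k8s.io/client-go/kubernetes"
)

// applyDaemonSet submits a DaemonSet apply configuration via server-side apply.
func applyDaemonSet(ctx context.Context, clientset *k8sclient.Clientset, namespace string, ds *applyconfapp.DaemonSetApplyConfiguration) error {
	_, err := clientset.AppsV1().DaemonSets(namespace).Apply(ctx, ds, metav1.ApplyOptions{
		FieldManager: "kubeshark-cli", // illustrative; server-side apply requires a field manager
	})
	return err
}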
@@ -1,16 +0,0 @@
package kubernetes

import (
	"github.com/kubeshark/kubeshark/config"
)

func buildWithDefaultLabels(labels map[string]string, provider *Provider) map[string]string {
	labels[LabelManagedBy] = provider.managedBy
	labels[LabelCreatedBy] = provider.createdBy

	for k, v := range config.Config.Tap.ResourceLabels {
		labels[k] = v
	}

	return labels
}
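Worth noting about the merge order in the function above: the managed-by/created-by pair is written first and `config.Config.Tap.ResourceLabels` last, so user-configured labels win on key collisions, and the input map is mutated in place as well as returned. An illustrative call (the values are made up):

// Assuming provider.managedBy == "kubeshark" and
// Tap.ResourceLabels == {"team": "observability"}:
labels := buildWithDefaultLabels(map[string]string{"app": "worker"}, provider)
// labels now holds the "app", LabelManagedBy, LabelCreatedBy and "team" entries.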
@@ -1,71 +0,0 @@
package kubernetes

import (
	"context"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/docker"
	"github.com/rs/zerolog/log"
	core "k8s.io/api/core/v1"
)

func CreateWorkers(
	kubernetesProvider *Provider,
	selfServiceAccountExists bool,
	ctx context.Context,
	namespace string,
	resources configStructs.ResourceRequirements,
	imagePullPolicy core.PullPolicy,
	imagePullSecrets []core.LocalObjectReference,
	serviceMesh bool,
	tls bool,
	debug bool,
) error {
	if config.Config.Tap.PersistentStorage {
		persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
		if err != nil {
			return err
		}

		if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
			ctx,
			namespace,
			persistentVolumeClaim,
		); err != nil {
			return err
		}
	}

	image := docker.GetWorkerImage()

	var serviceAccountName string
	if selfServiceAccountExists {
		serviceAccountName = ServiceAccountName
	} else {
		serviceAccountName = ""
	}

	log.Info().Msg("Creating the worker DaemonSet...")

	if err := kubernetesProvider.ApplyWorkerDaemonSet(
		ctx,
		namespace,
		WorkerDaemonSetName,
		image,
		WorkerPodName,
		serviceAccountName,
		resources,
		imagePullPolicy,
		imagePullSecrets,
		serviceMesh,
		tls,
		debug,
	); err != nil {
		return err
	}

	log.Info().Msg("Successfully created the worker DaemonSet.")

	return nil
}
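With this commit, none of the imperative provisioning above runs from the CLI anymore: the Helm chart renders the worker DaemonSet, and the equivalent knobs travel as chart values. A hypothetical fragment of the values map that `Install` derives from `config.Config` via the JSON round-trip (the key names follow the config struct's JSON tags and are illustrative, not copied from the real config):

// Illustrative values map; client.Run(chart, values) is the same call
// Install makes after marshalling config.Config.
values := map[string]interface{}{
	"tap": map[string]interface{}{
		"persistentStorage": false,
		"resourceLabels":    map[string]string{"team": "observability"},
	},
}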