Mirror of https://github.com/kubeshark/kubeshark.git
⚡ Fix the issues in Helm chart such that helm template succeeds

This commit is contained in:
parent d2b9bddf78
commit 18addbb980
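
The chart previously exposed flat, hyphenated value keys such as tap.resources.hub.cpu-limit. A template reference like {{ .Values.tap.resources.hub.cpu-limit }} is rejected by Go's template parser (a hyphen is not a valid character inside a field name), which is presumably what made helm template fail; this commit re-nests the values as limits/requests maps instead. As a rough sketch of the resulting layout (the chart path in the comment below is a placeholder, not part of this commit), the hub section of values.yaml now reads:

# New nested layout introduced by this commit (defaults taken from the diff below).
tap:
  resources:
    hub:
      limits:
        cpu: 750m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 50Mi
# Rendering can be checked locally with something like:
#   helm template <path-to-kubeshark-chart>
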
@@ -140,10 +140,10 @@ var hubPodMappings = map[string]interface{}{
 	"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
 	"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
 	"spec.imagePullSecrets": "{{ .Values.tap.docker.imagepullsecrets }}",
-	"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.cpu-limit }}",
-	"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.memory-limit }}",
-	"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.cpu-requests }}",
-	"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.memory-requests }}",
+	"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
+	"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
+	"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
+	"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.requests.memory }}",
 	"spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./hub -debug\" \"./hub\" }}",
 }
 var hubServiceMappings = namespaceMappings
@@ -159,10 +159,10 @@ var workerDaemonSetMappings = map[string]interface{}{
 	"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
 	"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
 	"spec.imagePullSecrets": "{{ .Values.tap.docker.imagepullsecrets }}",
-	"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.cpu-limit }}",
-	"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.memory-limit }}",
-	"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.cpu-requests }}",
-	"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.memory-requests }}",
+	"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
+	"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
+	"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
+	"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
 	"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
 	"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
 }
@@ -29,11 +29,19 @@ const (
 	DebugLabel = "debug"
 )
 
-type Resources struct {
-	CpuLimit       string `yaml:"cpu-limit" default:"750m"`
-	MemoryLimit    string `yaml:"memory-limit" default:"1Gi"`
-	CpuRequests    string `yaml:"cpu-requests" default:"50m"`
-	MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
+type ResourceLimits struct {
+	CPU    string `yaml:"cpu" default:"750m"`
+	Memory string `yaml:"memory" default:"1Gi"`
+}
+
+type ResourceRequests struct {
+	CPU    string `yaml:"cpu" default:"50m"`
+	Memory string `yaml:"memory" default:"50Mi"`
+}
+
+type ResourceRequirements struct {
+	Limits   ResourceLimits   `json:"limits"`
+	Requests ResourceRequests `json:"requests"`
 }
 
 type WorkerConfig struct {
@@ -66,8 +74,8 @@ type DockerConfig struct {
 }
 
 type ResourcesConfig struct {
-	Worker Resources `yaml:"worker"`
-	Hub    Resources `yaml:"hub"`
+	Worker ResourceRequirements `yaml:"worker"`
+	Hub    ResourceRequirements `yaml:"hub"`
 }
 
 type TapConfig struct {
@@ -26,11 +26,11 @@ spec:
       name: kubeshark-hub
       resources:
         limits:
-          cpu: '{{ .Values.tap.resources.hub.cpu-limit }}'
-          memory: '{{ .Values.tap.resources.hub.memory-limit }}'
+          cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'
+          memory: '{{ .Values.tap.resources.hub.limits.memory }}'
         requests:
-          cpu: '{{ .Values.tap.resources.hub.cpu-requests }}'
-          memory: '{{ .Values.tap.resources.hub.memory-requests }}'
+          cpu: '{{ .Values.tap.resources.hub.requests.cpu }}'
+          memory: '{{ .Values.tap.resources.hub.requests.memory }}'
   dnsPolicy: ClusterFirstWithHostNet
   imagePullSecrets: '{{ .Values.tap.docker.imagepullsecrets }}'
   serviceAccountName: kubeshark-service-account
@@ -43,11 +43,11 @@ spec:
          name: kubeshark-worker-daemon-set
          resources:
            limits:
-             cpu: '{{ .Values.tap.resources.worker.cpu-limit }}'
-             memory: '{{ .Values.tap.resources.worker.memory-limit }}'
+             cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
+             memory: '{{ .Values.tap.resources.worker.limits.memory }}'
            requests:
-             cpu: '{{ .Values.tap.resources.worker.cpu-requests }}'
-             memory: '{{ .Values.tap.resources.worker.memory-requests }}'
+             cpu: '{{ .Values.tap.resources.worker.requests.cpu }}'
+             memory: '{{ .Values.tap.resources.worker.requests.memory }}'
          securityContext:
            capabilities:
              add:
@@ -24,15 +24,19 @@ tap:
   pcap: ""
   resources:
     worker:
-      cpu-limit: 750m
-      memory-limit: 1Gi
-      cpu-requests: 50m
-      memory-requests: 50Mi
+      limits:
+        cpu: 750m
+        memory: 1Gi
+      requests:
+        cpu: 50m
+        memory: 50Mi
     hub:
-      cpu-limit: 750m
-      memory-limit: 1Gi
-      cpu-requests: 50m
-      memory-requests: 50Mi
+      limits:
+        cpu: 750m
+        memory: 1Gi
+      requests:
+        cpu: 50m
+        memory: 50Mi
   servicemesh: true
   tls: true
   packetcapture: libpcap
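
To override the new nested keys without editing the chart, a values file can set just the subtree that should change; Helm deep-merges it over the defaults above. A minimal, hypothetical example (the file name my-values.yaml and the resource figures are illustrative only, not part of this commit):

# my-values.yaml — bump only the worker limits; the requests keep the chart defaults.
tap:
  resources:
    worker:
      limits:
        cpu: "1"
        memory: 2Gi
# Applied at render or install time, e.g.:
#   helm template <path-to-kubeshark-chart> -f my-values.yaml
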
@@ -180,26 +180,26 @@ type PodOptions struct {
 	PodName            string
 	PodImage           string
 	ServiceAccountName string
-	Resources          configStructs.Resources
+	Resources          configStructs.ResourceRequirements
 	ImagePullPolicy    core.PullPolicy
 	ImagePullSecrets   []core.LocalObjectReference
 	Debug              bool
 }
 
 func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
-	cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
+	cpuLimit, err := resource.ParseQuantity(opts.Resources.Limits.CPU)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu limit for %s pod", opts.PodName)
 	}
-	memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
+	memLimit, err := resource.ParseQuantity(opts.Resources.Limits.Memory)
 	if err != nil {
 		return nil, fmt.Errorf("invalid memory limit for %s pod", opts.PodName)
 	}
-	cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
+	cpuRequests, err := resource.ParseQuantity(opts.Resources.Requests.CPU)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu request for %s pod", opts.PodName)
 	}
-	memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
+	memRequests, err := resource.ParseQuantity(opts.Resources.Requests.Memory)
 	if err != nil {
 		return nil, fmt.Errorf("invalid memory request for %s pod", opts.PodName)
 	}
@@ -294,19 +294,19 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
 }
 
 func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
-	cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
+	cpuLimit, err := resource.ParseQuantity(opts.Resources.Limits.CPU)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu limit for %s pod", opts.PodName)
 	}
-	memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
+	memLimit, err := resource.ParseQuantity(opts.Resources.Limits.Memory)
 	if err != nil {
 		return nil, fmt.Errorf("invalid memory limit for %s pod", opts.PodName)
 	}
-	cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
+	cpuRequests, err := resource.ParseQuantity(opts.Resources.Requests.CPU)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu request for %s pod", opts.PodName)
 	}
-	memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
+	memRequests, err := resource.ParseQuantity(opts.Resources.Requests.Memory)
 	if err != nil {
 		return nil, fmt.Errorf("invalid memory request for %s pod", opts.PodName)
 	}
@@ -680,7 +680,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
 	podImage string,
 	podName string,
 	serviceAccountName string,
-	resources configStructs.Resources,
+	resources configStructs.ResourceRequirements,
 	imagePullPolicy core.PullPolicy,
 	imagePullSecrets []core.LocalObjectReference,
 	serviceMesh bool,
@@ -688,19 +688,19 @@ func (provider *Provider) BuildWorkerDaemonSet(
 	debug bool,
 ) (*DaemonSet, error) {
 	// Resource limits
-	cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
+	cpuLimit, err := resource.ParseQuantity(resources.Limits.CPU)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu limit for %s pod", podName)
 	}
-	memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
+	memLimit, err := resource.ParseQuantity(resources.Limits.Memory)
 	if err != nil {
 		return nil, fmt.Errorf("invalid memory limit for %s pod", podName)
 	}
-	cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
+	cpuRequests, err := resource.ParseQuantity(resources.Requests.CPU)
 	if err != nil {
 		return nil, fmt.Errorf("invalid cpu request for %s pod", podName)
 	}
-	memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
+	memRequests, err := resource.ParseQuantity(resources.Requests.Memory)
 	if err != nil {
 		return nil, fmt.Errorf("invalid memory request for %s pod", podName)
 	}
@@ -897,7 +897,7 @@ func (provider *Provider) ApplyWorkerDaemonSet(
 	podImage string,
 	podName string,
 	serviceAccountName string,
-	resources configStructs.Resources,
+	resources configStructs.ResourceRequirements,
 	imagePullPolicy core.PullPolicy,
 	imagePullSecrets []core.LocalObjectReference,
 	serviceMesh bool,
|
@ -14,7 +14,7 @@ func CreateWorkers(
|
|||||||
selfServiceAccountExists bool,
|
selfServiceAccountExists bool,
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
namespace string,
|
namespace string,
|
||||||
resources configStructs.Resources,
|
resources configStructs.ResourceRequirements,
|
||||||
imagePullPolicy core.PullPolicy,
|
imagePullPolicy core.PullPolicy,
|
||||||
imagePullSecrets []core.LocalObjectReference,
|
imagePullSecrets []core.LocalObjectReference,
|
||||||
serviceMesh bool,
|
serviceMesh bool,
|
||||||
|
@@ -14,7 +14,7 @@ import (
 	core "k8s.io/api/core/v1"
 )
 
-func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.Resources, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
+func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.ResourceRequirements, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
 	if !isNsRestrictedMode {
 		if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
 			log.Debug().Err(err).Send()