Fix the issues in the Helm chart so that `helm template` succeeds

This commit is contained in:
M. Mert Yildiran 2023-04-12 02:12:12 +03:00
parent d2b9bddf78
commit 18addbb980
No known key found for this signature in database
GPG Key ID: DA5D6DCBB758A461
8 changed files with 60 additions and 48 deletions

View File

@ -140,10 +140,10 @@ var hubPodMappings = map[string]interface{}{
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.imagePullSecrets": "{{ .Values.tap.docker.imagepullsecrets }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.cpu-limit }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.memory-limit }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.cpu-requests }}",
"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.memory-requests }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.requests.memory }}",
"spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./hub -debug\" \"./hub\" }}",
}
var hubServiceMappings = namespaceMappings
@ -159,10 +159,10 @@ var workerDaemonSetMappings = map[string]interface{}{
"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.imagePullSecrets": "{{ .Values.tap.docker.imagepullsecrets }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.cpu-limit }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.memory-limit }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.cpu-requests }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.memory-requests }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
}

View File

@ -29,11 +29,19 @@ const (
DebugLabel = "debug"
)
type Resources struct {
CpuLimit string `yaml:"cpu-limit" default:"750m"`
MemoryLimit string `yaml:"memory-limit" default:"1Gi"`
CpuRequests string `yaml:"cpu-requests" default:"50m"`
MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
// ResourceLimits holds the maximum CPU and memory a container may use.
// Values are Kubernetes resource quantity strings (e.g. "750m", "1Gi"),
// parsed later with resource.ParseQuantity.
type ResourceLimits struct {
	CPU    string `yaml:"cpu" default:"750m"`
	Memory string `yaml:"memory" default:"1Gi"`
}

// ResourceRequests holds the CPU and memory a container asks the
// scheduler to reserve. Values are Kubernetes resource quantity strings.
type ResourceRequests struct {
	CPU    string `yaml:"cpu" default:"50m"`
	Memory string `yaml:"memory" default:"50Mi"`
}

// ResourceRequirements mirrors the Kubernetes container "resources"
// block (limits/requests). The sibling config structs are tagged with
// yaml, so explicit yaml tags are added here for consistency; the
// original json tags are kept so json encoding is unchanged. (Both
// spell the keys "limits"/"requests", matching the values.yaml layout.)
type ResourceRequirements struct {
	Limits   ResourceLimits   `yaml:"limits" json:"limits"`
	Requests ResourceRequests `yaml:"requests" json:"requests"`
}
type WorkerConfig struct {
@ -66,8 +74,8 @@ type DockerConfig struct {
}
type ResourcesConfig struct {
Worker Resources `yaml:"worker"`
Hub Resources `yaml:"hub"`
Worker ResourceRequirements `yaml:"worker"`
Hub ResourceRequirements `yaml:"hub"`
}
type TapConfig struct {

View File

@ -26,11 +26,11 @@ spec:
name: kubeshark-hub
resources:
limits:
cpu: '{{ .Values.tap.resources.hub.cpu-limit }}'
memory: '{{ .Values.tap.resources.hub.memory-limit }}'
cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'
memory: '{{ .Values.tap.resources.hub.limits.memory }}'
requests:
cpu: '{{ .Values.tap.resources.hub.cpu-requests }}'
memory: '{{ .Values.tap.resources.hub.memory-requests }}'
cpu: '{{ .Values.tap.resources.hub.requests.cpu }}'
memory: '{{ .Values.tap.resources.hub.requests.memory }}'
dnsPolicy: ClusterFirstWithHostNet
imagePullSecrets: '{{ .Values.tap.docker.imagepullsecrets }}'
serviceAccountName: kubeshark-service-account

View File

@ -43,11 +43,11 @@ spec:
name: kubeshark-worker-daemon-set
resources:
limits:
cpu: '{{ .Values.tap.resources.worker.cpu-limit }}'
memory: '{{ .Values.tap.resources.worker.memory-limit }}'
cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
memory: '{{ .Values.tap.resources.worker.limits.memory }}'
requests:
cpu: '{{ .Values.tap.resources.worker.cpu-requests }}'
memory: '{{ .Values.tap.resources.worker.memory-requests }}'
cpu: '{{ .Values.tap.resources.worker.requests.cpu }}'
memory: '{{ .Values.tap.resources.worker.requests.memory }}'
securityContext:
capabilities:
add:

View File

@ -24,15 +24,19 @@ tap:
pcap: ""
resources:
worker:
cpu-limit: 750m
memory-limit: 1Gi
cpu-requests: 50m
memory-requests: 50Mi
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
hub:
cpu-limit: 750m
memory-limit: 1Gi
cpu-requests: 50m
memory-requests: 50Mi
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
servicemesh: true
tls: true
packetcapture: libpcap

View File

@ -180,26 +180,26 @@ type PodOptions struct {
PodName string
PodImage string
ServiceAccountName string
Resources configStructs.Resources
Resources configStructs.ResourceRequirements
ImagePullPolicy core.PullPolicy
ImagePullSecrets []core.LocalObjectReference
Debug bool
}
func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
cpuLimit, err := resource.ParseQuantity(opts.Resources.Limits.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s pod", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
memLimit, err := resource.ParseQuantity(opts.Resources.Limits.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s pod", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
cpuRequests, err := resource.ParseQuantity(opts.Resources.Requests.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s pod", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
memRequests, err := resource.ParseQuantity(opts.Resources.Requests.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s pod", opts.PodName)
}
@ -294,19 +294,19 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
}
func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
cpuLimit, err := resource.ParseQuantity(opts.Resources.Limits.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s pod", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
memLimit, err := resource.ParseQuantity(opts.Resources.Limits.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s pod", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
cpuRequests, err := resource.ParseQuantity(opts.Resources.Requests.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s pod", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
memRequests, err := resource.ParseQuantity(opts.Resources.Requests.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s pod", opts.PodName)
}
@ -680,7 +680,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
podImage string,
podName string,
serviceAccountName string,
resources configStructs.Resources,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
@ -688,19 +688,19 @@ func (provider *Provider) BuildWorkerDaemonSet(
debug bool,
) (*DaemonSet, error) {
// Resource limits
cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
cpuLimit, err := resource.ParseQuantity(resources.Limits.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s pod", podName)
}
memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
memLimit, err := resource.ParseQuantity(resources.Limits.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s pod", podName)
}
cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
cpuRequests, err := resource.ParseQuantity(resources.Requests.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s pod", podName)
}
memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
memRequests, err := resource.ParseQuantity(resources.Requests.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s pod", podName)
}
@ -897,7 +897,7 @@ func (provider *Provider) ApplyWorkerDaemonSet(
podImage string,
podName string,
serviceAccountName string,
resources configStructs.Resources,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,

View File

@ -14,7 +14,7 @@ func CreateWorkers(
selfServiceAccountExists bool,
ctx context.Context,
namespace string,
resources configStructs.Resources,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,

View File

@ -14,7 +14,7 @@ import (
core "k8s.io/api/core/v1"
)
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.Resources, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.ResourceRequirements, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
if !isNsRestrictedMode {
if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
log.Debug().Err(err).Send()