🔥 Delete the manifests and add complete.yaml instead

M. Mert Yildiran 2023-06-27 01:22:30 +03:00
parent 5c4c913a27
commit c07f1851b3
GPG Key ID: DA5D6DCBB758A461
14 changed files with 310 additions and 364 deletions

@@ -68,3 +68,6 @@ kubectl-view-all-resources: ## This command outputs all Kubernetes resources usi
kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
	./kubectl.sh view-kubeshark-resources
generate-manifests: ## Generate the manifests from the Helm chart using default configuration
	helm template ./helm-chart > ./manifests/complete.yaml
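
With this target in place, regenerating the flattened manifest locally is a single make invocation; the values override below is an illustrative assumption only, since the target itself renders the chart with its default configuration:

    make generate-manifests                                                  # runs: helm template ./helm-chart > ./manifests/complete.yaml
    helm template ./helm-chart -f values.yaml > ./manifests/complete.yaml    # optional: render with overridden values (illustrative)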

@@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  labels:
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark
spec: {}
status: {}

@@ -1,11 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-service-account
  namespace: kubeshark

@@ -1,27 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-cluster-role
  namespace: kubeshark
rules:
- apiGroups:
  - ""
  - extensions
  - apps
  - networking.k8s.io
  resources:
  - pods
  - services
  - endpoints
  - persistentvolumeclaims
  - ingresses
  verbs:
  - list
  - get
  - watch

@@ -1,19 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-cluster-role-binding
  namespace: kubeshark
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
  name: kubeshark-service-account
  namespace: kubeshark

@@ -1,47 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: kubeshark-hub
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-hub
  namespace: kubeshark
spec:
  containers:
  - command:
    - ./hub
    env:
    - name: POD_REGEX
      value: .*
    - name: NAMESPACES
    - name: LICENSE
    - name: SCRIPTING_ENV
      value: '{}'
    - name: SCRIPTING_SCRIPTS
      value: '[]'
    - name: AUTH_APPROVED_DOMAINS
    image: docker.io/kubeshark/hub:latest
    imagePullPolicy: Always
    name: kubeshark-hub
    ports:
    - containerPort: 80
      hostPort: 8898
    resources:
      limits:
        cpu: 750m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 50Mi
  dnsPolicy: ClusterFirstWithHostNet
  serviceAccountName: kubeshark-service-account
  terminationGracePeriodSeconds: 0
  tolerations:
  - effect: NoExecute
    operator: Exists
  - effect: NoSchedule
    operator: Exists
status: {}

@@ -1,20 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-hub
  namespace: kubeshark
spec:
  ports:
  - name: kubeshark-hub
    port: 80
    targetPort: 80
  selector:
    app: kubeshark-hub
  type: NodePort
status:
  loadBalancer: {}

@@ -1,49 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: kubeshark-front
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-front
  namespace: kubeshark
spec:
  containers:
  - env:
    - name: REACT_APP_DEFAULT_FILTER
      value: ' '
    - name: REACT_APP_HUB_HOST
      value: ' '
    - name: REACT_APP_HUB_PORT
      value: "8898"
    image: docker.io/kubeshark/front:latest
    imagePullPolicy: Always
    name: kubeshark-front
    ports:
    - containerPort: 80
      hostPort: 8899
    readinessProbe:
      failureThreshold: 3
      periodSeconds: 1
      successThreshold: 1
      tcpSocket:
        port: 80
      timeoutSeconds: 1
    resources:
      limits:
        cpu: 750m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 50Mi
  dnsPolicy: ClusterFirstWithHostNet
  serviceAccountName: kubeshark-service-account
  terminationGracePeriodSeconds: 0
  tolerations:
  - effect: NoExecute
    operator: Exists
  - effect: NoSchedule
    operator: Exists
status: {}

@@ -1,20 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-front
  namespace: kubeshark
spec:
  ports:
  - name: kubeshark-front
    port: 80
    targetPort: 80
  selector:
    app: kubeshark-front
  type: NodePort
status:
  loadBalancer: {}

@@ -1,19 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-persistent-volume-claim
  namespace: kubeshark
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: standard
status: {}

@@ -1,92 +0,0 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app: kubeshark-worker-daemon-set
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-worker-daemon-set
  namespace: kubeshark
spec:
  selector:
    matchLabels:
      app: kubeshark-worker-daemon-set
      kubeshark-created-by: kubeshark
      kubeshark-managed-by: kubeshark
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: kubeshark-worker-daemon-set
        kubeshark-created-by: kubeshark
        kubeshark-managed-by: kubeshark
      name: kubeshark-worker-daemon-set
      namespace: kubeshark
    spec:
      containers:
      - command:
        - ./worker
        - -i
        - any
        - -port
        - "8897"
        - -packet-capture
        - libpcap
        - -servicemesh
        - -tls
        - -procfs
        - /hostproc
        image: docker.io/kubeshark/worker:latest
        imagePullPolicy: Always
        name: kubeshark-worker-daemon-set
        ports:
        - containerPort: 8897
          hostPort: 8897
        resources:
          limits:
            cpu: 750m
            memory: 1Gi
          requests:
            cpu: 50m
            memory: 50Mi
        securityContext:
          capabilities:
            add:
            - NET_RAW
            - NET_ADMIN
            - SYS_ADMIN
            - SYS_PTRACE
            - DAC_OVERRIDE
            - SYS_RESOURCE
            drop:
            - ALL
        volumeMounts:
        - mountPath: /hostproc
          name: proc
          readOnly: true
        - mountPath: /sys
          name: sys
          readOnly: true
        - mountPath: /app/data
          name: kubeshark-persistent-volume
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: kubeshark-service-account
      terminationGracePeriodSeconds: 0
      tolerations:
      - effect: NoExecute
        operator: Exists
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /proc
        name: proc
      - hostPath:
          path: /sys
        name: sys
      - name: kubeshark-persistent-volume
        persistentVolumeClaim:
          claimName: kubeshark-persistent-volume-claim

@@ -1,13 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-ingress-class
  namespace: kubeshark
spec:
  controller: k8s.io/ingress-nginx

@@ -1,36 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    certmanager.k8s.io/cluster-issuer: letsencrypt-prod
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-ingress
  namespace: kubeshark
spec:
  ingressClassName: kubeshark-ingress-class
  rules:
  - host: ks.svc.cluster.local
    http:
      paths:
      - backend:
          service:
            name: kubeshark-hub
            port:
              number: 80
        path: /api(/|$)(.*)
        pathType: Prefix
      - backend:
          service:
            name: kubeshark-front
            port:
              number: 80
        path: /()(.*)
        pathType: Prefix
status:
  loadBalancer: {}

manifests/complete.yaml (new file, 307 lines)

@@ -0,0 +1,307 @@
---
# Source: kubeshark/templates/00-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: 'kubeshark'
spec: {}
status: {}
---
# Source: kubeshark/templates/01-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-service-account
  namespace: 'kubeshark'
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-cluster-role
  namespace: 'kubeshark'
rules:
- apiGroups:
  - ""
  - extensions
  - apps
  - networking.k8s.io
  resources:
  - pods
  - services
  - endpoints
  - persistentvolumeclaims
  - ingresses
  verbs:
  - list
  - get
  - watch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-cluster-role-binding
  namespace: 'kubeshark'
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
  name: kubeshark-service-account
  namespace: 'kubeshark'
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-hub
  namespace: 'kubeshark'
spec:
  ports:
  - name: kubeshark-hub
    port: 80
    targetPort: 80
  selector:
    app: kubeshark-hub
  type: NodePort
status:
  loadBalancer: {}
---
# Source: kubeshark/templates/07-front-service.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-front
  namespace: 'kubeshark'
spec:
  ports:
  - name: kubeshark-front
    port: 80
    targetPort: 80
  selector:
    app: kubeshark-front
  type: NodePort
status:
  loadBalancer: {}
---
# Source: kubeshark/templates/09-worker-daemon-set.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app: kubeshark-worker-daemon-set
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-worker-daemon-set
  namespace: 'kubeshark'
spec:
  selector:
    matchLabels:
      app: kubeshark-worker-daemon-set
      kubeshark-created-by: kubeshark
      kubeshark-managed-by: kubeshark
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: kubeshark-worker-daemon-set
        kubeshark-created-by: kubeshark
        kubeshark-managed-by: kubeshark
      name: kubeshark-worker-daemon-set
      namespace: kubeshark
    spec:
      containers:
      - command:
        - './worker'
        - -i
        - any
        - -port
        - '8897'
        - -packet-capture
        - 'libpcap'
        - -servicemesh
        - -tls
        - -procfs
        - /hostproc
        image: 'docker.io/kubeshark/worker:latest'
        imagePullPolicy: 'Always'
        name: kubeshark-worker-daemon-set
        ports:
        - containerPort: 8897
          hostPort: 8897
        resources:
          limits:
            cpu: '750m'
            memory: '1Gi'
          requests:
            cpu: '50m'
            memory: '50Mi'
        securityContext:
          capabilities:
            add:
            - NET_RAW
            - NET_ADMIN
            - SYS_ADMIN
            - SYS_PTRACE
            - DAC_OVERRIDE
            - SYS_RESOURCE
            drop:
            - ALL
        volumeMounts:
        - mountPath: /hostproc
          name: proc
          readOnly: true
        - mountPath: /sys
          name: sys
          readOnly: true
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: kubeshark-service-account
      terminationGracePeriodSeconds: 0
      tolerations:
      - effect: NoExecute
        operator: Exists
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /proc
        name: proc
      - hostPath:
          path: /sys
        name: sys
---
# Source: kubeshark/templates/04-hub-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: kubeshark-hub
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-hub
  namespace: 'kubeshark'
spec:
  containers:
  - command:
    - './hub'
    env:
    - name: POD_REGEX
      value: '.*'
    - name: NAMESPACES
      value: ''
    - name: LICENSE
      value: ''
    - name: SCRIPTING_ENV
      value: '{}'
    - name: SCRIPTING_SCRIPTS
      value: '[]'
    - name: AUTH_APPROVED_DOMAINS
      value: ''
    image: 'docker.io/kubeshark/hub:latest'
    imagePullPolicy: 'Always'
    name: kubeshark-hub
    ports:
    - containerPort: 80
      hostPort: 8898
    resources:
      limits:
        cpu: '750m'
        memory: '1Gi'
      requests:
        cpu: '50m'
        memory: '50Mi'
  dnsPolicy: ClusterFirstWithHostNet
  serviceAccountName: kubeshark-service-account
  terminationGracePeriodSeconds: 0
  tolerations:
  - effect: NoExecute
    operator: Exists
  - effect: NoSchedule
    operator: Exists
status: {}
---
# Source: kubeshark/templates/06-front-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: kubeshark-front
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-front
  namespace: 'kubeshark'
spec:
  containers:
  - env:
    - name: REACT_APP_DEFAULT_FILTER
      value: ' '
    - name: REACT_APP_HUB_HOST
      value: ' '
    - name: REACT_APP_HUB_PORT
      value: '8898'
    image: 'docker.io/kubeshark/front:latest'
    imagePullPolicy: 'Always'
    name: kubeshark-front
    ports:
    - containerPort: 80
      hostPort: 8899
    readinessProbe:
      failureThreshold: 3
      periodSeconds: 1
      successThreshold: 1
      tcpSocket:
        port: 80
      timeoutSeconds: 1
    resources:
      limits:
        cpu: 750m
        memory: 1Gi
      requests:
        cpu: 50m
        memory: 50Mi
  dnsPolicy: ClusterFirstWithHostNet
  serviceAccountName: kubeshark-service-account
  terminationGracePeriodSeconds: 0
  tolerations:
  - effect: NoExecute
    operator: Exists
  - effect: NoSchedule
    operator: Exists
status: {}
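
Because complete.yaml now bundles every default resource in one file, a quick smoke test could look like the sketch below. These commands are illustrative and assume an available cluster and kubectl context; they are not part of this commit.

    kubectl apply -f ./manifests/complete.yaml
    kubectl -n kubeshark get pods                 # expect the hub, front and worker daemon set pods
    kubectl delete -f ./manifests/complete.yaml   # remove everything created above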