ci/openshift-ci: Move openshift-ci from the tests repo

Move the f15be37d9bef58a0128bcba006f8abb3ea13e8da version of the scripts
required for openshift-ci from "kata-containers/tests/.ci/openshift-ci"
into "kata-containers/kata-containers/ci/openshift-ci", and the required
webhook+libs into "kata-containers/kata-containers/tools/testing", as-is
to simplify verification; handling of the different locations will be
added in a following commit.

Signed-off-by: Lukáš Doktor <ldoktor@redhat.com>
Lukáš Doktor 2023-12-13 12:49:17 +01:00
parent bf54a02e16
commit 4c58478536
25 changed files with 1188 additions and 0 deletions

@@ -0,0 +1,6 @@
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
SELINUX=permissive
SELINUXTYPE=targeted

@@ -0,0 +1,35 @@
#!/bin/bash
#
# Copyright (c) 2021 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script builds the kata-webhook and deploys it in the test cluster.
#
# You should export the KATA_RUNTIME variable with the runtimeclass name
# configured in your cluster in case it is not the default "kata-ci".
#
set -e
set -o nounset
set -o pipefail
script_dir="$(dirname $0)"
webhook_dir="${script_dir}/../../../kata-webhook"
source "${script_dir}/../../lib.sh"
KATA_RUNTIME=${KATA_RUNTIME:-kata-ci}
info "Creates the kata-webhook ConfigMap"
RUNTIME_CLASS="${KATA_RUNTIME}" \
envsubst < "${script_dir}/deployments/configmap_kata-webhook.yaml.in" \
| oc apply -f -
pushd "${webhook_dir}" >/dev/null
# Build and deploy the webhook
#
info "Builds the kata-webhook"
./create-certs.sh
info "Deploys the kata-webhook"
oc apply -f deploy/
# Check the webhook was deployed and is working.
RUNTIME_CLASS="${KATA_RUNTIME}" ./webhook-check.sh
popd >/dev/null
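
For reference, a minimal invocation sketch; the script reads `KATA_RUNTIME` from the environment, and since the file name is not shown in this view, the path `cluster/deploy_webhook.sh` is an assumption:

```bash
# Deploy the webhook for a cluster whose runtimeclass is "kata-qemu"
# instead of the default "kata-ci" (script path assumed).
export KATA_RUNTIME=kata-qemu
./cluster/deploy_webhook.sh
```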

@@ -0,0 +1,13 @@
# Copyright (c) 2021 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Instruct the daemonset installer to configure Kata Containers to use the
# host kernel.
#
apiVersion: v1
kind: ConfigMap
metadata:
name: ci.kata.installer.kernel
data:
host_kernel: "yes"
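
The daemonset installer reads this ConfigMap when configuring Kata Containers; a quick sketch to confirm it applied, assuming `oc` is logged in to the test cluster:

```bash
# Print the host_kernel flag carried by the ConfigMap; expected output: yes
oc get configmap ci.kata.installer.kernel -o jsonpath='{.data.host_kernel}'
```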

@@ -0,0 +1,14 @@
# Copyright (c) 2021 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Instruct the daemonset installer to configure Kata Containers to use the
# system QEMU.
#
apiVersion: v1
kind: ConfigMap
metadata:
name: ci.kata.installer.qemu
data:
qemu_path: /usr/libexec/qemu-kvm
host_kernel: "yes"

@@ -0,0 +1,12 @@
# Copyright (c) 2021 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Apply customizations to the kata-webhook.
#
apiVersion: v1
kind: ConfigMap
metadata:
name: kata-webhook
data:
runtime_class: ${RUNTIME_CLASS}

@@ -0,0 +1,9 @@
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
labels:
machineconfiguration.openshift.io/role: worker
name: 50-enable-sandboxed-containers-extension
spec:
extensions:
- sandboxed-containers

@@ -0,0 +1,23 @@
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Configure SELinux on worker nodes.
---
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
labels:
machineconfiguration.openshift.io/role: worker
name: 51-kata-selinux
spec:
config:
ignition:
version: 2.2.0
storage:
files:
- contents:
source: data:text/plain;charset=utf-8;base64,${SELINUX_CONF_BASE64}
filesystem: root
mode: 0644
path: /etc/selinux/config

@@ -0,0 +1,39 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: relabel-selinux-daemonset
namespace: kube-system
spec:
selector:
matchLabels:
app: restorecon
template:
metadata:
labels:
app: restorecon
spec:
serviceAccountName: kata-deploy-sa
hostPID: true
containers:
- name: relabel-selinux-container
image: alpine
securityContext:
privileged: true
command: ["/bin/sh", "-c", "
set -e;
echo Starting the relabel;
nsenter --target 1 --mount bash -xc '
for ENTRY in \
\"/(.*/)?opt/kata/bin(/.*)?\" \
\"/(.*/)?opt/kata/runtime-rs/bin(/.*)?\" \
\"/(.*/)?opt/kata/share/kata-.*(/.*)?(/.*)?\" \
\"/(.*/)?opt/kata/share/ovmf(/.*)?\" \
\"/(.*/)?opt/kata/share/tdvf(/.*)?\" \
\"/(.*/)?opt/kata/libexec(/.*)?\";
do
semanage fcontext -a -t qemu_exec_t \"$ENTRY\" || { echo \"Error in semanage command\"; exit 1; }
done;
restorecon -v -R /opt/kata || { echo \"Error in restorecon command\"; exit 1; }
';
echo NSENTER_FINISHED_WITH: $?;
sleep infinity"]

@@ -0,0 +1,191 @@
#!/bin/bash
#
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script installs the built kata-containers in the test cluster,
# and configures a runtime.
scripts_dir=$(dirname $0)
deployments_dir=${scripts_dir}/deployments
configs_dir=${scripts_dir}/configs
source ${scripts_dir}/../../lib.sh
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
# workers.
#
SELINUX_PERMISSIVE=${SELINUX_PERMISSIVE:-no}
# Set to 'yes' if you want to configure Kata Containers to use the system's
# QEMU (from the RHCOS extension).
#
KATA_WITH_SYSTEM_QEMU=${KATA_WITH_SYSTEM_QEMU:-no}
# Set to 'yes' if you want to configure Kata Containers to use the host kernel.
#
KATA_WITH_HOST_KERNEL=${KATA_WITH_HOST_KERNEL:-no}
# Leverage kata-deploy to install Kata Containers in the cluster.
#
apply_kata_deploy() {
local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
local old_img="quay.io/kata-containers/kata-deploy:latest"
# Use the kata-deploy CI image which is built for each pull request merged
local new_img="quay.io/kata-containers/kata-deploy-ci:kata-containers-latest"
pushd "$katacontainers_repo_dir"
sed -i "s#${old_img}#${new_img}#" "$deploy_file"
info "Applying kata-deploy"
oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
oc apply -f "$deploy_file"
oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
info "Adding the kata runtime classes"
oc apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
popd
}
# Wait for all worker nodes to reboot.
#
# Params:
# $1 - timeout in seconds (default to 900).
#
wait_for_reboot() {
local delta="${1:-900}"
local sleep_time=60
declare -A BOOTIDS
local workers=($(oc get nodes | \
awk '{if ($3 == "worker") { print $1 } }'))
# Get the boot ID to check whether it changed over time.
for node in ${workers[@]}; do
BOOTIDS[$node]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
node/$node)
echo "Wait $node reboot"
done
echo "Set timeout to $delta seconds"
timer_start=$(date +%s)
while [ ${#workers[@]} -gt 0 ]; do
sleep $sleep_time
now=$(date +%s)
if [ $(($timer_start + $delta)) -lt $now ]; then
echo "Timeout: not all workers rebooted"
return 1
fi
echo "Checking after $(($now - $timer_start)) seconds"
for i in ${!workers[@]}; do
current_id=$(oc get \
-o jsonpath='{.status.nodeInfo.bootID}' \
node/${workers[i]})
if [ "$current_id" != ${BOOTIDS[${workers[i]}]} ]; then
echo "${workers[i]} rebooted"
unset workers[i]
fi
done
done
}
wait_mcp_update() {
local delta="${1:-900}"
local sleep_time=30
# The machineconfigpool is fine when all the workers updated and are ready,
# and none are degraded.
local ready_count=0
local degraded_count=0
local machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
if [[ -z "$machine_count" || "$machine_count" -lt 1 ]]; then
warn "Unable to obtain the machine count"
return 1
fi
echo "Set timeout to $delta seconds"
local deadline=$(($(date +%s) + $delta))
# The ready count might not have changed yet, so wait a little.
while [[ "$ready_count" != "$machine_count" && \
"$degraded_count" == 0 ]]; do
# Let's check it hit the timeout (or not).
local now=$(date +%s)
if [ $deadline -lt $now ]; then
echo "Timeout: not all workers updated" >&2
return 1
fi
sleep $sleep_time
ready_count=$(oc get mcp worker \
-o jsonpath='{.status.readyMachineCount}')
degraded_count=$(oc get mcp worker \
-o jsonpath='{.status.degradedMachineCount}')
echo "check machineconfigpool - ready_count: $ready_count degraded_count: $degraded_count"
done
[ $degraded_count -eq 0 ]
}
# Enable the RHCOS extension for the Sandboxed Containers.
#
enable_sandboxedcontainers_extension() {
info "Enabling the RHCOS extension for Sandboxed Containers"
local deployment_file="${deployments_dir}/machineconfig_sandboxedcontainers_extension.yaml"
oc apply -f ${deployment_file}
oc get -f ${deployment_file} || \
die "Sandboxed Containers extension machineconfig not found"
wait_mcp_update || die "Failed to update the machineconfigpool"
}
# Print useful information for debugging.
#
# Params:
# $1 - the pod name
debug_pod() {
local pod="$1"
info "Debug pod: ${pod}"
oc describe pods "$pod"
oc logs "$pod"
}
oc config set-context --current --namespace=default
worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
num_nodes=$(echo $worker_nodes | wc -w)
[ $num_nodes -ne 0 ] || \
die "No worker nodes detected. Something is wrong with the cluster"
if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
# QEMU is deployed on the workers via the RHCOS extension.
enable_sandboxedcontainers_extension
oc apply -f ${deployments_dir}/configmap_installer_qemu.yaml
fi
if [ "${KATA_WITH_HOST_KERNEL}" == "yes" ]; then
oc apply -f ${deployments_dir}/configmap_installer_kernel.yaml
fi
apply_kata_deploy
# Set SELinux to permissive mode
if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
info "Configuring SELinux"
if [ -z "$SELINUX_CONF_BASE64" ]; then
export SELINUX_CONF_BASE64=$(echo \
$(cat $configs_dir/selinux.conf|base64) | \
sed -e 's/\s//g')
fi
envsubst < ${deployments_dir}/machineconfig_selinux.yaml.in | \
oc apply -f -
oc get machineconfig/51-kata-selinux || \
die "SELinux machineconfig not found"
# The new SELinux configuration will trigger another reboot.
wait_for_reboot
fi
# FIXME: Remove when https://github.com/kata-containers/kata-containers/pull/8417 is resolved
# SELinux context is currently not handled by kata-deploy
oc apply -f ${deployments_dir}/relabel_selinux.yaml
( for I in $(seq 30); do
sleep 10
oc logs -n kube-system ds/relabel-selinux-daemonset | grep "NSENTER_FINISHED_WITH:" && exit
done ) || { echo "SELinux relabel failed, check the logs"; exit 1; }
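
Since all the behaviour toggles are plain environment variables, a typical invocation is a one-liner; a sketch using the variables documented in the header above (the script path comes from `test.sh` below):

```bash
# Install Kata Containers using the system QEMU from the RHCOS extension,
# leaving SELinux in enforcing mode.
KATA_WITH_SYSTEM_QEMU=yes SELINUX_PERMISSIVE=no \
    ./ci/openshift-ci/cluster/install_kata.sh
```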

@@ -0,0 +1,82 @@
#!/bin/bash
#
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Run a smoke test.
#
script_dir=$(dirname $0)
source ${script_dir}/../lib.sh
pod='http-server'
# Create a pod.
#
info "Creating the ${pod} pod"
oc apply -f ${script_dir}/smoke/${pod}.yaml || \
die "failed to create ${pod} pod"
# Check it eventually goes to 'running'
#
wait_time=600
sleep_time=5
cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
grep running > /dev/null"
info "Wait until the pod gets running"
waitForProcess $wait_time $sleep_time "$cmd" || timed_out=$?
if [ -n "$timed_out" ]; then
oc describe pod/${pod}
oc delete pod/${pod}
die "${pod} not running"
fi
info "${pod} is running"
# Add a file with the hello message
#
hello_file=/tmp/hello
hello_msg='Hello World'
oc exec ${pod} -- sh -c "echo $hello_msg > $hello_file"
info "Creating the service and route"
if oc apply -f ${script_dir}/smoke/service.yaml; then
# Likely on OCP, use service
is_ocp=1
host=$(oc get route/http-server-route -o jsonpath={.spec.host})
port=80
else
# Likely on plain kubernetes, test using another container
is_ocp=0
info "Failed to create service, likely not on OCP, trying via NodePort"
oc apply -f "${script_dir}/smoke/service_kubernetes.yaml"
# For some reason kcli clusters list the external IP as an internal IP, so try both
host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')
[ -z "$host" ] && host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
port=$(oc get service/http-server-service -o jsonpath='{.spec.ports[0].nodePort}')
fi
info "Wait for the HTTP server to respond"
rm -f hello_msg.txt
waitForProcess 60 1 "curl '${host}:${port}${hello_file}' -s -o hello_msg.txt"
grep "${hello_msg}" hello_msg.txt > /dev/null
test_status=$?
if [ $test_status -eq 0 ]; then
info "HTTP server is working"
else
info "HTTP server is unreachable"
fi
# Delete the resources.
#
info "Deleting the service/route"
if [ "$is_ocp" -eq 0 ]; then
oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
else
oc delete -f ${script_dir}/smoke/service.yaml
fi
info "Deleting the ${pod} pod"
oc delete pod/${pod} || test_status=$?
exit $test_status

@@ -0,0 +1,30 @@
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Define the pod for a http server app.
---
apiVersion: v1
kind: Pod
metadata:
name: http-server
labels:
app: http-server-app
spec:
containers:
- name: http-server
image: registry.fedoraproject.org/fedora
ports:
- containerPort: 8080
command: ["python3"]
args: [ "-m", "http.server", "8080"]
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
runtimeClassName: kata-qemu

@@ -0,0 +1,28 @@
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Create the service on port 80 for the http-server app.
---
apiVersion: v1
kind: Service
metadata:
name: http-server-service
spec:
selector:
app: http-server-app
ports:
- protocol: TCP
port: 80
targetPort: 8080
# Create the route to the app's service '/'.
---
apiVersion: route.openshift.io/v1
kind: Route
metadata:
name: http-server-route
spec:
path: "/"
to:
kind: Service
name: http-server-service

@@ -0,0 +1,18 @@
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Create the service on port 80 for the http-server app.
---
apiVersion: v1
kind: Service
metadata:
name: http-server-service
spec:
selector:
app: http-server-app
ports:
- protocol: TCP
port: 80
targetPort: 8080
type: NodePort

ci/openshift-ci/test.sh Executable file
@@ -0,0 +1,29 @@
#!/bin/bash
#
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
script_dir=$(dirname $0)
source ${script_dir}/../lib.sh
suite=$1
if [ -z "$1" ]; then
suite='smoke'
fi
# Make oc and kubectl visible
export PATH=/tmp/shared:$PATH
oc version || die "Test cluster is unreachable"
info "Install and configure kata into the test cluster"
export SELINUX_PERMISSIVE="no"
${script_dir}/cluster/install_kata.sh || die "Failed to install kata-containers"
info "Run test suite: $suite"
test_status='PASS'
${script_dir}/run_${suite}_test.sh || test_status='FAIL'
info "Test suite: $suite: $test_status"
[ "$test_status" == "PASS" ]

tools/testing/kata-webhook/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
deploy/webhook-certs.yaml
deploy/webhook-registration.yaml

@@ -0,0 +1,14 @@
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
FROM golang:1.21.5-alpine3.19 AS builder
WORKDIR /go/src/kata-pod-annotate
COPY . ./
RUN CGO_ENABLED=0 go build -o /go/bin/kata-pod-annotate
FROM alpine:3.19
COPY --from=builder /go/bin/kata-pod-annotate /kata-pod-annotate
ENTRYPOINT ["/kata-pod-annotate"]

@@ -0,0 +1,60 @@
# Kata Admission controller webhook
A simple admission controller webhook that annotates pods with the
Kata runtime class.
## How to build the admission controller
> **Note:**
> Only run this step if you are modifying the current webhook or don't
> want to use the webhook image available on Docker Hub.
First build the admission controller image and the associated
Kubernetes YAML files required to instantiate the admission
controller.
```bash
docker build -t quay.io/kata-containers/kata-webhook-example:latest -f Dockerfile .
```
> **Note**
> The image needs to be published for the webhook to work. Alternatively,
> on a single-machine cluster, change the `imagePullPolicy` to use the
> locally built image.
## Making Kata the default runtime using an admission controller
Today `runc` is the default runtime in `crio.conf` when a user does not specify
`runtimeClass` in the pod spec. If you want to run a cluster where Kata is used
by default, except for workloads we know for sure will not work with Kata, use
the [admission webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
and sample admission controller we created by running the commands below:
> **Note**
>
> By default, the `runtimeClass` name used in this webhook is `kata`. If your
> cluster is configured with another `runtimeClass`, you'll need to change the
> value of the `RUNTIME_CLASS` environment variable defined in the
> [webhook file](deploy/webhook.yaml). You can manually edit the file or run:
>
> `export RUNTIME_CLASS=<>`
>
> `kubectl create cm kata-webhook --from-literal runtime_class=$RUNTIME_CLASS`
```bash
./create-certs.sh
kubectl apply -f deploy/
```
Afterwards you can run the `webhook-check.sh` script to check the webhook was
deployed correctly and is working:
```bash
./webhook-check.sh
```
The webhook mutates pods to use the Kata runtime class for all pods except
those that:
* set `hostNetwork: true`
* run in the `rook-ceph` or `rook-ceph-system` namespaces
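
A quick way to see the mutation in action is a throwaway pod. This is only a sketch; the pod name is illustrative and the printed value depends on your `RUNTIME_CLASS`:

```bash
# Create an unprivileged pod without an explicit runtimeClassName...
kubectl run webhook-demo --image=quay.io/prometheus/busybox:latest \
    --restart=Never --command -- sleep 60
# ...and confirm the webhook injected one (prints e.g. "kata").
kubectl get pod webhook-demo -o jsonpath='{.spec.runtimeClassName}'
kubectl delete pod webhook-demo
```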

@@ -0,0 +1,13 @@
#!/bin/bash
#
# Copyright (c) 2021 Red Hat
#
# SPDX-License-Identifier: Apache-2.0
#
# Webhook namespace.
WEBHOOK_NS=${WEBHOOK_NS:-"default"}
# Webhook Pod name.
WEBHOOK_NAME=${WEBHOOK_NAME:-"pod-annotate"}
# Webhook service name.
WEBHOOK_SVC="${WEBHOOK_NAME}-webhook"

@@ -0,0 +1,81 @@
#! /bin/bash
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
webhook_dir=$(dirname $0)
source "${webhook_dir}/common.bash"
[ -n "${1:-}" ] && WEBHOOK_NS="$1"
[ -n "${2:-}" ] && WEBHOOK_NAME="$2"
if ! command -v openssl &>/dev/null; then
echo "ERROR: command 'openssl' not found."
exit 1
elif ! command -v kubectl &>/dev/null; then
echo "ERROR: command 'kubectl' not found."
exit 1
fi
cleanup() {
rm -rf *.key *.crt *.csr *.srl
[ -n "${CSR_CONFIG_FILE:-}" ] && rm -f ${CSR_CONFIG_FILE}
}
trap cleanup EXIT
# Create certs for our webhook
touch $HOME/.rnd
# Create a Certificate Signing Request configuration file.
CSR_CONFIG_FILE="$(mktemp)"
cat << EOF >$CSR_CONFIG_FILE
[ req ]
default_bits = 2048
prompt = no
default_md = sha256
req_extensions = req_ext
distinguished_name = dn
[ dn ]
CN = "Kata Containers Webhook"
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = ${WEBHOOK_SVC}.${WEBHOOK_NS}.svc
[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=keyEncipherment,dataEncipherment
extendedKeyUsage=serverAuth,clientAuth
subjectAltName=@alt_names
EOF
openssl genrsa -out webhookCA.key 2048
openssl req -x509 -new -nodes -key webhookCA.key \
-subj "/CN=Kata Containers Webhook" -days 365 -out webhookCA.crt
openssl genrsa -out webhook.key 2048
openssl req -new -key webhook.key -out webhook.csr -config "${CSR_CONFIG_FILE}"
openssl x509 -req -in webhook.csr -CA webhookCA.crt -CAkey webhookCA.key \
-CAcreateserial -out webhook.crt -days 365 \
-extensions v3_ext -extfile "${CSR_CONFIG_FILE}"
# Create certs secrets for k8s
kubectl create secret generic \
${WEBHOOK_SVC}-certs \
--from-file=key.pem=./webhook.key \
--from-file=cert.pem=./webhook.crt \
--dry-run=client -o yaml > ./deploy/webhook-certs.yaml
# Set the CABundle on the webhook registration
CA_BUNDLE=$(cat ./webhookCA.crt ./webhook.crt | base64 -w0)
sed "s/CA_BUNDLE/${CA_BUNDLE}/" ./deploy/webhook-registration.yaml.tpl > ./deploy/webhook-registration.yaml

@@ -0,0 +1,27 @@
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: pod-annotate-webhook
labels:
app: pod-annotate-webhook
kind: mutator
webhooks:
- name: pod-annotate-webhook.kata.xyz
sideEffects: None
failurePolicy: Ignore
admissionReviewVersions: ["v1", "v1beta1"]
clientConfig:
service:
name: pod-annotate-webhook
namespace: default
path: "/mutate"
caBundle: CA_BUNDLE
rules:
- operations: [ "CREATE" ]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]

@@ -0,0 +1,69 @@
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
apiVersion: apps/v1
kind: Deployment
metadata:
name: pod-annotate-webhook
labels:
app: pod-annotate-webhook
spec:
selector:
matchLabels:
app: pod-annotate-webhook
replicas: 1
template:
metadata:
labels:
app: pod-annotate-webhook
spec:
containers:
- name: pod-annotate-webhook
image: quay.io/kata-containers/kata-webhook-example:latest
imagePullPolicy: Always
env:
- name: RUNTIME_CLASS
valueFrom:
configMapKeyRef:
name: kata-webhook
key: runtime_class
optional: true
args:
- -tls-cert-file=/etc/webhook/certs/cert.pem
- -tls-key-file=/etc/webhook/certs/key.pem
- -exclude-namespaces=rook-ceph-system,rook-ceph
volumeMounts:
- name: webhook-certs
mountPath: /etc/webhook/certs
readOnly: true
resources:
requests:
cpu: "100m"
memory: "250Mi"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumes:
- name: webhook-certs
secret:
secretName: pod-annotate-webhook-certs
---
apiVersion: v1
kind: Service
metadata:
name: pod-annotate-webhook
labels:
app: pod-annotate-webhook
spec:
ports:
- port: 443
targetPort: 8080
selector:
app: pod-annotate-webhook

@@ -0,0 +1,32 @@
module module-path
go 1.21
require (
github.com/sirupsen/logrus v1.9.3
github.com/slok/kubewebhook/v2 v2.5.0
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
)
require (
github.com/go-logr/logr v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
gomodules.xyz/jsonpatch/v3 v3.0.1 // indirect
gomodules.xyz/orderedmap v0.1.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/client-go v0.29.0 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

@@ -0,0 +1,121 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/slok/kubewebhook/v2 v2.5.0 h1:CwMxLbTEcha3+SxSXc4pc9iIbREdhgLurAs+/uRzxIw=
github.com/slok/kubewebhook/v2 v2.5.0/go.mod h1:TcQS+Ae0TDiiwm9glxum6AFvtumR33qdAenUeiQ/TWs=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v3 v3.0.1 h1:Te7hKxV52TKCbNYq3t84tzKav3xhThdvSsSp/W89IyI=
gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto=
gomodules.xyz/orderedmap v0.1.0 h1:fM/+TGh/O1KkqGR5xjTKg6bU8OKBkg7p0Y+x/J9m8Os=
gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A=
k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA=
k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o=
k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis=
k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8=
k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

@@ -0,0 +1,154 @@
// Copyright (c) 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/sirupsen/logrus"
kwhhttp "github.com/slok/kubewebhook/v2/pkg/http"
kwhlogrus "github.com/slok/kubewebhook/v2/pkg/log/logrus"
kwhmodel "github.com/slok/kubewebhook/v2/pkg/model"
kwhmutating "github.com/slok/kubewebhook/v2/pkg/webhook/mutating"
)
func getRuntimeClass(runtimeClassKey, defaultRuntimeClass string) string {
if runtimeClass, ok := os.LookupEnv(runtimeClassKey); ok {
return runtimeClass
}
return defaultRuntimeClass
}
func annotatePodMutator(_ context.Context, ar *kwhmodel.AdmissionReview, obj metav1.Object) (*kwhmutating.MutatorResult, error) {
pod, ok := obj.(*corev1.Pod)
if !ok {
// If not a pod just continue the mutation chain (if there is one) and don't do anything
return &kwhmutating.MutatorResult{}, nil
}
// The Namespace is not always available in the pod Spec
// especially when operators create the pods. Hence access
// the Namespace in the actual request (vs the object)
// https://godoc.org/k8s.io/api/admission/v1beta1#AdmissionRequest
if whPolicy.nsBlacklist[ar.Namespace] {
fmt.Println("blacklisted namespace: ", ar.Namespace)
return &kwhmutating.MutatorResult{}, nil
}
// We cannot support --net=host in Kata
// https://github.com/kata-containers/documentation/blob/master/Limitations.md#docker---nethost
if pod.Spec.HostNetwork {
fmt.Println("host network: ", pod.GetNamespace(), pod.GetName())
return &kwhmutating.MutatorResult{}, nil
}
if pod.GetNamespace() == "sonobuoy" {
fmt.Println("sonobuoy pods will not be changed to kata", pod.GetNamespace(), pod.GetName())
return &kwhmutating.MutatorResult{}, nil
}
for i := range pod.Spec.Containers {
if pod.Spec.Containers[i].SecurityContext != nil && pod.Spec.Containers[i].SecurityContext.Privileged != nil {
if *pod.Spec.Containers[i].SecurityContext.Privileged {
fmt.Println("privileged container: ", pod.GetNamespace(), pod.GetName())
return &kwhmutating.MutatorResult{}, nil
}
}
}
if pod.Spec.RuntimeClassName != nil {
fmt.Println("explicit runtime: ", pod.GetNamespace(), pod.GetName(), pod.Spec.RuntimeClassName)
return &kwhmutating.MutatorResult{}, nil
}
// Mutate the pod
fmt.Println("setting runtime to kata: ", pod.GetNamespace(), pod.GetName())
runtimeClassEnvKey := "RUNTIME_CLASS"
kataRuntimeClassName := getRuntimeClass(runtimeClassEnvKey, "kata")
pod.Spec.RuntimeClassName = &kataRuntimeClassName
return &kwhmutating.MutatorResult{
MutatedObject: pod,
}, nil
}
type config struct {
certFile string
keyFile string
nsBlacklist string
}
type policy struct {
nsBlacklist map[string]bool
}
var whPolicy *policy
func initFlags() *config {
cfg := &config{}
fl := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
fl.StringVar(&cfg.certFile, "tls-cert-file", "", "TLS certificate file")
fl.StringVar(&cfg.keyFile, "tls-key-file", "", "TLS key file")
fl.StringVar(&cfg.nsBlacklist, "exclude-namespaces", "", "Comma separated namespace blacklist")
fl.Parse(os.Args[1:])
return cfg
}
func main() {
logrusLogEntry := logrus.NewEntry(logrus.New())
logrusLogEntry.Logger.SetLevel(logrus.DebugLevel)
logger := kwhlogrus.NewLogrus(logrusLogEntry)
cfg := initFlags()
whPolicy = &policy{}
whPolicy.nsBlacklist = make(map[string]bool)
if cfg.nsBlacklist != "" {
for _, s := range strings.Split(cfg.nsBlacklist, ",") {
whPolicy.nsBlacklist[s] = true
}
}
// Create our mutator
mt := kwhmutating.MutatorFunc(annotatePodMutator)
mcfg := kwhmutating.WebhookConfig{
ID: "podAnnotate",
Obj: &corev1.Pod{},
Mutator: mt,
Logger: logger,
}
wh, err := kwhmutating.NewWebhook(mcfg)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating webhook: %s", err)
os.Exit(1)
}
// Get the handler for our webhook.
whHandler, err := kwhhttp.HandlerFor(kwhhttp.HandlerConfig{Webhook: wh, Logger: logger})
if err != nil {
fmt.Fprintf(os.Stderr, "error creating webhook handler: %s", err)
os.Exit(1)
}
port := ":8080"
logger.Infof("Listening on %s", port)
err = http.ListenAndServeTLS(port, cfg.certFile, cfg.keyFile, whHandler)
if err != nil {
fmt.Fprintf(os.Stderr, "error serving webhook: %s", err)
os.Exit(1)
}
}
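
The handler serves TLS on `:8080` and the registration points at the `/mutate` path, so it can be exercised locally. A hedged sketch, assuming the webhook is running with valid certs and `review.json` holds an `AdmissionReview` request:

```bash
# POST an AdmissionReview to the mutating endpoint; -k skips verification
# of the self-signed certificate.
curl -sk -X POST https://localhost:8080/mutate \
    -H 'Content-Type: application/json' -d @review.json
```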

@@ -0,0 +1,86 @@
#!/bin/bash
#
# Copyright (c) 2021 Red Hat
#
# SPDX-License-Identifier: Apache-2.0
#
# Run this script to check the webhook is deployed and working
set -o errexit
set -o nounset
set -o pipefail
webhook_dir=$(dirname $0)
source "${webhook_dir}/../lib/common.bash"
source "${webhook_dir}/common.bash"
readonly hello_pod="hello-kata-webhook"
# The Pod RuntimeClassName for Kata Containers.
RUNTIME_CLASS="${RUNTIME_CLASS:-"kata"}"
cleanup() {
{
kubectl get -n ${WEBHOOK_NS} pod/${hello_pod} && \
kubectl delete -n ${WEBHOOK_NS} pod/${hello_pod}
} &>/dev/null
}
trap cleanup EXIT
# Check the deployment exists and is available.
#
check_deployed() {
local timeout="60s"
kubectl get -n ${WEBHOOK_NS} deployment/${WEBHOOK_SVC} &>/dev/null || \
die "The ${WEBHOOK_SVC} deployment does not exist"
kubectl wait -n ${WEBHOOK_NS} deployment/${WEBHOOK_SVC} \
--for condition=Available --timeout ${timeout} &>/dev/null || \
die "The ${WEBHOOK_SVC} deployment is unavailable after ${timeout} waiting"
}
# Check the webhook is working as expected.
#
check_working() {
kubectl get -n ${WEBHOOK_NS} pod/${hello_pod} &>/dev/null && \
die "${hello_pod} pod exists, cannot reliably check the webhook"
cat <<-EOF | kubectl apply -f -
kind: Pod
apiVersion: v1
metadata:
name: ${hello_pod}
namespace: ${WEBHOOK_NS}
spec:
restartPolicy: Never
containers:
- name: ${hello_pod}
image: quay.io/prometheus/busybox:latest
command: ["echo", "Hello Webhook"]
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
EOF
local class_name=$(kubectl get -n ${WEBHOOK_NS} \
-o jsonpath='{.spec.runtimeClassName}' pod/${hello_pod})
if [ "${class_name}" != "${RUNTIME_CLASS}" ]; then
warn "RuntimeClassName expected ${RUNTIME_CLASS}, got ${class_name}"
die "kata-webhook is not working"
fi
}
main() {
info "Going to check the kata-webhook installation"
[ -n "${KUBECONFIG:-}" ] || die "KUBECONFIG should be exported"
check_deployed
check_working
info "kata-webhook is up and working"
}
main "$@"
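
Usage sketch: `KUBECONFIG` must be exported, and `RUNTIME_CLASS`/`WEBHOOK_NS` are read from the environment:

```bash
# Check a deployment that injects a non-default runtimeclass into pods
# in the default namespace.
RUNTIME_CLASS=kata-ci WEBHOOK_NS=default ./webhook-check.sh
```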