From 7bf23ac95855a76368e1dfbd0cc6245e9e2377a2 Mon Sep 17 00:00:00 2001 From: Manuel Huber Date: Tue, 5 Mar 2024 01:01:49 +0000 Subject: [PATCH] tools: Add initial igvm-builder and node-builder/azure-linux scripting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This branch starts introducing additional scripting to build, deploy and evaluate the components used in AKS' Pod Sandboxing and Confidential Containers preview features. This includes the capability to build the IGVM file and its reference measurement file for remote attestation. Signed-off-by: Manuel Huber tools: Improve igvm-builder and node-builder/azure-linux scripting - Support for Mariner 3 builds using OS_VERSION variable - Improvements to IGVM build process and flow as described in README - Adoption of using only cloud-hypervisor-cvm on CBL-Mariner Signed-off-by: Manuel Huber tools: Add package-tools-install functionality - Add script to install kata-containers(-cc)-tools bits - Minor improvements in README.md - Minor fix in package_install - Remove echo outputs in package_build Signed-off-by: Manuel Huber tools: Enable setting IGVM SVN - Allow setting SVN parameter for IGVM build scripting Signed-off-by: Manuel Huber node-builder: introduce BUILD_TYPE variable This lets developers build and deploy Kata in debug mode without having to make manual edits to the build scripts. With BUILD_TYPE=debug (default is release): * The agent is built in debug mode. * The agent is built with a permissive policy (using allow-all.rego). * The shim debug config file is used, ie. we create the symlink configuration-clh-snp-debug.toml <- configuration-clh-snp.toml. For example, building and deploying Kata-CC in debug mode is now as simple as: make BUILD_TYPE=debug all-confpods deploy-confpods Also do note that make still lets you override the other variables even after setting BUILD_TYPE. 
For example, you can use the production shim config with BUILD_TYPE=debug: make BUILD_TYPE=debug SHIM_USE_DEBUG_CONFIG=no all-confpods deploy-confpods Signed-off-by: Aurélien Bombo node-builder: introduce SHIM_REDEPLOY_CONFIG See README: when SHIM_REDEPLOY_CONFIG=no, the shim configuration is NOT redeployed, so that potential config changes made directly on the host during development aren't lost. Signed-off-by: Aurélien Bombo node-builder: Use img for Pod Sandboxing Switch from UVM initrd to image format Signed-off-by: Manuel Huber node-builder: Adapt README instructions - Sanitize containerd config snippet - Set podOverhead for Kata runtime class Signed-off-by: Manuel Huber tools: Adapt AGENT_POLICY_FILE path - Adapt path in uvm_build.sh script to comply with the upstream changes we pulled in Signed-off-by: Manuel Huber node-builder: Use Azure Linux 3 as default path - update recipe and node-builder scripting - change default value on rootfs-builder Signed-off-by: Manuel Huber node-builder: Deploy-only for AzL3 VMs - split deployment sections in node-builder README.md - install jq, curl dependencies within IGVM script - add path parameter to UVM install script Signed-off-by: Manuel Huber node-builder: Minor updates to README.md - no longer install make package, is part of meta package - remove superfluous popd - add note on permissive policy for ConfPods UVM builds Signed-off-by: Manuel Huber node-builder: Updates to README.md - with the latest 3.2.0.azl4 package on PMC, can remove OS_VERSION parameter and use the make deploy calls instead of copying files by hand for variant I (now aligned with Variant II) - with the latest changes on msft-main, set the podOverhead to 600Mi Signed-off-by: Manuel Huber node-builder: Fix SHIM_USE_DEBUG_CONFIG behavior Using a symlink would create a cycle after calling this script again when copying the final configuration at line 74, so we just use cp instead. 
Also, I moved this block to the end of the file to properly override the final config file. Signed-off-by: Aurélien Bombo node-builder: Build and install debug configuration for pod sandboxing For ease of debugging, install a configuration-clh-debug.toml for pod sandboxing as we do in Conf pods. Signed-off-by: Cameron Baird runtime: remove clh-snp config file usage in makefile Not needed to build vanilla kata Signed-off-by: Saul Paredes package_tools_install.sh: include nsdax.gpl.c Include nsdax.gpl.c Signed-off-by: Saul Paredes --- .gitignore | 20 ++ tools/osbuilder/.gitignore | 2 + tools/osbuilder/Makefile | 17 +- .../igvm-builder/azure-linux/config.sh | 25 ++ .../igvm-builder/azure-linux/igvm_lib.sh | 70 ++++ tools/osbuilder/igvm-builder/igvm_builder.sh | 82 +++++ .../node-builder/azure-linux/Makefile | 77 +++++ .../node-builder/azure-linux/README.md | 312 ++++++++++++++++++ .../node-builder/azure-linux/clean.sh | 72 ++++ .../node-builder/azure-linux/common.sh | 66 ++++ .../node-builder/azure-linux/package_build.sh | 97 ++++++ .../azure-linux/package_install.sh | 73 ++++ .../azure-linux/package_tools_install.sh | 66 ++++ .../node-builder/azure-linux/uvm_build.sh | 76 +++++ .../node-builder/azure-linux/uvm_install.sh | 42 +++ 15 files changed, 1095 insertions(+), 2 deletions(-) create mode 100644 tools/osbuilder/igvm-builder/azure-linux/config.sh create mode 100644 tools/osbuilder/igvm-builder/azure-linux/igvm_lib.sh create mode 100755 tools/osbuilder/igvm-builder/igvm_builder.sh create mode 100644 tools/osbuilder/node-builder/azure-linux/Makefile create mode 100644 tools/osbuilder/node-builder/azure-linux/README.md create mode 100755 tools/osbuilder/node-builder/azure-linux/clean.sh create mode 100755 tools/osbuilder/node-builder/azure-linux/common.sh create mode 100755 tools/osbuilder/node-builder/azure-linux/package_build.sh create mode 100755 tools/osbuilder/node-builder/azure-linux/package_install.sh create mode 100755 
tools/osbuilder/node-builder/azure-linux/package_tools_install.sh create mode 100755 tools/osbuilder/node-builder/azure-linux/uvm_build.sh create mode 100755 tools/osbuilder/node-builder/azure-linux/uvm_install.sh diff --git a/.gitignore b/.gitignore index 94b91954a9..5042e49522 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,23 @@ src/tools/log-parser/kata-log-parser tools/packaging/static-build/agent/install_libseccomp.sh .envrc .direnv + +# Microsoft-specific +.cargo/ +src/agent/samples/policy/test-input/ +src/tarfs/**/*.cmd +src/tarfs/**/*.ko +src/tarfs/**/*.mod +src/tarfs/**/*.mod.c +src/tarfs/**/*.o +src/tarfs/**/modules.order +src/tarfs/**/Module.symvers +src/tarfs-cvm/ +tools/osbuilder/kata-containers-igvm.img +tools/osbuilder/kata-containers-igvm-debug.img +tools/osbuilder/igvm-debug-measurement.cose +tools/osbuilder/igvm-measurement.cose +tools/osbuilder/root_hash.txt +tools/osbuilder/igvm.log +tools/osbuilder/kata-opa.service +tools/osbuilder/rootfs-builder/opa/ diff --git a/tools/osbuilder/.gitignore b/tools/osbuilder/.gitignore index becda84428..365d32272e 100644 --- a/tools/osbuilder/.gitignore +++ b/tools/osbuilder/.gitignore @@ -9,3 +9,5 @@ kata-containers-initrd.img kata-containers.img rootfs-builder/centos/RPM-GPG-KEY-* typescript +node-builder/azure-linux/agent-install +igvm-builder/igvm-tooling diff --git a/tools/osbuilder/Makefile b/tools/osbuilder/Makefile index 1b3aa4217a..b8383dbd28 100644 --- a/tools/osbuilder/Makefile +++ b/tools/osbuilder/Makefile @@ -8,6 +8,8 @@ TEST_RUNNER := $(MK_DIR)/tests/test_images.sh ROOTFS_BUILDER := $(MK_DIR)/rootfs-builder/rootfs.sh INITRD_BUILDER := $(MK_DIR)/initrd-builder/initrd_builder.sh IMAGE_BUILDER := $(MK_DIR)/image-builder/image_builder.sh +IGVM_BUILDER := $(MK_DIR)/igvm-builder/igvm_builder.sh +IGVM_SVN ?= 0 DISTRO ?= ubuntu BUILD_METHOD := distro @@ -16,11 +18,17 @@ AGENT_INIT ?= no USE_DOCKER ?= true ROOTFS_BUILD_DEST := $(shell pwd) IMAGES_BUILD_DEST := $(shell pwd) +IGVM_BUILD_DEST := 
$(shell pwd) ROOTFS_MARKER_SUFFIX := _rootfs.done TARGET_ROOTFS := $(ROOTFS_BUILD_DEST)/$(DISTRO)_rootfs TARGET_ROOTFS_MARKER := $(ROOTFS_BUILD_DEST)/.$(DISTRO)$(ROOTFS_MARKER_SUFFIX) TARGET_IMAGE := $(IMAGES_BUILD_DEST)/kata-containers.img TARGET_INITRD := $(IMAGES_BUILD_DEST)/kata-containers-initrd.img +TARGET_IGVM := $(IGVM_BUILD_DEST)/kata-containers-igvm.img +TARGET_IGVM_MSMT := $(IGVM_BUILD_DEST)/igvm-measurement.cose +TARGET_IGVM_DEBUG := $(IGVM_BUILD_DEST)/kata-containers-igvm-debug.img +TARGET_IGVM_DEBUG_MSMT:= $(IGVM_BUILD_DEST)/igvm-debug-measurement.cose +TARGET_IGVM_LOG := $(IGVM_BUILD_DEST)/igvm.log VERSION_FILE := ./VERSION VERSION := $(shell grep -v ^\# $(VERSION_FILE) 2>/dev/null || echo "unknown") @@ -86,7 +94,7 @@ endif ################################################################################ .PHONY: all -all: image initrd +all: image initrd igvm rootfs-%: $(ROOTFS_BUILD_DEST)/.%$(ROOTFS_MARKER_SUFFIX) @ # DONT remove. This is not cancellation rule. @@ -156,6 +164,10 @@ $(DRACUT_OVERLAY_DIR): mkdir -p $@/etc/modules-load.d echo $(DRACUT_KMODULES) | tr " " "\n" > $@/etc/modules-load.d/kata-modules.conf +.PHONY: igvm +igvm: $(TARGET_IMAGE) + $(IGVM_BUILDER) -o $(IGVM_BUILD_DEST) -s $(IGVM_SVN) + .PHONY: test test: $(TEST_RUNNER) "$(DISTRO)" @@ -208,7 +220,8 @@ install-scripts: .PHONY: clean clean: - rm -rf $(TARGET_ROOTFS_MARKER) $(TARGET_ROOTFS) $(TARGET_IMAGE) $(TARGET_INITRD) $(DRACUT_OVERLAY_DIR) + rm -rf $(TARGET_ROOTFS_MARKER) $(TARGET_ROOTFS) $(TARGET_IMAGE) $(TARGET_INITRD) $(DRACUT_OVERLAY_DIR) $(TARGET_IGVM) $(TARGET_IGVM_DEBUG) $(TARGET_IGVM_MSMT) $(TARGET_IGVM_DEBUG_MSMT) $(TARGET_IGVM_LOG) + rm -rf $(IGVM_TOOL_SRC) # Prints the name of the variable passed as suffix to the print- target, # E.g., if Makefile contains: diff --git a/tools/osbuilder/igvm-builder/azure-linux/config.sh b/tools/osbuilder/igvm-builder/azure-linux/config.sh new file mode 100644 index 0000000000..ade604dd60 --- /dev/null +++ 
b/tools/osbuilder/igvm-builder/azure-linux/config.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +# this is where the kernel-uvm package installation places bzImage, see SPEC file +BZIMAGE_BIN="/usr/share/cloud-hypervisor/bzImage" + +IGVM_EXTRACT_FOLDER="${SCRIPT_DIR}/igvm-tooling" +CLH_ACPI_TABLES_DIR="${IGVM_EXTRACT_FOLDER}/src/igvm/acpi/acpi-clh/" +IGVM_PY_FILE="${IGVM_EXTRACT_FOLDER}/src/igvm/igvmgen.py" + +IGVM_BUILD_VARS="-kernel ${BZIMAGE_BIN} -boot_mode x64 -vtl 0 -svme 1 -encrypted_page 1 -pvalidate_opt 1 -acpi ${CLH_ACPI_TABLES_DIR}" + +IGVM_KERNEL_PARAMS_COMMON="dm-mod.create=\"dm-verity,,,ro,0 ${IMAGE_DATA_SECTORS} verity 1 /dev/vda1 /dev/vda2 ${IMAGE_DATA_BLOCK_SIZE} ${IMAGE_HASH_BLOCK_SIZE} ${IMAGE_DATA_BLOCKS} 0 sha256 ${IMAGE_ROOT_HASH} ${IMAGE_SALT}\" \ + root=/dev/dm-0 rootflags=data=ordered,errors=remount-ro ro rootfstype=ext4 panic=1 no_timer_check noreplace-smp systemd.unit=kata-containers.target systemd.mask=systemd-networkd.service \ + systemd.mask=systemd-networkd.socket agent.enable_signature_verification=false" +IGVM_KERNEL_PROD_PARAMS="${IGVM_KERNEL_PARAMS_COMMON} quiet" +IGVM_KERNEL_DEBUG_PARAMS="${IGVM_KERNEL_PARAMS_COMMON} console=hvc0 systemd.log_target=console agent.log=debug agent.debug_console agent.debug_console_vport=1026" + +IGVM_FILE_NAME="kata-containers-igvm.img" +IGVM_DBG_FILE_NAME="kata-containers-igvm-debug.img" +IGVM_MEASUREMENT_FILE_NAME="igvm-measurement.cose" +IGVM_DBG_MEASUREMENT_FILE_NAME="igvm-debug-measurement.cose" diff --git a/tools/osbuilder/igvm-builder/azure-linux/igvm_lib.sh b/tools/osbuilder/igvm-builder/azure-linux/igvm_lib.sh new file mode 100644 index 0000000000..e5b1330744 --- /dev/null +++ b/tools/osbuilder/igvm-builder/azure-linux/igvm_lib.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +install_igvm_tool() +{ + echo "Installing IGVM 
tool" + if [ -d ${IGVM_EXTRACT_FOLDER} ]; then + echo "${IGVM_EXTRACT_FOLDER} folder already exists, assuming tool is already installed" + return + fi + + # the igvm tool on Azure Linux will soon be properly installed through dnf via kata-packages-uvm-build + # as of now, even when installing with pip3, we cannot delete the source folder as the ACPI tables are not being installed anywhere, hence relying on this folder + echo "Determining and downloading latest IGVM tooling release, and extracting including ACPI tables" + IGVM_VER=$(curl -sL "https://api.github.com/repos/microsoft/igvm-tooling/releases/latest" | jq -r .tag_name | sed 's/^v//') + curl -sL "https://github.com/microsoft/igvm-tooling/archive/refs/tags/${IGVM_VER}.tar.gz" | tar --no-same-owner -xz + mv igvm-tooling-${IGVM_VER} ${IGVM_EXTRACT_FOLDER} + + echo "Installing IGVM module msigvm (${IGVM_VER}) via pip3" + pushd ${IGVM_EXTRACT_FOLDER}/src + pip3 install --no-deps ./ + popd +} + +uninstall_igvm_tool() +{ + echo "Uninstalling IGVM tool" + + rm -rf ${IGVM_EXTRACT_FOLDER} + pip3 uninstall -y msigvm +} + +build_igvm_files() +{ + echo "Reading Kata image dm_verity root hash information from root_hash file" + ROOT_HASH_FILE="${SCRIPT_DIR}/../root_hash.txt" + + if [ ! 
-f "${ROOT_HASH_FILE}" ]; then + echo "Could no find image root hash file '${ROOT_HASH_FILE}', aborting" + exit 1 + fi + + IMAGE_ROOT_HASH=$(sed -e 's/Root hash:\s*//g;t;d' "${ROOT_HASH_FILE}") + IMAGE_SALT=$(sed -e 's/Salt:\s*//g;t;d' "${ROOT_HASH_FILE}") + IMAGE_DATA_BLOCKS=$(sed -e 's/Data blocks:\s*//g;t;d' "${ROOT_HASH_FILE}") + IMAGE_DATA_BLOCK_SIZE=$(sed -e 's/Data block size:\s*//g;t;d' "${ROOT_HASH_FILE}") + IMAGE_DATA_SECTORS_PER_BLOCK=$((IMAGE_DATA_BLOCK_SIZE / 512)) + IMAGE_DATA_SECTORS=$((IMAGE_DATA_BLOCKS * IMAGE_DATA_SECTORS_PER_BLOCK)) + IMAGE_HASH_BLOCK_SIZE=$(sed -e 's/Hash block size:\s*//g;t;d' "${ROOT_HASH_FILE}") + + # reloading the config file as various variables depend on above values + load_config_distro + + echo "Building (debug) IGVM files and creating their reference measurement files" + # we could call into the installed binary '~/.local/bin/igvmgen' when adding to PATH or, better, into 'python3 -m msigvm' + # however, as we still need the installation directory for the ACPI tables, we leave things as is for now + # at the same time we seem to need to call pip3 install for invoking the tool at all + python3 ${IGVM_PY_FILE} $IGVM_BUILD_VARS -o $IGVM_FILE_NAME -measurement_file $IGVM_MEASUREMENT_FILE_NAME -append "$IGVM_KERNEL_PROD_PARAMS" -svn $SVN + python3 ${IGVM_PY_FILE} $IGVM_BUILD_VARS -o $IGVM_DBG_FILE_NAME -measurement_file $IGVM_DBG_MEASUREMENT_FILE_NAME -append "$IGVM_KERNEL_DEBUG_PARAMS" -svn $SVN + + if [ "${PWD}" -ef "$(readlink -f $OUT_DIR)" ]; then + echo "OUT_DIR matches with current dir, not moving build artifacts" + else + echo "Moving build artifacts to ${OUT_DIR}" + mv $IGVM_FILE_NAME $IGVM_DBG_FILE_NAME $IGVM_MEASUREMENT_FILE_NAME $IGVM_DBG_MEASUREMENT_FILE_NAME $OUT_DIR + fi +} diff --git a/tools/osbuilder/igvm-builder/igvm_builder.sh b/tools/osbuilder/igvm-builder/igvm_builder.sh new file mode 100755 index 0000000000..8e539f69d9 --- /dev/null +++ b/tools/osbuilder/igvm-builder/igvm_builder.sh @@ -0,0 +1,82 @@ 
+#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +SCRIPT_DIR="$(dirname $(readlink -f $0))" + +# distro-specific config file +typeset -r CONFIG_SH="config.sh" + +# Name of an optional distro-specific file which, if it exists, must implement the +# install_igvm_tool, build_igvm_files, and uninstall_igvm_tool functions. +typeset -r LIB_SH="igvm_lib.sh" + +load_config_distro() +{ + distro_config_dir="${SCRIPT_DIR}/${DISTRO}" + + [ -d "${distro_config_dir}" ] || die "Could not find configuration directory '${distro_config_dir}'" + + if [ -e "${distro_config_dir}/${LIB_SH}" ]; then + igvm_lib="${distro_config_dir}/${LIB_SH}" + echo "igvm_lib.sh file found. Loading content" + source "${igvm_lib}" + fi + + # Source config.sh from distro, depends on root_hash based variables here + igvm_config="${distro_config_dir}/${CONFIG_SH}" + source "${igvm_config}" +} + +DISTRO="azure-linux" +MODE="build" + +while getopts ":o:s:iu" OPTIONS; do + case "${OPTIONS}" in + o ) OUT_DIR=$OPTARG ;; + s ) SVN=$OPTARG ;; + i ) MODE="install" ;; + u ) MODE="uninstall" ;; + \? 
) + echo "Error - Invalid Option: -$OPTARG" 1>&2 + exit 1 + ;; + : ) + echo "Error - Invalid Option: -$OPTARG requires an argument" 1>&2 + exit 1 + ;; + esac +done + +echo "IGVM builder script" +echo "-- OUT_DIR -> $OUT_DIR" +echo "-- SVN -> $SVN" +echo "-- DISTRO -> $DISTRO" +echo "-- MODE -> $MODE" + +if [ -n "$DISTRO" ]; then + load_config_distro +else + echo "DISTRO must be specified" + exit 1 +fi + +case "$MODE" in + "install") + install_igvm_tool + ;; + "uninstall") + uninstall_igvm_tool + ;; + "build") + build_igvm_files + ;; +esac diff --git a/tools/osbuilder/node-builder/azure-linux/Makefile b/tools/osbuilder/node-builder/azure-linux/Makefile new file mode 100644 index 0000000000..85ebf59e21 --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/Makefile @@ -0,0 +1,77 @@ +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# +BUILD_TYPE := release + +export SHIM_REDEPLOY_CONFIG := yes + +ifeq ($(BUILD_TYPE),debug) + export AGENT_BUILD_TYPE := debug + export AGENT_POLICY_FILE := allow-all.rego + export SHIM_USE_DEBUG_CONFIG := yes +else + export AGENT_BUILD_TYPE := release + export AGENT_POLICY_FILE := allow-set-policy.rego + export SHIM_USE_DEBUG_CONFIG := no +endif + +.PHONY: all +all: package uvm + +.PHONY: all-confpods +all-confpods: package-confpods uvm-confpods + +.PHONY: package +package: + ./package_build.sh + +.PHONY: package-confpods +package-confpods: + CONF_PODS=yes ./package_build.sh + +.PHONY: uvm +uvm: + ./uvm_build.sh + +.PHONY: uvm-confpods +uvm-confpods: + CONF_PODS=yes ./uvm_build.sh + +.PHONY: clean +clean: + ./clean.sh + +.PHONY: clean-confpods +clean-confpods: + CONF_PODS=yes ./clean.sh + +.PHONY: deploy +deploy: deploy-package deploy-uvm + +.PHONY: deploy-package +deploy-package: + ./package_install.sh + +.PHONY: deploy-package-tools +deploy-package-tools: + ./package_tools_install.sh + +.PHONY: deploy-uvm +deploy-uvm: + ./uvm_install.sh + +.PHONY: deploy-confpods +deploy-confpods: 
deploy-confpods-package deploy-confpods-uvm + +.PHONY: deploy-confpods-package +deploy-confpods-package: + CONF_PODS=yes ./package_install.sh + +.PHONY: deploy-confpods-package-tools +deploy-confpods-package-tools: + CONF_PODS=yes ./package_tools_install.sh + +.PHONY: deploy-confpods-uvm +deploy-confpods-uvm: + CONF_PODS=yes ./uvm_install.sh diff --git a/tools/osbuilder/node-builder/azure-linux/README.md b/tools/osbuilder/node-builder/azure-linux/README.md new file mode 100644 index 0000000000..fbeef9385b --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/README.md @@ -0,0 +1,312 @@ +# Overview + +This guide serves as a reference on how to build and install the underlying software stack for *Pod Sandboxing with AKS* and for *Confidential Containers on AKS* using Azure Linux. +This enables running Kata (Confidential) Containers via the OCI interface, or via a local kubelet, or leveraging AKS' Kubernetes solution. + +In the following, the terms *Kata* and *Kata-CC* refer to *Pod Sandboxing with AKS* and *Confidential Containers on AKS*, respectively. +The term *building* refers to build the components from source, whereas the term *installing* refers to utilizing components released by the Azure Linux team for straightforward evaluation. + +The guide provides the steps for two different environments: +- Azure Linux 3 based systems, such as Azure VMs + - Variant I: Utilize released components + - Variant II: Build components from source +- AKS nodes (based on Azure Linux 2 as of today) + +# Steps for Azure Linux 3 based environments + +## Set up environment + +While build can happen in any Azure Linux 3 based environment, the stack can only be evaluated on environments with proper virtualization support and, for Kata-CC, on top of AMD SEV-SNP. 
An example of such environment are Azure Linux 3 based Azure VMs using a proper SKU: +- Deploy an Azure Linux 3 VM via `az vm create` using a [CC vm size SKU](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasccv5-dcadsccv5-series) + - Example: `az vm create --resource-group --name --os-disk-size-gb --public-ip-sku Standard --size --admin-username azureuser --ssh-key-values --image ` +- SSH onto the VM + +Not validated for evaluation: Install [Azure Linux 3](https://github.com/microsoft/azurelinux) on a bare metal machine supporting AMD SEV-SNP. + +To merely build the stack, we refer to the official [Azure Linux GitHub page](https://github.com/microsoft/azurelinux) to set up an Azure Linux 3 environment. + +## Deploy required host packages (incl. VMM, SEV-SNP capable kernel and Microsoft Hypervisor) and extend containerd configuration + +Install relevant packages, append a configuration snippet to `/etc/containerd/config.toml` to register the Kata(-CC) handlers, then reboot the system: +``` +sudo dnf -y makecache +sudo dnf -y install kata-packages-host + +sudo tee -a /etc/containerd/config.toml 2&>1 <` + - For build and deployment of both Kata and Kata-CC artifacts, first run the `make all` and `make deploy` commands to build and install the Kata Containers for AKS components followed by `make clean`, and then run `make all-confpods` and `make deploy-confpods` to build and install the Confidential Containers for AKS components - or vice versa (using `make clean-confpods`). + +## Debug builds + +This section describes how to build and deploy in debug mode. + +`make all-confpods` takes the following variables: + + * `AGENT_BUILD_TYPE`: Specify `release` (default) to build the agent in + release mode, or `debug` to build it in debug mode. + * `AGENT_POLICY_FILE`: Specify `allow-set-policy.rego` (default) to use + a restrictive policy, or `allow-all.rego` to use a permissive policy. 
+ +`make deploy-confpods` takes the following variable: + + * `SHIM_USE_DEBUG_CONFIG`: Specify `no` (default) to use the production + configuration, or `yes` to use the debug configuration (all debug + logging enabled). In this case you'll want to enable debug logging + in containerd as well. Note that this variable has no effect if + `SHIM_REDEPLOY_CONFIG=no`. + +In general, you can specify the debug configuration for all the above +variables by using `BUILD_TYPE=debug` as such: + +```shell +sudo make BUILD_TYPE=debug all-confpods deploy-confpods +``` + +Also note that make still lets you override the other variables even +after setting `BUILD_TYPE`. For example, you can use the production shim +config with `BUILD_TYPE=debug`: + +```shell +sudo make BUILD_TYPE=debug SHIM_USE_DEBUG_CONFIG=no all-confpods deploy-confpods +``` + +### Prevent redeploying the shim configuration + +If you're manually modifying the shim configuration directly on the host +during development and you don't want to redeploy and overwrite that +file each time you redeploy binaries, you can separately specify the +`SHIM_REDEPLOY_CONFIG` (default `yes`): + +```shell +sudo make SHIM_REDEPLOY_CONFIG=no all-confpods deploy-confpods +``` + +Note that this variable is independent from the other variables +mentioned above. 
So if you want to avoid redeploying the shim +configuration AND build in debug mode, you have to use the following +command: + +```shell +sudo make BUILD_TYPE=debug SHIM_REDEPLOY_CONFIG=no all-confpods deploy-confpods +``` + +## Optional build step: Build and deploy the containerd fork from scratch + +``` +git clone --depth 1 --branch tardev-v1.7.7 https://github.com/microsoft/confidential-containers-containerd.git +pushd confidential-containers-containerd/ +GODEBUG=1 make +popd +``` + +Overwrite existing containerd binary, restart service: +``` +sudo cp -a --backup=numbered confidential-containers-containerd/bin/containerd /usr/bin/containerd +sudo systemctl restart containerd +``` + +# Run Kata (Confidential) Containers + +## Run via CRI or via containerd API + +Use e.g. `crictl` (or `ctr`) to schedule Kata (Confidential) containers, referencing either the Kata or Kata-CC handlers. + +Note: On Kubernetes nodes, pods created via `crictl` will be deleted by the control plane. + +The following instructions serve as a general reference: +- Install `crictl`, `cni` binaries, and set runtime endpoint in `crictl` configuration: + + ``` + sudo dnf -y install cri-tools cni + sudo crictl config --set runtime-endpoint=unix:///run/containerd/containerd.sock + ``` + +- Set a proper CNI configuration and create a sample pod manifest: This step is omitted as it depends on the individual needs. + +- Run pods with `crictl`, for example: + + `sudo crictl runp -T 30s -r ` + +- Run containers with `ctr`, for example a confidential container: + + `sudo ctr -n=k8s.io image pull --snapshotter=tardev docker.io/library/busybox:latest` + + `sudo ctr -n=k8s.io run --cni --runtime io.containerd.run.kata-cc.v2 --runtime-config-path /opt/confidential-containers/share/defaults/kata-containers/configuration-clh-snp.toml --snapshotter tardev -t --rm docker.io/library/busybox:latest hello sh` + +For further usage we refer to the upstream `crictl` (or `ctr`) and CNI documentation. 
+ +## Run via Kubernetes + +If your environment was set up through `az aks create` the respective node is ready to run Kata (Confidential) Containers as AKS Kubernetes pods. +Other types of Kubernetes clusters should work as well. While this document doesn't cover how to set up those clusters, you can +apply the kata and kata-cc runtime classes to your cluster from the machine that holds your kubeconfig file, for example: +``` +cat << EOF > runtimeClass-kata-cc.yaml +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata-cc +handler: kata-cc +overhead: + podFixed: + memory: "600Mi" +scheduling: + nodeSelector: + katacontainers.io/kata-runtime: "true" +EOF + +cat << EOF > runtimeClass-kata.yaml +kind: RuntimeClass +apiVersion: node.k8s.io/v1 +metadata: + name: kata +handler: kata +overhead: + podFixed: + memory: "600Mi" +scheduling: + nodeSelector: + katacontainers.io/kata-runtime: "true" +EOF + +kubectl apply -f runtimeClass-kata-cc.yaml -f runtimeClass-kata.yaml +``` + +And label your node appropriately: +``` +kubectl label node katacontainers.io/kata-runtime=true +``` + +# Build attestation scenarios +The build artifacts for the UVM ConfPods target include an IGVM file and a so-called reference measurement file (unsigned). The IGVM file is loaded into memory and measured by the AMD SEV-SNP PSP (when a Confidential Container is started). With this and with the Kata security policy feature, attestation scenarios can be built: the reference measurement (often referred to as 'endorsement') can, for example, be signed by a trusted party (such as Microsoft in Confidential Containers on AKS) and be compared with the actual measurement part of the attestation report. The latter can be retrieved through respective system calls inside the Kata Confidential Containers Guest VM. 
+ +An example for an attestation scenario through Microsoft Azure Attestation is presented in [Attestation in Confidential containers on Azure Container Instances](https://learn.microsoft.com/en-us/azure/container-instances/confidential-containers-attestation-concepts). +Documentation for leveraging the Kata security policy feature can be found in [Security policy for Confidential Containers on Azure Kubernetes Service](https://learn.microsoft.com/en-us/azure/confidential-computing/confidential-containers-aks-security-policy). diff --git a/tools/osbuilder/node-builder/azure-linux/clean.sh b/tools/osbuilder/node-builder/azure-linux/clean.sh new file mode 100755 index 0000000000..11cf6fb03b --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/clean.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +script_dir="$(dirname $(readlink -f $0))" +repo_dir="${script_dir}/../../../../" + +common_file="common.sh" +source "${common_file}" + +pushd "${repo_dir}" + +echo "Clean debug shim config" +pushd src/runtime/config/ +rm -f "${SHIM_DBG_CONFIG_FILE_NAME}" +popd + +echo "Clean runtime build" +pushd src/runtime/ +make clean SKIP_GO_VERSION_CHECK=1 +popd + +echo "Clean agent build" +pushd src/agent/ +make clean +popd + +rm -rf ${AGENT_INSTALL_DIR} + +echo "Clean UVM build" +pushd tools/osbuilder/ +sudo -E PATH=$PATH make DISTRO=cbl-mariner clean +popd + +echo "Clean IGVM tool installation" + + +if [ "${CONF_PODS}" == "yes" ]; then + + echo "Clean tardev-snapshotter tarfs driver build" + pushd src/tarfs + set_uvm_kernel_vars + if [ -n "${UVM_KERNEL_HEADER_DIR}" ]; then + make clean KDIR=${UVM_KERNEL_HEADER_DIR} + fi + popd + + echo "Clean utarfs binary build" + pushd src/utarfs/ + make clean + popd + + echo "Clean tardev-snapshotter overlay binary build" + pushd src/overlay/ + make clean + popd + + echo "Clean 
tardev-snapshotter service build" + pushd src/tardev-snapshotter/ + make clean + popd +fi + +popd diff --git a/tools/osbuilder/node-builder/azure-linux/common.sh b/tools/osbuilder/node-builder/azure-linux/common.sh new file mode 100755 index 0000000000..8b0665c47a --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/common.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +script_dir="$(dirname $(readlink -f $0))" +lib_file="${script_dir}/../../scripts/lib.sh" +source "${lib_file}" + +OS_VERSION=$(sort -r /etc/*-release | gawk 'match($0, /^(VERSION_ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }' | tr -d '"') + +([[ "${OS_VERSION}" == "2.0" ]] || [[ "${OS_VERSION}" == "3.0" ]]) || die "OS_VERSION: value '${OS_VERSION}' must equal 3.0 (default) or 2.0" + +if [ "${CONF_PODS}" == "yes" ]; then + INSTALL_PATH_PREFIX="/opt/confidential-containers" + UVM_TOOLS_PATH_OSB="${INSTALL_PATH_PREFIX}/uvm/tools/osbuilder" + UVM_TOOLS_PATH_SRC="${INSTALL_PATH_PREFIX}/uvm/src" + UVM_PATH_DEFAULT="${INSTALL_PATH_PREFIX}/share/kata-containers" + IMG_FILE_NAME="kata-containers.img" + IGVM_FILE_NAME="kata-containers-igvm.img" + IGVM_DBG_FILE_NAME="kata-containers-igvm-debug.img" + UVM_MEASUREMENT_FILE_NAME="igvm-measurement.cose" + UVM_DBG_MEASUREMENT_FILE_NAME="igvm-debug-measurement.cose" + SHIM_CONFIG_PATH="${INSTALL_PATH_PREFIX}/share/defaults/kata-containers" + SHIM_CONFIG_FILE_NAME="configuration-clh-snp.toml" + SHIM_CONFIG_INST_FILE_NAME="${SHIM_CONFIG_FILE_NAME}" + SHIM_DBG_CONFIG_FILE_NAME="configuration-clh-snp-debug.toml" + SHIM_DBG_CONFIG_INST_FILE_NAME="${SHIM_DBG_CONFIG_FILE_NAME}" + DEBUGGING_BINARIES_PATH="${INSTALL_PATH_PREFIX}/bin" + SHIM_BINARIES_PATH="/usr/local/bin" + SHIM_BINARY_NAME="containerd-shim-kata-cc-v2" +else + INSTALL_PATH_PREFIX="/usr" + UVM_TOOLS_PATH_OSB="/opt/kata-containers/uvm/tools/osbuilder" + UVM_TOOLS_PATH_SRC="/opt/kata-containers/uvm/src" + 
UVM_PATH_DEFAULT="${INSTALL_PATH_PREFIX}/share/kata-containers" + IMG_FILE_NAME="kata-containers.img" + SHIM_CONFIG_PATH="${INSTALL_PATH_PREFIX}/share/defaults/kata-containers" + SHIM_CONFIG_FILE_NAME="configuration-clh.toml" + SHIM_CONFIG_INST_FILE_NAME="configuration.toml" + SHIM_DBG_CONFIG_FILE_NAME="configuration-clh-debug.toml" + SHIM_DBG_CONFIG_INST_FILE_NAME="${SHIM_DBG_CONFIG_FILE_NAME}" + DEBUGGING_BINARIES_PATH="${INSTALL_PATH_PREFIX}/local/bin" + SHIM_BINARIES_PATH="${INSTALL_PATH_PREFIX}/local/bin" + SHIM_BINARY_NAME="containerd-shim-kata-v2" +fi + +# this is where cloud-hypervisor-cvm gets installed (see package SPEC) +CLOUD_HYPERVISOR_LOCATION="/usr/bin/cloud-hypervisor" +# this is where kernel-uvm gets installed (see package SPEC) +KERNEL_BINARY_LOCATION="/usr/share/cloud-hypervisor/vmlinux.bin" +# Mariner 3: different binary name +if [ "${OS_VERSION}" == "2.0" ]; then + VIRTIOFSD_BINARY_LOCATION="/usr/libexec/virtiofsd-rs" +else + VIRTIOFSD_BINARY_LOCATION="/usr/libexec/virtiofsd" +fi + +AGENT_INSTALL_DIR="${script_dir}/agent-install" + +set_uvm_kernel_vars() { + UVM_KERNEL_VERSION=$(rpm -q --queryformat '%{VERSION}' kernel-uvm-devel) + UVM_KERNEL_RELEASE=$(rpm -q --queryformat '%{RELEASE}' kernel-uvm-devel) + UVM_KERNEL_HEADER_DIR="/usr/src/linux-headers-${UVM_KERNEL_VERSION}-${UVM_KERNEL_RELEASE}" +} diff --git a/tools/osbuilder/node-builder/azure-linux/package_build.sh b/tools/osbuilder/node-builder/azure-linux/package_build.sh new file mode 100755 index 0000000000..16fe9657d6 --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/package_build.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +AGENT_BUILD_TYPE=${AGENT_BUILD_TYPE:-release} +CONF_PODS=${CONF_PODS:-no} + +script_dir="$(dirname $(readlink -f $0))" +repo_dir="${script_dir}/../../../../" + +common_file="common.sh" 
# NOTE(review): package_build.sh core — runtime_make_flags pins cloud-hypervisor as
# the only hypervisor (QEMUCMD/FCCMD/ACRNCMD/STRATOVIRTCMD emptied); CONF_PODS=no uses
# the kernel binary + DEFSTATICRESOURCEMGMT_CLH, CONF_PODS=yes sets CLHPATH instead
# (IGVM supplies the kernel). Mariner 3.0 adds DEFSANDBOXCGROUPONLY=true. ConfPods
# additionally builds utarfs, kata-overlay and tardev-snapshotter before the shim.
# The intentionally unquoted ${runtime_make_flags}/${agent_make_flags} rely on word
# splitting to pass multiple make variables.
+source "${common_file}" + +# these options ensure we produce the proper CLH config file +runtime_make_flags="SKIP_GO_VERSION_CHECK=1 QEMUCMD= FCCMD= ACRNCMD= STRATOVIRTCMD= DEFAULT_HYPERVISOR=cloud-hypervisor +    DEFMEMSZ=256 DEFSTATICSANDBOXWORKLOADMEM=1792 DEFVIRTIOFSDAEMON=${VIRTIOFSD_BINARY_LOCATION} PREFIX=${INSTALL_PATH_PREFIX}" + +# - for vanilla Kata we use the kernel binary. For ConfPods we use IGVM, so no need to provide kernel path. +# - for vanilla Kata we explicitly set DEFSTATICRESOURCEMGMT_CLH. For ConfPods, +#   the variable DEFSTATICRESOURCEMGMT_TEE is used which defaults to false +# - for ConfPods we explicitly set the cloud-hypervisor path. The path is independent of the PREFIX variable +#   as we have a single CLH binary for both vanilla Kata and ConfPods +if [ "${CONF_PODS}" == "no" ]; then + runtime_make_flags+=" DEFSTATICRESOURCEMGMT_CLH=true KERNELPATH_CLH=${KERNEL_BINARY_LOCATION}" +else + runtime_make_flags+=" CLHPATH=${CLOUD_HYPERVISOR_LOCATION}" +fi + +# On Mariner 3.0 we use cgroupsv2 with a single sandbox cgroup +if [ "${OS_VERSION}" == "3.0" ]; then + runtime_make_flags+=" DEFSANDBOXCGROUPONLY=true" +fi + +agent_make_flags="LIBC=gnu OPENSSL_NO_VENDOR=Y DESTDIR=${AGENT_INSTALL_DIR} BUILD_TYPE=${AGENT_BUILD_TYPE}" + +if [ "${CONF_PODS}" == "yes" ]; then + agent_make_flags+=" AGENT_POLICY=yes" +fi + +pushd "${repo_dir}" + +if [ "${CONF_PODS}" == "yes" ]; then + + echo "Building utarfs binary" + pushd src/utarfs/ + make all + popd + + echo "Building kata-overlay binary" + pushd src/overlay/ + make all + popd + + echo "Building tardev-snapshotter service binary" + pushd src/tardev-snapshotter/ + make all + popd +fi + +echo "Building shim binary and configuration" +pushd src/runtime/ +if [ "${CONF_PODS}" == "yes" ] || [ "${OS_VERSION}" == "3.0" ]; then + make ${runtime_make_flags} +else + # Mariner 2 pod sandboxing uses cgroupsv1 - note: cannot add the kernelparams in above assignments, + # leads to quotation issue. 
# FIX(review): quoted "${PREFIX}/usr/sbin" in the mkdir call — it was the only
# unquoted target here, inconsistent with the quoted mkdir -p "${PREFIX}/..." lines
# directly above it, and it breaks if PREFIX contains whitespace (SC2086). Also quoted
# the nested command substitutions in package_install.sh's script_dir assignment for
# the same reason. The rest is unchanged: package_build.sh derives the debug shim
# config by uncommenting enable_debug/debug_console_enabled (and, for ConfPods,
# pointing it at the debug IGVM), then builds and stages the agent.
Hence, implementing the conditional check right here at the time of the make command + make ${runtime_make_flags} KERNELPARAMS="systemd.legacy_systemd_cgroup_controller=yes systemd.unified_cgroup_hierarchy=0" +fi +popd + +pushd src/runtime/config/ +echo "Creating shim debug configuration" +cp "${SHIM_CONFIG_FILE_NAME}" "${SHIM_DBG_CONFIG_FILE_NAME}" +sed -i '/^#enable_debug =/s|^#||g' "${SHIM_DBG_CONFIG_FILE_NAME}" +sed -i '/^#debug_console_enabled =/s|^#||g' "${SHIM_DBG_CONFIG_FILE_NAME}" + +if [ "${CONF_PODS}" == "yes" ]; then + echo "Adding debug igvm to SNP shim debug configuration" + sed -i "s|${IGVM_FILE_NAME}|${IGVM_DBG_FILE_NAME}|g" "${SHIM_DBG_CONFIG_FILE_NAME}" +fi +popd + +echo "Building agent binary and generating service files" +pushd src/agent/ +make ${agent_make_flags} +make install ${agent_make_flags} +popd + +popd diff --git a/tools/osbuilder/node-builder/azure-linux/package_install.sh b/tools/osbuilder/node-builder/azure-linux/package_install.sh new file mode 100755 index 0000000000..791cff5d92 --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/package_install.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +CONF_PODS=${CONF_PODS:-no} +PREFIX=${PREFIX:-} +SHIM_REDEPLOY_CONFIG=${SHIM_REDEPLOY_CONFIG:-yes} +SHIM_USE_DEBUG_CONFIG=${SHIM_USE_DEBUG_CONFIG:-no} +START_SERVICES=${START_SERVICES:-yes} + +script_dir="$(dirname "$(readlink -f "$0")")" +repo_dir="${script_dir}/../../../../" + +common_file="common.sh" +source "${common_file}" + +pushd "${repo_dir}" + +echo "Creating target directories" +mkdir -p "${PREFIX}/${SHIM_CONFIG_PATH}" +mkdir -p "${PREFIX}/${DEBUGGING_BINARIES_PATH}" +mkdir -p "${PREFIX}/${SHIM_BINARIES_PATH}" + +if [ "${CONF_PODS}" == "yes" ]; then + echo "Installing tardev-snapshotter binaries and service file" + mkdir -p "${PREFIX}/usr/sbin" + cp -a 
# FIX(review): quoted the unquoted ${PREFIX}/usr/... destinations in the
# tardev-snapshotter install block (mount.tar, kata-overlay, tardev-snapshotter,
# systemd unit) and the two mkdir calls — they were inconsistent with the quoted
# cp destinations later in this same script and break if PREFIX contains
# whitespace (SC2086). No behavioral change otherwise: diagnosability binaries,
# the shim binary (installed under SHIM_BINARY_NAME), and — when
# SHIM_REDEPLOY_CONFIG=yes — the release and debug shim configs are installed.
--backup=numbered src/utarfs/target/release/utarfs "${PREFIX}/usr/sbin/mount.tar" + mkdir -p "${PREFIX}/usr/bin" + cp -a --backup=numbered src/overlay/target/release/kata-overlay "${PREFIX}/usr/bin/" + cp -a --backup=numbered src/tardev-snapshotter/target/release/tardev-snapshotter "${PREFIX}/usr/bin/" + mkdir -p "${PREFIX}/usr/lib/systemd/system/" + cp -a --backup=numbered src/tardev-snapshotter/tardev-snapshotter.service "${PREFIX}/usr/lib/systemd/system/" + + echo "Enabling and starting snapshotter service" + if [ "${START_SERVICES}" == "yes" ]; then + systemctl enable tardev-snapshotter && systemctl daemon-reload && systemctl restart tardev-snapshotter + fi +fi + +echo "Installing diagnosability binaries (monitor, runtime, collect-data script)" +cp -a --backup=numbered src/runtime/kata-monitor "${PREFIX}/${DEBUGGING_BINARIES_PATH}" +cp -a --backup=numbered src/runtime/kata-runtime "${PREFIX}/${DEBUGGING_BINARIES_PATH}" +chmod +x src/runtime/data/kata-collect-data.sh +cp -a --backup=numbered src/runtime/data/kata-collect-data.sh "${PREFIX}/${DEBUGGING_BINARIES_PATH}" + +echo "Installing shim binary" +cp -a --backup=numbered src/runtime/containerd-shim-kata-v2 "${PREFIX}/${SHIM_BINARIES_PATH}"/"${SHIM_BINARY_NAME}" + +if [ "${SHIM_REDEPLOY_CONFIG}" == "yes" ]; then + echo "Installing shim configuration" + cp -a --backup=numbered src/runtime/config/"${SHIM_CONFIG_FILE_NAME}" "${PREFIX}/${SHIM_CONFIG_PATH}/${SHIM_CONFIG_INST_FILE_NAME}" + cp -a --backup=numbered src/runtime/config/"${SHIM_DBG_CONFIG_FILE_NAME}" "${PREFIX}/${SHIM_CONFIG_PATH}/${SHIM_DBG_CONFIG_INST_FILE_NAME}" + + if [ "${SHIM_USE_DEBUG_CONFIG}" == "yes" ]; then + # We simply override the release config with the debug config, + # which is probably fine when debugging. Not symlinking as that + # would create cycles the next time this script is called. 
# NOTE(review): tail of package_install.sh — with SHIM_USE_DEBUG_CONFIG=yes the debug
# config is copied (not symlinked) over the installed release config name; with
# SHIM_REDEPLOY_CONFIG=no the config install is skipped entirely so on-host edits
# survive redeploys. Then begins package_tools_install.sh, which stages the osbuilder
# tooling (scripts, rootfs-builder, image-builder, node-builder) under
# ${PREFIX}/${UVM_TOOLS_PATH_OSB} for on-node UVM builds.
+    echo "Overriding shim configuration with debug configuration" + cp -a --backup=numbered src/runtime/config/"${SHIM_DBG_CONFIG_FILE_NAME}" "${PREFIX}/${SHIM_CONFIG_PATH}/${SHIM_CONFIG_INST_FILE_NAME}" + fi +else + echo "Skipping installation of shim configuration" +fi + +popd diff --git a/tools/osbuilder/node-builder/azure-linux/package_tools_install.sh b/tools/osbuilder/node-builder/azure-linux/package_tools_install.sh new file mode 100755 index 0000000000..a1f32ca885 --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/package_tools_install.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +CONF_PODS=${CONF_PODS:-no} +PREFIX=${PREFIX:-} + +script_dir="$(dirname $(readlink -f $0))" +repo_dir="${script_dir}/../../../../" + +common_file="common.sh" +source "${common_file}" + +pushd "${repo_dir}" + +echo "Creating target directories" +mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_OSB}/scripts" +mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_OSB}/rootfs-builder/cbl-mariner" +mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_OSB}/image-builder" +mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/agent-install/usr/bin" +mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/agent-install/usr/lib/systemd/system" + +if [ "${CONF_PODS}" == "yes" ]; then + mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_SRC}/kata-opa" + mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_SRC}/tarfs" + mkdir -p "${PREFIX}/${UVM_TOOLS_PATH_OSB}/igvm-builder/azure-linux" +fi + +echo "Installing UVM build scripting" +cp -a --backup=numbered tools/osbuilder/Makefile "${PREFIX}/${UVM_TOOLS_PATH_OSB}/" +cp -a --backup=numbered tools/osbuilder/scripts/lib.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/scripts/" +cp -a --backup=numbered tools/osbuilder/rootfs-builder/rootfs.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/rootfs-builder/" +cp -a --backup=numbered 
# NOTE(review): middle of package_tools_install.sh — stages the cbl-mariner
# rootfs-builder scripts, image-builder (image_builder.sh + nsdax.gpl.c), the
# node-builder azure-linux scripts (Makefile, clean/common/uvm_build/uvm_install),
# and the prebuilt agent binary plus its systemd target/service files into the
# UVM tools install tree; for ConfPods it additionally begins staging the kata-opa
# policy files.
tools/osbuilder/rootfs-builder/cbl-mariner/config.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/rootfs-builder/cbl-mariner/" +cp -a --backup=numbered tools/osbuilder/rootfs-builder/cbl-mariner/rootfs_lib.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/rootfs-builder/cbl-mariner/" +cp -a --backup=numbered tools/osbuilder/image-builder/image_builder.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/image-builder/" +cp -a --backup=numbered tools/osbuilder/image-builder/nsdax.gpl.c "${PREFIX}/${UVM_TOOLS_PATH_OSB}/image-builder/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/Makefile "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/clean.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/common.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/uvm_build.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/uvm_install.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/" + +echo "Installing agent binary and service files" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/agent-install/usr/bin/kata-agent "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/agent-install/usr/bin/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/agent-install/usr/lib/systemd/system/kata-containers.target "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/agent-install/usr/lib/systemd/system/" +cp -a --backup=numbered tools/osbuilder/node-builder/azure-linux/agent-install/usr/lib/systemd/system/kata-agent.service "${PREFIX}/${UVM_TOOLS_PATH_OSB}/node-builder/azure-linux/agent-install/usr/lib/systemd/system/" + +if [ "${CONF_PODS}" == "yes" ]; then + cp -a --backup=numbered src/kata-opa/allow-all.rego "${PREFIX}/${UVM_TOOLS_PATH_SRC}/kata-opa/" + cp 
# FIX(review): removed the stray second '}' in the uvm_build.sh guard
# [ -z "${UVM_KERNEL_HEADER_DIR}}" ] — with the typo the expansion always contained
# a literal '}', so -z was always false and the "header dir unset" error path
# (exit 1 after set_uvm_kernel_vars) was dead code. Also emit a diagnostic to
# stderr instead of exiting silently. Everything else is unchanged patch text.
-a --backup=numbered src/kata-opa/allow-set-policy.rego "${PREFIX}/${UVM_TOOLS_PATH_SRC}/kata-opa/" + cp -a --backup=numbered src/tarfs/Makefile "${PREFIX}/${UVM_TOOLS_PATH_SRC}/tarfs/" + cp -a --backup=numbered src/tarfs/tarfs.c "${PREFIX}/${UVM_TOOLS_PATH_SRC}/tarfs/" + cp -a --backup=numbered tools/osbuilder/igvm-builder/igvm_builder.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/igvm-builder/" + cp -a --backup=numbered tools/osbuilder/igvm-builder/azure-linux/config.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/igvm-builder/azure-linux/" + cp -a --backup=numbered tools/osbuilder/igvm-builder/azure-linux/igvm_lib.sh "${PREFIX}/${UVM_TOOLS_PATH_OSB}/igvm-builder/azure-linux/" +fi + +popd diff --git a/tools/osbuilder/node-builder/azure-linux/uvm_build.sh b/tools/osbuilder/node-builder/azure-linux/uvm_build.sh new file mode 100755 index 0000000000..6734277650 --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/uvm_build.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +AGENT_POLICY_FILE="${AGENT_POLICY_FILE:-allow-set-policy.rego}" +CONF_PODS=${CONF_PODS:-no} +IGVM_SVN=${IGVM_SVN:-0} + +script_dir="$(dirname $(readlink -f $0))" +repo_dir="${script_dir}/../../../../" + +agent_policy_file_abs="${repo_dir}/src/kata-opa/${AGENT_POLICY_FILE}" + +common_file="common.sh" +source "${common_file}" + +# This ensures that a pre-built agent binary is being injected into the rootfs +rootfs_make_flags="AGENT_SOURCE_BIN=${AGENT_INSTALL_DIR}/usr/bin/kata-agent OS_VERSION=${OS_VERSION}" + +if [ "${CONF_PODS}" == "yes" ]; then + rootfs_make_flags+=" AGENT_POLICY=yes CONF_GUEST=yes AGENT_POLICY_FILE=${agent_policy_file_abs}" +fi + +if [ "${CONF_PODS}" == "yes" ]; then + set_uvm_kernel_vars + if [ -z "${UVM_KERNEL_HEADER_DIR}" ]; then + echo "UVM_KERNEL_HEADER_DIR is empty - is kernel-uvm-devel installed?" >&2 + exit 1 + fi +fi + +pushd "${repo_dir}" + +echo "Building rootfs and including pre-built agent 
# FIX(review): quoting hardening, no logic change — PATH="$PATH" in the sudo -E
# invocations (unquoted $PATH word-splits if it ever contains spaces, SC2086);
# quoted the source/destination of the two agent-service 'sudo cp' calls and the
# make variable expansions (KDIR/KVER/INSTALL_MOD_PATH/IGVM_SVN), matching the
# already-quoted expansions elsewhere in these scripts; quoted the nested command
# substitutions in uvm_install.sh's script_dir assignment. Flow is unchanged:
# rootfs build, then either the dm-verity measured image + IGVM (ConfPods) or a
# plain image.
binary" +pushd tools/osbuilder +# This command requires sudo because of dnf-installing packages into rootfs. As a suite, following commands require sudo as well as make clean +sudo -E PATH="$PATH" make ${rootfs_make_flags} -B DISTRO=cbl-mariner rootfs +ROOTFS_PATH="$(readlink -f ./cbl-mariner_rootfs)" +popd + +echo "Installing agent service files into rootfs" +sudo cp "${AGENT_INSTALL_DIR}/usr/lib/systemd/system/kata-containers.target" "${ROOTFS_PATH}/usr/lib/systemd/system/kata-containers.target" +sudo cp "${AGENT_INSTALL_DIR}/usr/lib/systemd/system/kata-agent.service" "${ROOTFS_PATH}/usr/lib/systemd/system/kata-agent.service" + +if [ "${CONF_PODS}" == "yes" ]; then + echo "Building tarfs kernel driver and installing into rootfs" + pushd src/tarfs + make KDIR="${UVM_KERNEL_HEADER_DIR}" + sudo make KDIR="${UVM_KERNEL_HEADER_DIR}" KVER="${UVM_KERNEL_VERSION}" INSTALL_MOD_PATH="${ROOTFS_PATH}" install + popd + + echo "Building dm-verity protected image based on rootfs" + pushd tools/osbuilder + sudo -E PATH="$PATH" make DISTRO=cbl-mariner MEASURED_ROOTFS=yes DM_VERITY_FORMAT=kernelinit image + popd + + echo "Building IGVM and UVM measurement files" + pushd tools/osbuilder + sudo chmod o+r root_hash.txt + sudo make igvm DISTRO=cbl-mariner IGVM_SVN="${IGVM_SVN}" + popd +else + echo "Building image based on rootfs" + pushd tools/osbuilder + sudo -E PATH="$PATH" make DISTRO=cbl-mariner image + popd +fi + +popd diff --git a/tools/osbuilder/node-builder/azure-linux/uvm_install.sh b/tools/osbuilder/node-builder/azure-linux/uvm_install.sh new file mode 100755 index 0000000000..09e2cfa386 --- /dev/null +++ b/tools/osbuilder/node-builder/azure-linux/uvm_install.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Microsoft Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +set -o errexit +set -o pipefail +set -o errtrace + +[ -n "$DEBUG" ] && set -x + +CONF_PODS=${CONF_PODS:-no} + +script_dir="$(dirname "$(readlink -f "$0")")" +repo_dir="${script_dir}/../../../../" + 
# NOTE(review): body of uvm_install.sh — copies the artifacts produced by
# uvm_build.sh out of tools/osbuilder into UVM_PATH (defaulting to
# UVM_PATH_DEFAULT from common.sh): always the rootfs image, and for ConfPods
# additionally the release/debug IGVM files and their COSE measurement files.
# --backup=numbered preserves any previously installed copies.
+common_file="common.sh" +source "${common_file}" + +UVM_PATH=${UVM_PATH:-${UVM_PATH_DEFAULT}} + +pushd "${repo_dir}" + +pushd tools/osbuilder + +echo "Creating target directory" +mkdir -p "${UVM_PATH}" + +echo "Installing UVM files to target directory" +if [ "${CONF_PODS}" == "yes" ]; then + cp -a --backup=numbered "${IGVM_FILE_NAME}" "${UVM_PATH}" + cp -a --backup=numbered "${IGVM_DBG_FILE_NAME}" "${UVM_PATH}" + cp -a --backup=numbered "${UVM_MEASUREMENT_FILE_NAME}" "${UVM_PATH}" + cp -a --backup=numbered "${UVM_DBG_MEASUREMENT_FILE_NAME}" "${UVM_PATH}" +fi + +cp -a --backup=numbered "${IMG_FILE_NAME}" "${UVM_PATH}" + +popd + +popd