
Merge pull request from zvonkok/module-signing

gpu: add module signing
Zvonko Kaiser authored 2025-03-01 09:51:24 -05:00; committed by GitHub
commit 3f13023f5f
GPG Key ID: B5690EEEBB952194
9 changed files with 120 additions and 69 deletions
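Taken together, the flow is: the kernel build generates certs/signing_key.pem; build-kernel.sh encrypts it with the pin when KBUILD_SIGN_PIN is set; the encrypted key travels inside the kernel-headers tarball; and the GPU rootfs build drops it back into certs/ so that modules_install signs the NVIDIA modules. A minimal sketch of the signing step itself, assuming an encrypted key and the kernel's in-tree sign-file helper (the module name is illustrative):

    # Sketch only: modules_install does this automatically when
    # CONFIG_MODULE_SIG_ALL=y; KBUILD_SIGN_PIN supplies the key passphrase.
    export KBUILD_SIGN_PIN='example-pin'   # illustrative value
    ./scripts/sign-file sha512 certs/signing_key.pem certs/signing_key.x509 nvidia.ko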

View File

@@ -5,7 +5,7 @@
 # SPDX-License-Identifier: Apache-2.0
 #!/bin/bash
-set -xeuo pipefail
+set -euo pipefail
 shopt -s nullglob
 shopt -s extglob
@@ -21,12 +21,14 @@ base_os="jammy"
 APT_INSTALL="apt -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' -yqq --no-install-recommends install"
+export KBUILD_SIGN_PIN="${6:-}"
 export DEBIAN_FRONTEND=noninteractive
 is_feature_enabled() {
	local feature="$1"
	# Check if feature is in the comma-separated list
-	if [[ ",$nvidia_gpu_stack," == *",$feature,"* ]]; then
+	if [[ ",${nvidia_gpu_stack}," == *",${feature},"* ]]; then
		return 0
	else
		return 1
@@ -36,11 +38,11 @@ is_feature_enabled() {
 set_driver_version_type() {
	echo "chroot: Setting the correct driver version"
-	if [[ ",$nvidia_gpu_stack," == *",latest,"* ]]; then
+	if [[ ",${nvidia_gpu_stack}," == *",latest,"* ]]; then
		driver_version="latest"
-	elif [[ ",$nvidia_gpu_stack," == *",lts,"* ]]; then
+	elif [[ ",${nvidia_gpu_stack}," == *",lts,"* ]]; then
		driver_version="lts"
-	elif [[ "$nvidia_gpu_stack" =~ version=([^,]+) ]]; then
+	elif [[ "${nvidia_gpu_stack}" =~ version=([^,]+) ]]; then
		driver_version="${BASH_REMATCH[1]}"
	else
		echo "No known driver spec found. Please specify \"latest\", \"lts\", or \"version=<VERSION>\"."
@@ -52,9 +54,9 @@ set_driver_version_type() {
	echo "chroot: Setting the correct driver type"
	# driver -> enable open or closed drivers
-	if [[ "$nvidia_gpu_stack" =~ (^|,)driver=open($|,) ]]; then
+	if [[ "${nvidia_gpu_stack}" =~ (^|,)driver=open($|,) ]]; then
		driver_type="-open"
-	elif [[ "$nvidia_gpu_stack" =~ (^|,)driver=closed($|,) ]]; then
+	elif [[ "${nvidia_gpu_stack}" =~ (^|,)driver=closed($|,) ]]; then
		driver_type=""
	fi
@@ -74,7 +76,7 @@ install_nvidia_fabricmanager() {
		return
	}
	# if run_fm_file_name exists run it
-	if [ -f /"${run_fm_file_name}" ]; then
+	if [[ -f /"${run_fm_file_name}" ]]; then
		install_nvidia_fabricmanager_from_run_file
	else
		install_nvidia_fabricmanager_from_distribution
@@ -104,24 +106,35 @@ build_nvidia_drivers() {
	echo "chroot: Build NVIDIA drivers"
	pushd "${driver_source_files}" >> /dev/null
+	local certs_dir
+	local kernel_version
	for version in /lib/modules/*; do
		kernel_version=$(basename "${version}")
+		certs_dir=/lib/modules/"${kernel_version}"/build/certs
+		signing_key=${certs_dir}/signing_key.pem
		echo "chroot: Building GPU modules for: ${kernel_version}"
		cp /boot/System.map-"${kernel_version}" /lib/modules/"${kernel_version}"/build/System.map
-		if [ "${arch_target}" == "aarch64" ]; then
+		if [[ "${arch_target}" == "aarch64" ]]; then
			ln -sf /lib/modules/"${kernel_version}"/build/arch/arm64 /lib/modules/"${kernel_version}"/build/arch/aarch64
		fi
-		if [ "${arch_target}" == "x86_64" ]; then
+		if [[ "${arch_target}" == "x86_64" ]]; then
			ln -sf /lib/modules/"${kernel_version}"/build/arch/x86 /lib/modules/"${kernel_version}"/build/arch/amd64
		fi
		make -j "$(nproc)" CC=gcc SYSSRC=/lib/modules/"${kernel_version}"/build > /dev/null
+		if [[ -n "${KBUILD_SIGN_PIN}" ]]; then
+			mkdir -p "${certs_dir}" && mv /signing_key.* "${certs_dir}"/.
+		fi
		make INSTALL_MOD_STRIP=1 -j "$(nproc)" CC=gcc SYSSRC=/lib/modules/"${kernel_version}"/build modules_install
		make -j "$(nproc)" CC=gcc SYSSRC=/lib/modules/"${kernel_version}"/build clean > /dev/null
+		# The make clean above should also clear the certs directory, but in case
+		# something went wrong make sure the signing_key.pem is removed
+		[[ -e "${signing_key}" ]] && rm -f "${signing_key}"
	done
	# Save the modules for later so that a linux-image purge does not remove it
	tar cvfa /lib/modules.save_from_purge.tar.zst /lib/modules
@@ -129,14 +142,14 @@ build_nvidia_drivers() {
 }
 install_userspace_components() {
-	if [ ! -f /"${run_file_name}" ]; then
+	if [[ ! -f /"${run_file_name}" ]]; then
		echo "chroot: Skipping NVIDIA userspace runfile components installation"
		return
	fi
	pushd /NVIDIA-* >> /dev/null
	# if aarch64 we need to remove --no-install-compat32-libs
-	if [ "${arch_target}" == "aarch64" ]; then
+	if [[ "${arch_target}" == "aarch64" ]]; then
		./nvidia-installer --no-kernel-modules --no-systemd --no-nvidia-modprobe -s --x-prefix=/root
	else
		./nvidia-installer --no-kernel-modules --no-systemd --no-nvidia-modprobe -s --x-prefix=/root --no-install-compat32-libs
@@ -146,10 +159,10 @@ install_userspace_components() {
 }
 prepare_run_file_drivers() {
-	if [ "${driver_version}" == "latest" ]; then
+	if [[ "${driver_version}" == "latest" ]]; then
		driver_version=""
		echo "chroot: Resetting driver version not supported with run-file"
-	elif [ "${driver_version}" == "lts" ]; then
+	elif [[ "${driver_version}" == "lts" ]]; then
		driver_version=""
		echo "chroot: Resetting driver version not supported with run-file"
	fi
@@ -163,16 +176,16 @@ prepare_run_file_drivers() {
	# Sooner or later RIM files will only be available remotely
	RIMFILE=$(ls NVIDIA-*/RIM_GH100PROD.swidtag)
-	if [ -e "${RIMFILE}" ]; then
+	if [[ -e "${RIMFILE}" ]]; then
		cp NVIDIA-*/RIM_GH100PROD.swidtag /usr/share/nvidia/rim/.
	fi
	popd >> /dev/null
 }
 prepare_distribution_drivers() {
-	if [ "${driver_version}" == "latest" ]; then
+	if [[ "${driver_version}" == "latest" ]]; then
		driver_version=$(apt-cache search --names-only 'nvidia-headless-no-dkms-.?.?.?-open' | awk '{ print $1 }' | tail -n 1 | cut -d'-' -f5)
-	elif [ "${driver_version}" == "lts" ]; then
+	elif [[ "${driver_version}" == "lts" ]]; then
		driver_version="550"
	fi
@@ -195,11 +208,11 @@ prepare_distribution_drivers() {
 prepare_nvidia_drivers() {
	local driver_source_dir=""
-	if [ -f /"${run_file_name}" ]; then
+	if [[ -f /"${run_file_name}" ]]; then
		prepare_run_file_drivers
		for source_dir in /NVIDIA-*; do
-			if [ -d "${source_dir}" ]; then
+			if [[ -d "${source_dir}" ]]; then
				driver_source_files="${source_dir}"/kernel${driver_type}
				driver_source_dir="${source_dir}"
				break
@@ -211,7 +224,7 @@ prepare_nvidia_drivers() {
		prepare_distribution_drivers
		for source_dir in /usr/src/nvidia*; do
-			if [ -d "${source_dir}" ]; then
+			if [[ -d "${source_dir}" ]]; then
				driver_source_files="${source_dir}"
				driver_source_dir="${source_dir}"
				break
@@ -241,7 +254,7 @@ setup_apt_repositories() {
	# Changing the reference here also means changes are needed for the cuda_keyring
	# and the cuda apt repository; see install_dcgm for details.
-	cat <<-CHROOT_EOF > /etc/apt/sources.list.d/${base_os}.list
+	cat <<-CHROOT_EOF > /etc/apt/sources.list.d/"${base_os}".list
		deb [arch=amd64] http://us.archive.ubuntu.com/ubuntu ${base_os} main restricted universe multiverse
		deb [arch=amd64] http://us.archive.ubuntu.com/ubuntu ${base_os}-updates main restricted universe multiverse
		deb [arch=amd64] http://us.archive.ubuntu.com/ubuntu ${base_os}-security main restricted universe multiverse
@@ -273,13 +286,13 @@ get_supported_gpus_from_run_file() {
	local source_dir="$1"
	local supported_gpus_json="${source_dir}"/supported-gpus/supported-gpus.json
-	jq . < "${supported_gpus_json}" | grep '"devid"' | awk '{ print $2 }' | tr -d ',"' > ${supported_gpu_devids}
+	jq . < "${supported_gpus_json}" | grep '"devid"' | awk '{ print $2 }' | tr -d ',"' > "${supported_gpu_devids}"
 }
 get_supported_gpus_from_distro_drivers() {
	local supported_gpus_json=/usr/share/doc/nvidia-kernel-common-"${driver_version}"/supported-gpus.json
-	jq . < "${supported_gpus_json}" | grep '"devid"' | awk '{ print $2 }' | tr -d ',"' > ${supported_gpu_devids}
+	jq . < "${supported_gpus_json}" | grep '"devid"' | awk '{ print $2 }' | tr -d ',"' > "${supported_gpu_devids}"
 }
 export_driver_version() {
@@ -302,8 +315,8 @@ install_nvidia_dcgm() {
	[[ ${base_os} == "jammy" ]] && osver="ubuntu2204" || die "Unknown base_os ${base_os} used"
	keyring="cuda-keyring_1.1-1_all.deb"
-	curl -O https://developer.download.nvidia.com/compute/cuda/repos/${osver}/${arch}/${keyring}
-	dpkg -i ${keyring} && rm -f ${keyring}
+	curl -O "https://developer.download.nvidia.com/compute/cuda/repos/${osver}/${arch}/${keyring}"
+	dpkg -i "${keyring}" && rm -f "${keyring}"
	apt update
	eval "${APT_INSTALL}" datacenter-gpu-manager
@@ -315,7 +328,7 @@ cleanup_rootfs() {
	apt-mark hold libstdc++6 libzstd1 libgnutls30 pciutils
	# noble=libgnutls30t64
-	if [ -n "${driver_version}" ]; then
+	if [[ -n "${driver_version}" ]]; then
		apt-mark hold libnvidia-cfg1-"${driver_version}" \
			nvidia-compute-utils-"${driver_version}" \
			nvidia-utils-"${driver_version}" \
@@ -342,7 +355,7 @@ cleanup_rootfs() {
		python3-pip software-properties-common ca-certificates \
		linux-libc-dev nuitka python3-minimal
-	if [ -n "${driver_version}" ]; then
+	if [[ -n "${driver_version}" ]]; then
		apt purge -yqq nvidia-headless-no-dkms-"${driver_version}${driver_type}" \
			nvidia-kernel-source-"${driver_version}${driver_type}" -yqq
	fi
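
Once modules_install has run with signing enabled, each installed .ko carries an appended signature. A hedged way to confirm this from inside the rootfs (the module path is illustrative):

    # Signed modules expose signer/sig_key/sig_hashalgo fields, and end with
    # the 28-byte magic string "~Module signature appended~\n".
    modinfo nvidia.ko | grep -E '^(signer|sig_key|sig_hashalgo):'
    tail -c 28 nvidia.ko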

View File

@@ -5,7 +5,7 @@
 # SPDX-License-Identifier: Apache-2.0
 set -euo pipefail
-[ -n "$DEBUG" ] && set -x
+[[ -n "${DEBUG}" ]] && set -x
 readonly BUILD_DIR="/kata-containers/tools/packaging/kata-deploy/local-build/build/"
 # catch errors and then assign
@@ -14,12 +14,17 @@ readonly SCRIPT_DIR="${script_dir}/nvidia"
 # This will control how much output the initrd/image will produce
 DEBUG=""
+KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-}
-machine_arch=${ARCH}
+NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:?NVIDIA_GPU_STACK must be set}
+VARIANT=${VARIANT:?VARIANT must be set}
+ARCH=${ARCH:?ARCH must be set}
-if [[ "$machine_arch" == "aarch64" ]]; then
+machine_arch="${ARCH}"
+if [[ "${machine_arch}" == "aarch64" ]]; then
	distro_arch="arm64"
-elif [[ "$machine_arch" == "x86_64" ]]; then
+elif [[ "${machine_arch}" == "x86_64" ]]; then
	distro_arch="amd64"
 else
	die "Unsupported architecture: ${machine_arch}"
@@ -39,7 +44,7 @@ setup_nvidia-nvrc() {
	pushd "${TARGET_BUILD_DIR}" > /dev/null || exit 1
	rm -rf "${PROJECT}"
-	git clone https://github.com/NVIDIA/${PROJECT}.git
+	git clone https://github.com/NVIDIA/"${PROJECT}".git
	pushd "${PROJECT}" > /dev/null || exit 1
@@ -67,8 +72,8 @@ setup_nvidia-gpu-admin-tools() {
	pushd "${TARGET_BUILD_DIR}" > /dev/null || exit 1
-	rm -rf "$(basename ${TARGET_GIT})"
-	git clone ${TARGET_GIT}
+	rm -rf "$(basename "${TARGET_GIT}")"
+	git clone "${TARGET_GIT}"
	rm -rf dist
	# Installed via pipx local python environment
@@ -98,14 +103,14 @@ setup_nvidia-dcgm-exporter() {
	local dex="dcgm-exporter"
	rm -rf "${dex}"
-	git clone --branch "${TARGET_VERSION}" https://github.com/NVIDIA/${dex}
-	make -C ${dex} binary
+	git clone --branch "${TARGET_VERSION}" https://github.com/NVIDIA/"${dex}"
+	make -C "${dex}" binary
	mkdir -p ../destdir/bin
-	mkdir -p ../destdir/etc/${dex}
+	mkdir -p ../destdir/etc/"${dex}"
-	cp ${dex}/cmd/${dex}/${dex} ../destdir/bin/.
-	cp ${dex}/etc/*.csv ../destdir/etc/${dex}/.
+	cp "${dex}"/cmd/"${dex}"/"${dex}" ../destdir/bin/.
+	cp "${dex}"/etc/*.csv ../destdir/etc/"${dex}"/.
	tar cvfa "${TARBALL}" -C ../destdir .
	tar tvf "${TARBALL}"
@@ -114,7 +119,7 @@ setup_nvidia-dcgm-exporter() {
 }
 setup_nvidia_gpu_rootfs_stage_one() {
-	if [ -e "${BUILD_DIR}/kata-static-nvidia-gpu-rootfs-stage-one.tar.zst" ]; then
+	if [[ -e "${BUILD_DIR}/kata-static-nvidia-gpu-rootfs-stage-one.tar.zst" ]]; then
		info "nvidia: GPU rootfs stage one already exists"
		return
	fi
@@ -123,11 +128,11 @@ setup_nvidia_gpu_rootfs_stage_one() {
	local rootfs_type=${1:-""}
-	info "nvidia: Setup GPU rootfs type=$rootfs_type"
+	info "nvidia: Setup GPU rootfs type=${rootfs_type}"
	for component in "nvidia-gpu-admin-tools" "nvidia-dcgm-exporter" "nvidia-nvrc"; do
-		if [ ! -e "${BUILD_DIR}/kata-static-${component}.tar.zst" ]; then
-			setup_${component}
+		if [[ ! -e "${BUILD_DIR}/kata-static-${component}.tar.zst" ]]; then
+			setup_"${component}"
		fi
	done
@@ -136,28 +141,28 @@ setup_nvidia_gpu_rootfs_stage_one() {
	chmod +x ./nvidia_chroot.sh
	local appendix=""
-	if [ "$rootfs_type" == "confidential" ]; then
+	if [[ "${rootfs_type}" == "confidential" ]]; then
		appendix="-${rootfs_type}"
	fi
-	if echo "$NVIDIA_GPU_STACK" | grep -q '\<dragonball\>'; then
+	if echo "${NVIDIA_GPU_STACK}" | grep -q '\<dragonball\>'; then
		appendix="-dragonball-experimental"
	fi
	# We need the kernel packages for building the drivers; they will be
	# deinstalled and removed from the rootfs once the build finishes.
-	tar -xvf ${BUILD_DIR}/kata-static-kernel-nvidia-gpu"${appendix}"-headers.tar.xz -C .
+	tar -xvf "${BUILD_DIR}"/kata-static-kernel-nvidia-gpu"${appendix}"-headers.tar.xz -C .
	# If we find a locally downloaded run file build the kernel modules
	# with it, otherwise use the distribution packages. Run files may have
	# more recent drivers available than the distribution packages.
	local run_file_name="nvidia-driver.run"
-	if [ -f ${BUILD_DIR}/${run_file_name} ]; then
-		cp -L ${BUILD_DIR}/${run_file_name} ./${run_file_name}
+	if [[ -f ${BUILD_DIR}/${run_file_name} ]]; then
+		cp -L "${BUILD_DIR}"/"${run_file_name}" ./"${run_file_name}"
	fi
	local run_fm_file_name="nvidia-fabricmanager.run"
-	if [ -f ${BUILD_DIR}/${run_fm_file_name} ]; then
-		cp -L ${BUILD_DIR}/${run_fm_file_name} ./${run_fm_file_name}
+	if [[ -f ${BUILD_DIR}/${run_fm_file_name} ]]; then
+		cp -L "${BUILD_DIR}"/"${run_fm_file_name}" ./"${run_fm_file_name}"
	fi
	mount --rbind /dev ./dev
@@ -165,7 +170,7 @@ setup_nvidia_gpu_rootfs_stage_one() {
	mount -t proc /proc ./proc
	chroot . /bin/bash -c "/nvidia_chroot.sh $(uname -r) ${run_file_name} \
-		${run_fm_file_name} ${machine_arch} ${NVIDIA_GPU_STACK}"
+		${run_fm_file_name} ${machine_arch} ${NVIDIA_GPU_STACK} ${KBUILD_SIGN_PIN}"
	umount -R ./dev
	umount ./proc
@@ -309,7 +314,7 @@ compress_rootfs() {
 }
 toggle_debug() {
-	if echo "$NVIDIA_GPU_STACK" | grep -q '\<debug\>'; then
+	if echo "${NVIDIA_GPU_STACK}" | grep -q '\<debug\>'; then
		export DEBUG="true"
	fi
 }
@@ -319,13 +324,13 @@ setup_nvidia_gpu_rootfs_stage_two() {
	readonly stage_two="${ROOTFS_DIR:?}"
	readonly stack="${NVIDIA_GPU_STACK:?}"
-	echo "nvidia: chisseling the following stack components: $stack"
+	echo "nvidia: chisseling the following stack components: ${stack}"
-	[ -e "${stage_one}" ] && rm -rf "${stage_one}"
-	[ ! -e "${stage_one}" ] && mkdir -p "${stage_one}"
+	[[ -e "${stage_one}" ]] && rm -rf "${stage_one}"
+	[[ ! -e "${stage_one}" ]] && mkdir -p "${stage_one}"
-	tar -C "${stage_one}" -xf ${BUILD_DIR}/kata-static-rootfs-nvidia-gpu-stage-one.tar.zst
+	tar -C "${stage_one}" -xf "${BUILD_DIR}"/kata-static-rootfs-nvidia-gpu-stage-one.tar.zst
	pushd "${stage_two}" >> /dev/null
@@ -334,19 +339,19 @@ setup_nvidia_gpu_rootfs_stage_two() {
	chisseled_init
	chisseled_iptables
-	IFS=',' read -r -a stack_components <<< "$NVIDIA_GPU_STACK"
+	IFS=',' read -r -a stack_components <<< "${NVIDIA_GPU_STACK}"
	for component in "${stack_components[@]}"; do
-		if [ "$component" = "compute" ]; then
+		if [[ "${component}" = "compute" ]]; then
			echo "nvidia: processing \"compute\" component"
			chisseled_compute
-		elif [ "$component" = "dcgm" ]; then
+		elif [[ "${component}" = "dcgm" ]]; then
			echo "nvidia: processing DCGM component"
			chisseled_dcgm
-		elif [ "$component" = "nvswitch" ]; then
+		elif [[ "${component}" = "nvswitch" ]]; then
			echo "nvidia: processing NVSwitch component"
			chisseled_nvswitch
-		elif [ "$component" = "gpudirect" ]; then
+		elif [[ "${component}" = "gpudirect" ]]; then
			echo "nvidia: processing GPUDirect component"
			chisseled_gpudirect
		fi

View File

@@ -53,9 +53,11 @@ USE_DOCKER=${USE_DOCKER:-""}
 USE_PODMAN=${USE_PODMAN:-""}
 EXTRA_PKGS=${EXTRA_PKGS:-""}
+KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-""}
 NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-""}
-nvidia_rootfs="${script_dir}/nvidia/nvidia_rootfs.sh"
-[ "${ARCH}" == "x86_64" ] || [ "${ARCH}" == "aarch64" ] && source "$nvidia_rootfs"
+VARIANT=${VARIANT:-""}
+[[ "${VARIANT}" == "nvidia-gpu"* ]] && source "${script_dir}/nvidia/nvidia_rootfs.sh"
 #For cross build
 CROSS_BUILD=${CROSS_BUILD:-false}
@@ -564,6 +566,7 @@ build_rootfs_distro()
		--env AGENT_POLICY="${AGENT_POLICY}" \
		--env CONFIDENTIAL_GUEST="${CONFIDENTIAL_GUEST}" \
		--env NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK}" \
+		--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
		-v "${repo_dir}":"/kata-containers" \
		-v "${ROOTFS_DIR}":"/rootfs" \
		-v "${script_dir}/../scripts":"/scripts" \

View File

@@ -104,6 +104,7 @@ PULL_TYPE="${PULL_TYPE:-default}"
 USE_CACHE="${USE_CACHE:-}"
 BUSYBOX_CONF_FILE=${BUSYBOX_CONF_FILE:-}
 NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK:-}"
+KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-}
 docker run \
	-v $HOME/.docker:/root/.docker \
@@ -135,6 +136,7 @@ docker run \
	--env USE_CACHE="${USE_CACHE}" \
	--env BUSYBOX_CONF_FILE="${BUSYBOX_CONF_FILE}" \
	--env NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK}" \
+	--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
	--env AA_KBC="${AA_KBC:-}" \
	--env HKD_PATH="$(realpath "${HKD_PATH:-}" 2> /dev/null || true)" \
	--env SE_KERNEL_PARAMS="${SE_KERNEL_PARAMS:-}" \

View File

@@ -53,6 +53,7 @@ TARGET_BRANCH="${TARGET_BRANCH:-main}"
 PUSH_TO_REGISTRY="${PUSH_TO_REGISTRY:-}"
 KERNEL_HEADERS_PKG_TYPE="${KERNEL_HEADERS_PKG_TYPE:-deb}"
 RELEASE="${RELEASE:-"no"}"
+KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN:-}"
 workdir="${WORKDIR:-$PWD}"
@@ -1246,7 +1247,15 @@ handle_build() {
		kernel_headers_dir=$(get_kernel_headers_dir "${build_target}")
		pushd "${kernel_headers_dir}"
-		find . -type f -name "*.${KERNEL_HEADERS_PKG_TYPE}" -exec tar cvfJ "${kernel_headers_final_tarball_path}" {} +
+		find . -type f -name "*.${KERNEL_HEADERS_PKG_TYPE}" -exec tar rvf kernel-headers.tar {} +
+		if [ -n "${KBUILD_SIGN_PIN}" ]; then
+			head -n1 kata-linux-*/certs/signing_key.pem | grep -q "ENCRYPTED PRIVATE KEY" || die "signing_key.pem is not encrypted"
+			mv kata-linux-*/certs/signing_key.pem .
+			mv kata-linux-*/certs/signing_key.x509 .
+			tar -rvf kernel-headers.tar signing_key.pem signing_key.x509 --remove-files
+		fi
+		xz -T0 kernel-headers.tar
+		mv kernel-headers.tar.xz "${kernel_headers_final_tarball_path}"
		popd
	fi
	tar tvf "${kernel_headers_final_tarball_path}"
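
The head -n1 guard above leans on the PEM framing: with OpenSSL 3.x, openssl rsa writes PKCS#8 output whose first line is "-----BEGIN ENCRYPTED PRIVATE KEY-----" when a cipher is requested, so an unencrypted key fails the grep and the build dies before a plaintext key can land in the tarball. Illustratively:

    # What the guard distinguishes:
    #   encrypted:   -----BEGIN ENCRYPTED PRIVATE KEY-----
    #   unencrypted: -----BEGIN PRIVATE KEY-----
    head -n1 signing_key.pem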

View File

@@ -32,6 +32,7 @@ readonly default_initramfs="${script_dir}/initramfs.cpio.gz"
 # xPU vendor
 readonly VENDOR_INTEL="intel"
 readonly VENDOR_NVIDIA="nvidia"
+readonly KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-""}
 #Path to kernel directory
 kernel_path=""
@@ -69,6 +70,7 @@ measured_rootfs="false"
 CROSS_BUILD_ARG=""
 packaging_scripts_dir="${script_dir}/../scripts"
+# shellcheck source=tools/packaging/scripts/lib.sh
 source "${packaging_scripts_dir}/lib.sh"
 usage() {
@@ -493,6 +495,15 @@ build_kernel_headers() {
	if [ "$linux_headers" == "rpm" ]; then
		make -j $(nproc) rpm-pkg ARCH="${arch_target}"
	fi
+	# If we encrypt the key earlier it will break the kernel_headers build.
+	# At this stage the kernel has created certs/signing_key.pem;
+	# encrypt it for later use in another job or an out-of-tree build.
+	# Only encrypt if KBUILD_SIGN_PIN is set.
+	local key="certs/signing_key.pem"
+	if [ -n "${KBUILD_SIGN_PIN}" ]; then
+		[ -e "${key}" ] || die "${key} missing but KBUILD_SIGN_PIN is set"
+		openssl rsa -aes256 -in ${key} -out ${key} -passout env:KBUILD_SIGN_PIN
+	fi
	popd >>/dev/null
 }
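
Since the key is encrypted in place, every later consumer (the driver build in the chroot above) must export the same KBUILD_SIGN_PIN. A hedged sanity check for the round trip, without writing the decrypted key anywhere:

    # Assumes KBUILD_SIGN_PIN is exported in the current environment;
    # -check verifies the RSA key's consistency after decryption.
    openssl rsa -in certs/signing_key.pem -passin env:KBUILD_SIGN_PIN -noout -check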

View File

@@ -7,9 +7,6 @@ CONFIG_PCI_MMCONFIG=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CRYPTO_FIPS requires this config when loading modules is enabled.
-CONFIG_MODULE_SIG=y
 # Linux kernel version suffix
 CONFIG_LOCALVERSION="-nvidia-gpu${CONF_GUEST_SUFFIX}"
@@ -25,3 +22,12 @@ CONFIG_X86_PAT=y
 CONFIG_CRYPTO_ECC=y
 CONFIG_CRYPTO_ECDH=y
 CONFIG_CRYPTO_ECDSA=y
+# Module signing
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_ALL=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
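
In short: CONFIG_MODULE_SIG_ALL=y signs every module at modules_install time, CONFIG_MODULE_SIG_FORCE=y makes the guest kernel reject unsigned or badly signed modules instead of merely tainting, and SHA512 selects the digest; with CONFIG_SYSTEM_TRUSTED_KEYS left empty the build autogenerates certs/signing_key.pem. A hedged runtime check from inside a guest:

    # Prints Y when signature enforcement is active (this sysfs node is
    # present whenever CONFIG_MODULE_SIG is enabled).
    cat /sys/module/module/parameters/sig_enforce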

View File

@@ -1 +1 @@
-147
+148

View File

@@ -22,6 +22,7 @@ DESTDIR=${DESTDIR:-${PWD}}
 PREFIX=${PREFIX:-/opt/kata}
 container_image="${KERNEL_CONTAINER_BUILDER:-$(get_kernel_image_name)}"
 MEASURED_ROOTFS=${MEASURED_ROOTFS:-no}
+KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN:-}"
 kernel_builder_args="-a ${ARCH} $*"
 if [ "${MEASURED_ROOTFS}" == "yes" ]; then
@@ -71,6 +72,7 @@ docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
	-w "${PWD}" \
	--env DESTDIR="${DESTDIR}" --env PREFIX="${PREFIX}" \
	--env USER="${USER}" \
+	--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
	--user "$(id -u)":"$(id -g)" \
	"${container_image}" \
	bash -c "${kernel_builder} ${kernel_builder_args} build-headers"
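
End to end, a signing-enabled headers build might then be driven like this (the pin value and build target name are illustrative, not taken from this diff):

    # Hypothetical invocation; KBUILD_SIGN_PIN travels only via the environment.
    export KBUILD_SIGN_PIN="$(openssl rand -base64 32)"
    ./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build=kernel-nvidia-gpu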