Commit 6cd021ce86: Merge branch 'CCv0' into CCv0
.github/workflows/cc-payload-after-push.yaml (vendored, new file, 97 lines)
@@ -0,0 +1,97 @@
name: CI | Publish Kata Containers payload for Confidential Containers
on:
  push:
    branches:
      - CCv0

jobs:
  build-asset:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        asset:
          - cc-cloud-hypervisor
          - cc-kernel
          - cc-qemu
          - cc-rootfs-image
          - cc-shim-v2
          - cc-virtiofsd
          - cc-sev-kernel
          - cc-sev-ovmf
          - cc-sev-rootfs-initrd
          - cc-tdx-kernel
          - cc-tdx-qemu
          - cc-tdx-td-shim
          - cc-tdx-tdvf
    steps:
      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0 # This is needed in order to keep the commit ids history
      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
          PUSH_TO_REGISTRY: yes

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          retention-days: 1
          if-no-files-found: error

  create-kata-tarball:
    runs-on: ubuntu-latest
    needs: build-asset
    steps:
      - uses: actions/checkout@v3
      - name: get-artifacts
        uses: actions/download-artifact@v3
        with:
          name: kata-artifacts
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v3
        with:
          name: kata-static-tarball
          path: kata-static.tar.xz
          retention-days: 1
          if-no-files-found: error

  kata-payload:
    needs: create-kata-tarball
    runs-on: ubuntu-latest
    steps:
      - name: Login to Confidential Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball

      - name: build-and-push-kata-payload
        id: build-and-push-kata-payload
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh $(pwd)/kata-static.tar.xz "quay.io/confidential-containers/runtime-payload-ci" "kata-containers-latest"
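For reference, a minimal sketch of how the payload published by this workflow could be consumed, assuming the registry and tags passed to the upload script in the last step above (per kata-deploy-build-and-upload-payload.sh later in this diff, each build is also tagged kata-containers-<full commit SHA>; the SHA below is a placeholder):

    # Pull the floating CI tag pushed on every CCv0 push
    docker pull quay.io/confidential-containers/runtime-payload-ci:kata-containers-latest
    # Or pin to the exact commit that produced the payload (placeholder SHA)
    docker pull quay.io/confidential-containers/runtime-payload-ci:kata-containers-<commit-sha>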
.github/workflows/cc-payload.yaml (vendored, new file, 87 lines)
@@ -0,0 +1,87 @@
name: Publish Kata Containers payload for Confidential Containers
on:
  push:
    tags:
      - 'CC\-[0-9]+.[0-9]+.[0-9]+'

jobs:
  build-asset:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        asset:
          - cc-cloud-hypervisor
          - cc-kernel
          - cc-qemu
          - cc-rootfs-image
          - cc-shim-v2
          - cc-virtiofsd
          - cc-sev-kernel
          - cc-sev-ovmf
          - cc-sev-rootfs-initrd
          - cc-tdx-kernel
          - cc-tdx-qemu
          - cc-tdx-td-shim
          - cc-tdx-tdvf
    steps:
      - uses: actions/checkout@v3
      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          retention-days: 1
          if-no-files-found: error

  create-kata-tarball:
    runs-on: ubuntu-latest
    needs: build-asset
    steps:
      - uses: actions/checkout@v3
      - name: get-artifacts
        uses: actions/download-artifact@v3
        with:
          name: kata-artifacts
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v3
        with:
          name: kata-static-tarball
          path: kata-static.tar.xz
          retention-days: 1
          if-no-files-found: error

  kata-payload:
    needs: create-kata-tarball
    runs-on: ubuntu-latest
    steps:
      - name: Login to quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball

      - name: build-and-push-kata-payload
        id: build-and-push-kata-payload
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh $(pwd)/kata-static.tar.xz
@@ -252,19 +252,28 @@ fn set_devices_resources(
 }

 fn set_hugepages_resources(
-    _cg: &cgroups::Cgroup,
+    cg: &cgroups::Cgroup,
     hugepage_limits: &[LinuxHugepageLimit],
     res: &mut cgroups::Resources,
 ) {
     info!(sl!(), "cgroup manager set hugepage");
     let mut limits = vec![];
+    let hugetlb_controller = cg.controller_of::<HugeTlbController>();

     for l in hugepage_limits.iter() {
+        if hugetlb_controller.is_some() && hugetlb_controller.unwrap().size_supported(&l.page_size)
+        {
             let hr = HugePageResource {
                 size: l.page_size.clone(),
                 limit: l.limit,
             };
             limits.push(hr);
+        } else {
+            warn!(
+                sl!(),
+                "{} page size support cannot be verified, dropping requested limit", l.page_size
+            );
+        }
     }
     res.hugepages.limits = limits;
 }
@@ -53,7 +53,7 @@ if [ -n "${2-}" ]; then
     data_integrity="$2"
 fi

-device_name=$(sed -e 's/DEVNAME=//g;t;d' /sys/dev/block/${device_num}/uevent)
+device_name=$(sed -e 's/DEVNAME=//g;t;d' "/sys/dev/block/${device_num}/uevent")
 device_path="/dev/$device_name"
 if [[ -n "$device_name" && -b "$device_path" ]]; then
     storage_key_path="/run/cc_storage.key"
@@ -63,13 +63,64 @@ if [[ -n "$device_name" && -b "$device_path" ]]; then
         echo "YES" | cryptsetup luksFormat --type luks2 "$device_path" --sector-size 4096 \
             --cipher aes-xts-plain64 "$storage_key_path"
     else
+        # Wiping a device is a time consuming operation. To avoid a full wipe, integritysetup
+        # and cryptsetup provide a --no-wipe option.
+        # However, an integrity device that is not wiped will have invalid checksums. Normally
+        # this should not be a problem since a page must first be written to before it can be read
+        # (otherwise the data would be arbitrary). The act of writing would populate the checksum
+        # for the page.
+        # However, tools like mkfs.ext4 read pages before they are written; sometimes the read
+        # of an unwritten page happens due to kernel buffering.
+        # See https://gitlab.com/cryptsetup/cryptsetup/-/issues/525 for explanation and fix.
+        # The way to properly format the non-wiped dm-integrity device is to figure out which pages
+        # mkfs.ext4 will write to and then to write to those pages before hand so that they will
+        # have valid integrity tags.
         echo "YES" | cryptsetup luksFormat --type luks2 "$device_path" --sector-size 4096 \
-            --cipher aes-xts-plain64 --integrity hmac-sha256 "$storage_key_path"
+            --cipher aes-xts-plain64 --integrity hmac-sha256 "$storage_key_path" \
+            --integrity-no-wipe
     fi

     cryptsetup luksOpen -d "$storage_key_path" "$device_path" ephemeral_image_encrypted_disk
     rm "$storage_key_path"
-    mkfs.ext4 /dev/mapper/ephemeral_image_encrypted_disk
+    if [ "$data_integrity" == "false" ]; then
+        mkfs.ext4 /dev/mapper/ephemeral_image_encrypted_disk -E lazy_journal_init
+    else
+        # mkfs.ext4 doesn't perform whole sector writes and this will cause checksum failures
+        # with an unwiped integrity device. Therefore, first perform a dry run.
+        output=$(mkfs.ext4 /dev/mapper/ephemeral_image_encrypted_disk -F -n)
+
+        # The above command will produce output like
+        # mke2fs 1.46.5 (30-Dec-2021)
+        # Creating filesystem with 268435456 4k blocks and 67108864 inodes
+        # Filesystem UUID: 4a5ff012-91c0-47d9-b4bb-8f83e830825f
+        # Superblock backups stored on blocks:
+        # 32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
+        # 4096000, 7962624, 11239424, 20480000, 23887872, 71663616, 78675968,
+        # 102400000, 214990848
+        delimiter="Superblock backups stored on blocks:"
+        blocks_list=$([[ $output =~ $delimiter(.*) ]] && echo "${BASH_REMATCH[1]}")
+
+        # Find list of blocks
+        block_nums=$(echo "$blocks_list" | grep -Eo '[0-9]{4,}' | sort -n)
+
+        # Add zero to list of blocks
+        block_nums="0 $block_nums"
+
+        # Iterate through each block and write to it to ensure that it has valid checksum
+        for block_num in $block_nums
+        do
+            echo "Clearing page at $block_num"
+            # Zero out the page
+            dd if=/dev/zero bs=4k count=1 oflag=direct \
+                of=/dev/mapper/ephemeral_image_encrypted_disk seek="$block_num"
+        done
+
+        # Now perform the actual ext4 format. Use lazy_journal_init so that the journal is
+        # initialized on demand. This is safe for ephemeral storage since we don't expect
+        # ephemeral storage to survive a power cycle.
+        mkfs.ext4 /dev/mapper/ephemeral_image_encrypted_disk -E lazy_journal_init
+    fi


     [ ! -d "/run/image" ] && mkdir /run/image

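As a quick sanity check after the script above runs, the opened dm-crypt mapping can be inspected with cryptsetup (illustrative only, not part of this change; the mapping name is the one used by the script):

    # Inspect the opened mapping (type, cipher, sector size)
    sudo cryptsetup status ephemeral_image_encrypted_disk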
@@ -16,6 +16,8 @@ kata_deploy_create="${script_dir}/kata-deploy-binaries.sh"
 uid=$(id -u ${USER})
 gid=$(id -g ${USER})

+source "${script_dir}/../../scripts/lib.sh"
+
 if [ "${script_dir}" != "${PWD}" ]; then
     ln -sf "${script_dir}/build" "${PWD}/build"
 fi

@@ -37,12 +39,17 @@ if [ ! -d "$HOME/.docker" ]; then
     remove_dot_docker_dir=true
 fi

-docker build -q -t build-kata-deploy \
+container_image="${CC_BUILDER_REGISTRY}:build-kata-deploy-$(get_last_modification ${kata_dir} ${script_dir})"
+
+docker pull "${container_image}" || \
+    (docker build -q -t "${container_image}" \
     --build-arg IMG_USER="${USER}" \
     --build-arg UID=${uid} \
     --build-arg GID=${gid} \
     --build-arg HOST_DOCKER_GID=${docker_gid} \
-    "${script_dir}/dockerbuild/"
+    "${script_dir}/dockerbuild/" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}" "no")

 docker run \
     --privileged \

@@ -56,10 +63,11 @@ docker run \
     --env AA_KBC="${AA_KBC:-}" \
     --env KATA_BUILD_CC="${KATA_BUILD_CC:-}" \
     --env INCLUDE_ROOTFS="$(realpath "${INCLUDE_ROOTFS:-}" 2> /dev/null || true)" \
+    --env PUSH_TO_REGISTRY="${PUSH_TO_REGISTRY:-"no"}" \
     -v "${kata_dir}:${kata_dir}" \
     --rm \
     -w ${script_dir} \
-    build-kata-deploy "${kata_deploy_create}" $@
+    "${container_image}" "${kata_deploy_create}" $@

 if [ $remove_dot_docker_dir == true ]; then
     rm -rf "$HOME/.docker"
@@ -152,9 +152,7 @@ install_cc_qemu() {
 #Install all components that are not assets
 install_cc_shimv2() {
     GO_VERSION="$(yq r ${versions_yaml} languages.golang.meta.newest-version)"
-    RUST_VERSION="$(yq r ${versions_yaml} languages.rust.meta.newest-version)"
     export GO_VERSION
-    export RUST_VERSION
     export REMOVE_VMM_CONFIGS="acrn fc"

     extra_opts="DEFSERVICEOFFLOAD=true"
@@ -5,20 +5,32 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-KATA_DEPLOY_DIR="`dirname $0`/../../kata-deploy-cc"
+KATA_DEPLOY_DIR="`dirname ${0}`/../../kata-deploy-cc"
 KATA_DEPLOY_ARTIFACT="${1:-"kata-static.tar.xz"}"
+REGISTRY="${2:-"quay.io/confidential-containers/runtime-payload"}"
+TAG="${3:-}"

-echo "Copying $KATA_DEPLOY_ARTIFACT to $KATA_DEPLOY_DIR"
-cp $KATA_DEPLOY_ARTIFACT $KATA_DEPLOY_DIR
+echo "Copying ${KATA_DEPLOY_ARTIFACT} to ${KATA_DEPLOY_DIR}"
+cp ${KATA_DEPLOY_ARTIFACT} ${KATA_DEPLOY_DIR}

-pushd $KATA_DEPLOY_DIR
+pushd ${KATA_DEPLOY_DIR}

-IMAGE_TAG="quay.io/confidential-containers/runtime-payload:kata-containers-$(git rev-parse HEAD)"
+IMAGE_TAG="${REGISTRY}:kata-containers-$(git rev-parse HEAD)"

 echo "Building the image"
-docker build --tag $IMAGE_TAG .
+docker build --tag ${IMAGE_TAG} .

 echo "Pushing the image to quay.io"
-docker push $IMAGE_TAG
+docker push ${IMAGE_TAG}

+if [ -n "${TAG}" ]; then
+    ADDITIONAL_TAG="${REGISTRY}:${TAG}"
+
+    echo "Building the ${ADDITIONAL_TAG} image"
+    docker build --tag ${ADDITIONAL_TAG} .
+
+    echo "Pushing the image ${ADDITIONAL_TAG} to quay.io"
+    docker push ${ADDITIONAL_TAG}
+fi
+
 popd
@@ -8,6 +8,8 @@
 export GOPATH=${GOPATH:-${HOME}/go}
 export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
 export tests_repo_dir="$GOPATH/src/$tests_repo"
+export CC_BUILDER_REGISTRY="quay.io/kata-containers/cc-builders"
+export PUSH_TO_REGISTRY="${PUSH_TO_REGISTRY:-"no"}"

 this_script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

@@ -113,3 +115,33 @@ get_config_version() {
         die "failed to find ${config_version_file}"
     fi
 }
+
+# $1 - Repo's root dir
+# $2 - The file we're looking for the last modification
+get_last_modification() {
+    local repo_root_dir="${1}"
+    local file="${2}"
+
+    # This is a workaround needed for when running this code on Jenkins
+    git config --global --add safe.directory ${repo_root_dir} &> /dev/null
+
+    dirty=""
+    [ $(git status --porcelain | grep "${file}" | wc -l) -gt 0 ] && dirty="-dirty"
+
+    echo "$(git log -1 --pretty=format:"%H" ${file})${dirty}"
+}
+
+# $1 - The tag to be pushed to the registry
+# $2 - "yes" to use sudo, "no" otherwise
+push_to_registry() {
+    local tag="${1}"
+    local use_sudo="${2:-"yes"}"
+
+    if [ "${PUSH_TO_REGISTRY}" == "yes" ]; then
+        if [ "${use_sudo}" == "yes" ]; then
+            sudo docker push ${tag}
+        else
+            docker push ${tag}
+        fi
+    fi
+}
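A minimal sketch of how the builder scripts later in this diff are expected to combine these two helpers; the image name and build directory below are placeholders, and CC_BUILDER_REGISTRY / PUSH_TO_REGISTRY come from the exports added above:

    source "tools/packaging/scripts/lib.sh"

    # Tag the builder image with the last commit that touched the build directory,
    # so an unchanged directory reuses the image cached in the registry.
    container_image="${CC_BUILDER_REGISTRY}:example-$(get_last_modification "${PWD}" some/build/dir)"

    sudo docker pull "${container_image}" || \
        (sudo docker build -t "${container_image}" some/build/dir && \
        push_to_registry "${container_image}")  # no-op unless PUSH_TO_REGISTRY=yes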
@@ -15,7 +15,6 @@ readonly default_install_dir="$(cd "${script_dir}/../../kernel" && pwd)"

 source "${script_dir}/../../scripts/lib.sh"

-container_image="kata-initramfs-builder"
 kata_version="${kata_version:-}"
 cryptsetup_repo="${cryptsetup_repo:-}"
 cryptsetup_version="${cryptsetup_version:-}"

@@ -33,8 +32,12 @@ package_output_dir="${package_output_dir:-}"
 [ -n "${lvm2_repo}" ] || die "Failed to get lvm2 repo"
 [ -n "${lvm2_version}" ] || die "Failed to get lvm2 version"

-sudo docker build \
-    -t "${container_image}" "${script_dir}"
+container_image="${CC_BUILDER_REGISTRY}:initramfs-cryptsetup-${cryptsetup_version}-lvm2-${lvm2_version}-$(get_last_modification ${repo_root_dir} ${script_dir})"
+
+sudo docker pull ${container_image} || (sudo docker build \
+    -t "${container_image}" "${script_dir}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${PWD}" \
@@ -12,12 +12,16 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 readonly repo_root_dir="$(cd "${script_dir}/../../../.." && pwd)"
 readonly kernel_builder="${repo_root_dir}/tools/packaging/kernel/build-kernel.sh"

+source "${script_dir}/../../scripts/lib.sh"

 DESTDIR=${DESTDIR:-${PWD}}
 PREFIX=${PREFIX:-/opt/kata}
-container_image="kata-kernel-builder"
+container_image="${CC_BUILDER_REGISTRY}:kernel-$(get_last_modification ${repo_root_dir} ${script_dir})"

-sudo docker build -t "${container_image}" "${script_dir}"
+sudo docker pull ${container_image} || \
+    (sudo docker build -t "${container_image}" "${script_dir}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${PWD}" \
@@ -16,7 +16,7 @@ source "${script_dir}/../../scripts/lib.sh"

 DESTDIR=${DESTDIR:-${PWD}}
 PREFIX=${PREFIX:-/opt/kata}
-container_image="kata-ovmf-builder"
+container_image="${CC_BUILDER_REGISTRY}:ovmf-$(get_last_modification ${repo_root_dir} ${script_dir})"
 ovmf_build="${ovmf_build:-x86_64}"
 kata_version="${kata_version:-}"
 ovmf_repo="${ovmf_repo:-}"

@@ -52,7 +52,10 @@ fi
 [ -n "$ovmf_package" ] || die "failed to get ovmf package or commit"
 [ -n "$package_output_dir" ] || die "failed to get ovmf package or commit"

-sudo docker build -t "${container_image}" "${script_dir}"
+sudo docker pull ${container_image} || \
+    (sudo docker build -t "${container_image}" "${script_dir}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${PWD}" \
@@ -4,15 +4,12 @@
 # SPDX-License-Identifier: Apache-2.0
 from ubuntu:20.04
-
-
-WORKDIR /root/qemu
-
 # CACHE_TIMEOUT: date to invalid cache, if the date changes the image will be rebuild
 # This is required to keep build dependencies with security fixes.
 ARG CACHE_TIMEOUT
 RUN echo "$CACHE_TIMEOUT"
 ARG DEBIAN_FRONTEND=noninteractive

+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 RUN apt-get update && apt-get upgrade -y && \
     apt-get --no-install-recommends install -y \
     apt-utils \

@@ -52,38 +49,7 @@ RUN apt-get update && apt-get upgrade -y && \
     if [ "$(uname -m)" != "s390x" ]; then apt-get install -y --no-install-recommends libpmem-dev; fi && \
     apt-get clean && rm -rf /var/lib/apt/lists/

-ARG QEMU_REPO
-# commit/tag/branch
-ARG QEMU_VERSION
-ARG PREFIX
-# BUILD_SUFFIX is used by the qemu-build-post.sh script to
-# properly rename non vanilla versions of the QEMU
-ARG BUILD_SUFFIX
-ARG HYPERVISOR_NAME
-ARG PKGVERSION
-ARG QEMU_DESTDIR
-ARG QEMU_TARBALL
-
-SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 RUN git clone https://github.com/axboe/liburing/ ~/liburing && \
     cd ~/liburing && \
     git checkout tags/liburing-2.1 && \
     make && make install && ldconfig
-
-COPY scripts/configure-hypervisor.sh /root/configure-hypervisor.sh
-COPY qemu /root/kata_qemu
-COPY scripts/apply_patches.sh /root/apply_patches.sh
-COPY scripts/patch_qemu.sh /root/patch_qemu.sh
-COPY static-build/scripts/qemu-build-post.sh /root/static-build/scripts/qemu-build-post.sh
-COPY static-build/qemu.blacklist /root/static-build/qemu.blacklist
-
-RUN git clone --depth=1 "${QEMU_REPO}" qemu && \
-    cd qemu && \
-    git fetch --depth=1 origin "${QEMU_VERSION}" && git checkout FETCH_HEAD && \
-    scripts/git-submodule.sh update meson capstone && \
-    /root/patch_qemu.sh "${QEMU_VERSION}" "/root/kata_qemu/patches" && \
-    (PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s "${HYPERVISOR_NAME}" | xargs ./configure \
-    --with-pkgversion="${PKGVERSION}") && \
-    make -j"$(nproc ${CI:+--ignore 1})" && \
-    make install DESTDIR="${QEMU_DESTDIR}" && \
-    /root/static-build/scripts/qemu-build-post.sh
@@ -9,6 +9,8 @@ set -o nounset
 set -o pipefail

 script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+readonly repo_root_dir="$(cd "${script_dir}/../../../.." && pwd)"
+readonly qemu_builder="${script_dir}/build-qemu.sh"

 source "${script_dir}/../../scripts/lib.sh"
 source "${script_dir}/../qemu.blacklist"

@@ -37,30 +39,32 @@ CACHE_TIMEOUT=$(date +"%Y-%m-%d")
 [ -n "${build_suffix}" ] && HYPERVISOR_NAME="kata-qemu-${build_suffix}" || HYPERVISOR_NAME="kata-qemu"
 [ -n "${build_suffix}" ] && PKGVERSION="kata-static-${build_suffix}" || PKGVERSION="kata-static"

-container_image="qemu-static-${qemu_version,,}"
+container_image="${CC_BUILDER_REGISTRY}:qemu-$(get_last_modification ${repo_root_dir} ${script_dir})"

-sudo "${container_engine}" build \
+sudo docker pull ${container_image} || \
+    (sudo "${container_engine}" build \
     --build-arg CACHE_TIMEOUT="${CACHE_TIMEOUT}" \
     --build-arg BUILD_SUFFIX=${build_suffix} \
     --build-arg HYPERVISOR_NAME="${HYPERVISOR_NAME}" \
     --build-arg PKGVERSION="${PKGVERSION}" \
     --build-arg http_proxy="${http_proxy}" \
     --build-arg https_proxy="${https_proxy}" \
     --build-arg QEMU_DESTDIR="${qemu_destdir}" \
     --build-arg QEMU_REPO="${qemu_repo}" \
     --build-arg QEMU_VERSION="${qemu_version}" \
     --build-arg QEMU_TARBALL="${qemu_tar}" \
     --build-arg PREFIX="${prefix}" \
     "${packaging_dir}" \
     -f "${script_dir}/Dockerfile" \
-    -t "${container_image}"
+    -t "${container_image}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 sudo "${container_engine}" run \
     --rm \
     -i \
+    --env BUILD_SUFFIX="${build_suffix}" \
+    --env HYPERVISOR_NAME="${HYPERVISOR_NAME}" \
+    --env PKGVERSION="${PKGVERSION}" \
+    --env QEMU_DESTDIR="${qemu_destdir}" \
+    --env QEMU_REPO="${qemu_repo}" \
+    --env QEMU_VERSION="${qemu_version}" \
+    --env QEMU_TARBALL="${qemu_tar}" \
+    --env PREFIX="${prefix}" \
+    -v "${repo_root_dir}:/root/kata-containers" \
     -v "${PWD}":/share "${container_image}" \
-    mv "${qemu_destdir}/${qemu_tar}" /share/
-
-sudo docker image rm "${container_image}"
+    bash -c "/root/kata-containers/tools/packaging/static-build/qemu/build-qemu.sh"

 sudo chown ${USER}:$(id -gn ${USER}) "${PWD}/${qemu_tar}"
tools/packaging/static-build/qemu/build-qemu.sh (executable, new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
#
# Copyright (c) 2022 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -o errexit
set -o nounset
set -o pipefail

kata_packaging_dir="/root/kata-containers/tools/packaging"
kata_packaging_scripts="${kata_packaging_dir}/scripts"

kata_static_build_dir="${kata_packaging_dir}/static-build"
kata_static_build_scripts="${kata_static_build_dir}/scripts"

git clone --depth=1 "${QEMU_REPO}" qemu
pushd qemu
git fetch --depth=1 origin "${QEMU_VERSION}"
git checkout FETCH_HEAD
scripts/git-submodule.sh update meson capstone
${kata_packaging_scripts}/patch_qemu.sh "${QEMU_VERSION}" "${kata_packaging_dir}/qemu/patches"
PREFIX="${PREFIX}" ${kata_packaging_scripts}/configure-hypervisor.sh -s "${HYPERVISOR_NAME}" | xargs ./configure --with-pkgversion="${PKGVERSION}"
make -j"$(nproc ${CI:+--ignore 1})"
make install DESTDIR="${QEMU_DESTDIR}"
popd
${kata_static_build_scripts}/qemu-build-post.sh
mv "${QEMU_DESTDIR}/${QEMU_TARBALL}" /share/
@@ -12,33 +12,42 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 readonly repo_root_dir="$(cd "${script_dir}/../../../.." && pwd)"
 readonly kernel_builder="${repo_root_dir}/tools/packaging/kernel/build-kernel.sh"

 source "${script_dir}/../../scripts/lib.sh"

 GO_VERSION=${GO_VERSION}
-RUST_VERSION=${RUST_VERSION}
+RUST_VERSION=${RUST_VERSION:-}

 DESTDIR=${DESTDIR:-${PWD}}
 PREFIX=${PREFIX:-/opt/kata}
-container_image="shim-v2-builder"
+container_image="${CC_BUILDER_REGISTRY}:shim-v2-go-${GO_VERSION}-rust-${RUST_VERSION}-$(get_last_modification ${repo_root_dir} ${script_dir})"

 EXTRA_OPTS="${EXTRA_OPTS:-""}"
 REMOVE_VMM_CONFIGS="${REMOVE_VMM_CONFIGS:-""}"

-sudo docker build --build-arg GO_VERSION="${GO_VERSION}" --build-arg RUST_VERSION="${RUST_VERSION}" -t "${container_image}" "${script_dir}"
+sudo docker pull ${container_image} || \
+    (sudo docker build \
+    --build-arg GO_VERSION="${GO_VERSION}" \
+    --build-arg RUST_VERSION="${RUST_VERSION}" \
+    -t "${container_image}" "${script_dir}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 arch=$(uname -m)
 if [ ${arch} = "ppc64le" ]; then
     arch="ppc64"
 fi

-sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
+if [ -n "${RUST_VERSION}" ]; then
+    sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${repo_root_dir}/src/runtime-rs" \
     "${container_image}" \
     bash -c "git config --global --add safe.directory ${repo_root_dir} && make PREFIX=${PREFIX} QEMUCMD=qemu-system-${arch}"

-sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
+    sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${repo_root_dir}/src/runtime-rs" \
     "${container_image}" \
     bash -c "git config --global --add safe.directory ${repo_root_dir} && make PREFIX="${PREFIX}" DESTDIR="${DESTDIR}" install"
+fi

 sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${repo_root_dir}/src/runtime" \
@@ -51,11 +51,13 @@ EOF
 trap finish EXIT

 rust_version=${2:-}
-ARCH=${ARCH:-$(uname -m)}
-LIBC=${LIBC:-musl}
-curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSLf | sh -s -- -y --default-toolchain ${rust_version} -t ${ARCH}-unknown-linux-${LIBC}
-source /root/.cargo/env
-rustup target add x86_64-unknown-linux-musl
+if [ -n "${rust_version}" ]; then
+    ARCH=${ARCH:-$(uname -m)}
+    LIBC=${LIBC:-musl}
+    curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSLf | sh -s -- -y --default-toolchain ${rust_version} -t ${ARCH}-unknown-linux-${LIBC}
+    source /root/.cargo/env
+    rustup target add x86_64-unknown-linux-musl
+fi

 pushd "${tmp_dir}"

@@ -16,7 +16,6 @@ source "${script_dir}/../../scripts/lib.sh"

 DESTDIR=${DESTDIR:-${PWD}}
 PREFIX=${PREFIX:-/opt/kata}
-container_image="kata-td-shim-builder"
 kata_version="${kata_version:-}"
 tdshim_repo="${tdshim_repo:-}"
 tdshim_version="${tdshim_version:-}"

@@ -31,9 +30,15 @@ package_output_dir="${package_output_dir:-}"
 [ -n "${tdshim_version}" ] || die "Failed to get TD-shim version or commit"
 [ -n "${tdshim_toolchain}" ] || die "Failed to get TD-shim toolchain to be used to build the project"

-sudo docker build \
+container_image="${CC_BUILDER_REGISTRY}:td-shim-${tdshim_toolchain}-$(get_last_modification ${repo_root_dir} ${script_dir})"
+
+sudo docker pull ${container_image} || \
+    (sudo docker build \
     --build-arg RUST_TOOLCHAIN="${tdshim_toolchain}" \
-    -t "${container_image}" "${script_dir}"
+    -t "${container_image}" \
+    "${script_dir}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${PWD}" \
@@ -16,19 +16,21 @@ source "${script_dir}/../../scripts/lib.sh"

 DESTDIR=${DESTDIR:-${PWD}}
 PREFIX=${PREFIX:-/opt/kata}
-container_image="kata-virtiofsd-builder"
 kata_version="${kata_version:-}"
 virtiofsd_repo="${virtiofsd_repo:-}"
 virtiofsd_version="${virtiofsd_version:-}"
+virtiofsd_toolchain="${virtiofsd_toolchain:-}"
 virtiofsd_zip="${virtiofsd_zip:-}"
 package_output_dir="${package_output_dir:-}"

 [ -n "${virtiofsd_repo}" ] || virtiofsd_repo=$(get_from_kata_deps "externals.virtiofsd.url")
 [ -n "${virtiofsd_version}" ] || virtiofsd_version=$(get_from_kata_deps "externals.virtiofsd.version")
+[ -n "${virtiofsd_toolchain}" ] || virtiofsd_toolchain=$(get_from_kata_deps "externals.virtiofsd.toolchain")
 [ -n "${virtiofsd_zip}" ] || virtiofsd_zip=$(get_from_kata_deps "externals.virtiofsd.meta.binary")

 [ -n "${virtiofsd_repo}" ] || die "Failed to get virtiofsd repo"
 [ -n "${virtiofsd_version}" ] || die "Failed to get virtiofsd version or commit"
+[ -n "${virtiofsd_toolchain}" ] || die "Failed to get the rust toolchain to build virtiofsd"
 [ -n "${virtiofsd_zip}" ] || die "Failed to get virtiofsd binary URL"

 ARCH=$(uname -m)

@@ -47,8 +49,14 @@ case ${ARCH} in
         ;;
 esac

-sudo docker build \
-    -t "${container_image}" "${script_dir}/${libc}"
+container_image="${CC_BUILDER_REGISTRY}:virtiofsd-${virtiofsd_toolchain}-${libc}-$(get_last_modification ${repo_root_dir} ${script_dir})"
+
+sudo docker pull ${container_image} || \
+    (sudo docker build \
+    --build-arg RUST_TOOLCHAIN="${virtiofsd_toolchain}" \
+    -t "${container_image}" "${script_dir}/${libc}" && \
+    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
+    push_to_registry "${container_image}")

 sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
     -w "${PWD}" \
@@ -4,6 +4,7 @@

 FROM ubuntu:20.04
 ENV DEBIAN_FRONTEND=noninteractive
+ARG RUST_TOOLCHAIN
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]

 RUN apt-get update && \

@@ -16,4 +17,4 @@ RUN apt-get update && \
     libseccomp-dev \
     unzip && \
     apt-get clean && rm -rf /var/lib/lists/ && \
-    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain ${RUST_TOOLCHAIN}
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0

 FROM alpine:3.16.2
+ARG RUST_TOOLCHAIN

 SHELL ["/bin/ash", "-o", "pipefail", "-c"]
 RUN apk --no-cache add \

@@ -13,4 +14,4 @@ RUN apk --no-cache add \
     libcap-ng-static \
     libseccomp-static \
     musl-dev && \
-    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain ${RUST_TOOLCHAIN}
@@ -314,6 +314,7 @@ externals:
     description: "vhost-user virtio-fs device backend written in Rust"
     url: "https://gitlab.com/virtio-fs/virtiofsd"
     version: "v1.3.0"
+    toolchain: "1.62.0"
     meta:
       # From https://gitlab.com/virtio-fs/virtiofsd/-/releases/v1.3.0,
      # this is the link labelled virtiofsd-v1.3.0.zip