Mirror of https://github.com/kata-containers/kata-containers.git (synced 2026-02-22 14:54:23 +00:00)

Compare commits (135 commits)
f2d72874a1, fc2c77f3b6, 07d2b00863, 39bf10875b, 28b57627bd, 02b5fa15ac, cfbc425041, 341e5ca58e,
95fc585103, cf8b82794a, e1f6aca9de, 7b0c1d0a8c, 07fe7325c2, 372346baed, 5f1b1d8932, a5c863a907,
cc9ecedaea, 69ed4bc3b7, c82db45eaa, a88174e977, c15f77737a, eef2795226, e604e51b3d, 1d56fd0308,
7d45382f54, 3fb91dd631, 59ed19e8b2, 2424c1a562, b4695f6303, 037281d699, 9b7fb31ce6, bec1de7bd7,
ac4f986e3e, b4b3471bcb, 491210ed22, 5d7c5bdfa4, c2ba15c111, 007514154c, 4ef05c6176, f02d540799,
935327b5aa, e93ed6c20e, d4bd314d52, 9113606d45, 42cd2ce6e4, a93ff57c7d, 300a827d03, 652662ae09,
111082db07, 0033a0c23a, 62b3a07e2f, 5d96734831, a94982d8b8, 84a411dac4, c86f76d324, a8ccd9a2ac,
9e609dd34f, 531a29137e, 14a3adf4d6, 5f9cc86b5a, c7064027f4, 57d893b5dc, 4aa7d4e358, fe55b29ef0,
fb87bf221f, 0f6113a743, a23ceac913, 2a67038836, 25e6f4b2a5, 5e1fc5a63f, 8b998e5f0c, 9b614a4615,
85d3bcd713, 711d12e5db, efd492d562, 9c19d7674a, 9105c1fa0c, 6f4f94a9f0, 20442c0eae, b87b4b6756,
4011071526, de3452f8e1, bdf10e651a, 92b8091f62, ca2098f828, f9930971a2, 559018554b, 9699c7ed06,
eac197d3b7, 7f659f3d63, 16a91fccbe, 175fe8bc66, 6bb00d9a1d, 500508a592, 3240f8a4b8, c472fe1924,
3e5d360185, 6f70ab9169, 1230bc77f2, f5a9aaa100, 28166c8a32, d93900c128, 1b10e82559, e46d24184a,
f340b31c41, c3d1b3c5e3, 8763a9bc90, 78cbf33f1d, 5dba680afb, 48e2df53f7, 2cc48f7822, 920484918c,
9486790089, 516daecc50, 30a64092a7, 322073bea1, e69635b376, fa7bca4179, 6c19a067a0, 5e4990bcf5,
893f6a4ca0, e43c59a2c6, 0debf77770, b4da4b5e3b, ed4c727c12, e9f36f8187, a5733877a4, 62e8815a5a,
64306dc888, 358ebf5134, 2242aee099, ef367d81f2, 706e8bce89, d7f6fabe65, 780b36f477
.github/actionlint.yaml (3 changed lines)

```diff
@@ -21,4 +21,5 @@ self-hosted-runner:
   - sev-snp
   - s390x
   - s390x-large
-  - tdx
+  - tdx-no-attestation
+  - tdx-attestation
```
.github/workflows/actionlint.yaml (new file, 33 lines)

```diff
@@ -0,0 +1,33 @@
+name: Lint GHA workflows
+
+on:
+  workflow_dispatch:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - reopened
+      - synchronize
+    paths:
+      - '.github/workflows/**'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  run-actionlint:
+    env:
+      GH_TOKEN: ${{ github.token }}
+    runs-on: ubuntu-24.04
+    steps:
+      - name: Checkout the code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Install actionlint gh extension
+        run: gh extension install https://github.com/cschleiden/gh-actionlint
+
+      - name: Run actionlint
+        run: gh actionlint
```
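The same lint can be exercised locally before pushing; a minimal sketch, assuming network access and the upstream-documented actionlint installer script (the `gh-actionlint` extension used in the workflow wraps the same binary):

```bash
# Local-run sketch (assumption: the download-actionlint.bash helper is
# the installer documented by the upstream rhysd/actionlint project).
bash <(curl -sSfL https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
# Lint the same files the workflow's `paths` filter watches.
./actionlint .github/workflows/*.yaml
```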
.github/workflows/add-issues-to-project.yaml (2 changed lines)

```diff
@@ -33,7 +33,7 @@ jobs:
       run: |
         # Clone into a temporary directory to avoid overwriting
         # any existing github directory.
-        pushd $(mktemp -d) &>/dev/null
+        pushd "$(mktemp -d)" &>/dev/null
         git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
         sudo install hub-util.sh /usr/local/bin
         popd &>/dev/null
```
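This quoting pattern repeats through most of the workflow changes below. The reason is shell word splitting: an unquoted `$(…)` or `$VAR` is split on whitespace before the command runs. A small sketch (the path with a space is hypothetical, purely for illustration):

```bash
#!/usr/bin/env bash
# Word-splitting demo: mktemp can return a path containing spaces if
# TMPDIR does, and unquoted expansion then passes two words to pushd.
dir="/tmp/kata test"   # hypothetical path with a space
mkdir -p "$dir"

pushd $dir 2>/dev/null || echo "unquoted: pushd got 2 args and failed"
pushd "$dir" >/dev/null && echo "quoted: now in '$PWD'"
```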
.github/workflows/add-pr-sizing-label.yaml (2 changed lines)

```diff
@@ -36,7 +36,7 @@ jobs:
       run: |
         # Clone into a temporary directory to avoid overwriting
         # any existing github directory.
-        pushd $(mktemp -d) &>/dev/null
+        pushd "$(mktemp -d)" &>/dev/null
         git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
         sudo install pr-add-size-label.sh /usr/local/bin
         popd &>/dev/null
```
.github/workflows/build-checks.yaml (16 changed lines)

```diff
@@ -49,8 +49,8 @@ jobs:
     steps:
       - name: Adjust a permission for repo
         run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE $HOME
-          sudo rm -rf $GITHUB_WORKSPACE/* && echo "GITHUB_WORKSPACE removed" || { sleep 10 && sudo rm -rf $GITHUB_WORKSPACE/*; }
+          sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE" "$HOME"
+          sudo rm -rf "$GITHUB_WORKSPACE"/* || { sleep 10 && sudo rm -rf "$GITHUB_WORKSPACE"/*; }
           sudo rm -f /tmp/kata_hybrid* # Sometime we got leftover from test_setup_hvsock_failed()

       - name: Checkout the code
@@ -67,12 +67,12 @@ jobs:
         if: ${{ matrix.component == 'runtime' }}
         run: |
           ./tests/install_go.sh -f -p
-          echo "/usr/local/go/bin" >> $GITHUB_PATH
+          echo "/usr/local/go/bin" >> "$GITHUB_PATH"
       - name: Install rust
         if: ${{ matrix.component != 'runtime' }}
         run: |
           ./tests/install_rust.sh
-          echo "${HOME}/.cargo/bin" >> $GITHUB_PATH
+          echo "${HOME}/.cargo/bin" >> "$GITHUB_PATH"
       - name: Install musl-tools
         if: ${{ matrix.component != 'runtime' }}
         run: sudo apt-get -y install musl-tools
@@ -86,8 +86,8 @@ jobs:
           gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
           ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
           echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
-          echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
-          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
+          echo "LIBSECCOMP_LINK_TYPE=static" >> "$GITHUB_ENV"
+          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> "$GITHUB_ENV"
       - name: Install protobuf-compiler
         if: ${{ matrix.command != 'make vendor' && (matrix.component == 'agent' || matrix.component == 'genpolicy' || matrix.component == 'agent-ctl') }}
         run: sudo apt-get -y install protobuf-compiler
@@ -97,8 +97,8 @@ jobs:
       - name: Setup XDG_RUNTIME_DIR for the `runtime` tests
         if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.component == 'runtime' }}
         run: |
-          XDG_RUNTIME_DIR=$(mktemp -d /tmp/kata-tests-$USER.XXX | tee >(xargs chmod 0700))
-          echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> $GITHUB_ENV
+          XDG_RUNTIME_DIR=$(mktemp -d "/tmp/kata-tests-$USER.XXX" | tee >(xargs chmod 0700))
+          echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> "$GITHUB_ENV"
       - name: Running `${{ matrix.command }}` for ${{ matrix.component }}
         run: |
           cd ${{ matrix.component-path }}
```
```diff
@@ -37,6 +37,7 @@ jobs:
         - cloud-hypervisor
         - cloud-hypervisor-glibc
         - coco-guest-components
+        - csi-kata-directvolume
         - firecracker
         - genpolicy
         - kata-ctl
@@ -88,7 +89,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -180,7 +181,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -199,9 +200,25 @@ jobs:
           retention-days: 15
           if-no-files-found: error

+  # We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
+  remove-rootfs-binary-artifacts:
+    runs-on: ubuntu-22.04
+    needs: build-asset-rootfs
+    strategy:
+      matrix:
+        asset:
+          - agent
+          - coco-guest-components
+          - pause-image
+    steps:
+      - uses: geekyeggo/delete-artifact@v5
+        if: ${{ inputs.stage == 'release' }}
+        with:
+          name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
+
   build-asset-shim-v2:
     runs-on: ubuntu-22.04
-    needs: [build-asset, build-asset-rootfs]
+    needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
     steps:
       - name: Login to Kata Containers quay.io
         if: ${{ inputs.push-to-registry == 'yes' }}
@@ -236,7 +253,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: shim-v2
           TAR_OUTPUT: shim-v2.tar.gz
```
```diff
@@ -61,7 +61,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -121,7 +121,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -140,9 +140,23 @@ jobs:
           retention-days: 15
           if-no-files-found: error

+  # We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
+  remove-rootfs-binary-artifacts:
+    runs-on: ubuntu-22.04
+    needs: build-asset-rootfs
+    strategy:
+      matrix:
+        asset:
+          - agent
+    steps:
+      - uses: geekyeggo/delete-artifact@v5
+        if: ${{ inputs.stage == 'release' }}
+        with:
+          name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
+
   build-asset-shim-v2:
     runs-on: arm64-builder
-    needs: [build-asset, build-asset-rootfs]
+    needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
     steps:
       - name: Login to Kata Containers quay.io
         if: ${{ inputs.push-to-registry == 'yes' }}
@@ -176,7 +190,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: shim-v2
           TAR_OUTPUT: shim-v2.tar.gz
@@ -201,7 +215,7 @@ jobs:
     steps:
       - name: Adjust a permission for repo
         run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
+          sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"

       - uses: actions/checkout@v4
         with:
```
```diff
@@ -36,8 +36,8 @@ jobs:
     steps:
       - name: Prepare the self-hosted runner
         run: |
-          ${HOME}/scripts/prepare_runner.sh
-          sudo rm -rf $GITHUB_WORKSPACE/*
+          "${HOME}/scripts/prepare_runner.sh"
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - name: Login to Kata Containers quay.io
         if: ${{ inputs.push-to-registry == 'yes' }}
@@ -63,7 +63,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -94,8 +94,8 @@ jobs:
     steps:
       - name: Prepare the self-hosted runner
         run: |
-          ${HOME}/scripts/prepare_runner.sh
-          sudo rm -rf $GITHUB_WORKSPACE/*
+          "${HOME}/scripts/prepare_runner.sh"
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - name: Login to Kata Containers quay.io
         if: ${{ inputs.push-to-registry == 'yes' }}
@@ -129,7 +129,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -148,14 +148,28 @@ jobs:
           retention-days: 1
           if-no-files-found: error

+  # We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
+  remove-rootfs-binary-artifacts:
+    runs-on: ubuntu-22.04
+    needs: build-asset-rootfs
+    strategy:
+      matrix:
+        asset:
+          - agent
+    steps:
+      - uses: geekyeggo/delete-artifact@v5
+        if: ${{ inputs.stage == 'release' }}
+        with:
+          name: kata-artifacts-ppc64le-${{ matrix.asset}}${{ inputs.tarball-suffix }}
+
   build-asset-shim-v2:
     runs-on: ppc64le
-    needs: [build-asset, build-asset-rootfs]
+    needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
     steps:
       - name: Prepare the self-hosted runner
         run: |
-          ${HOME}/scripts/prepare_runner.sh
-          sudo rm -rf $GITHUB_WORKSPACE/*
+          "${HOME}/scripts/prepare_runner.sh"
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - name: Login to Kata Containers quay.io
         if: ${{ inputs.push-to-registry == 'yes' }}
@@ -189,7 +203,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: shim-v2
           TAR_OUTPUT: shim-v2.tar.gz
@@ -214,7 +228,7 @@ jobs:
     steps:
      - name: Adjust a permission for repo
        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
+          sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"

       - uses: actions/checkout@v4
         with:
```
```diff
@@ -67,7 +67,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -153,7 +153,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: ${{ matrix.asset }}
           TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
@@ -204,7 +204,7 @@ jobs:
           make boot-image-se-tarball
           build_dir=$(readlink -f build)
           sudo cp -r "${build_dir}" "kata-build"
-          sudo chown -R $(id -u):$(id -g) "kata-build"
+          sudo chown -R "$(id -u)":"$(id -g)" "kata-build"
         env:
           HKD_PATH: "host-key-document"
@@ -216,9 +216,25 @@ jobs:
           retention-days: 1
           if-no-files-found: error

+  # We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
+  remove-rootfs-binary-artifacts:
+    runs-on: ubuntu-22.04
+    needs: [build-asset-rootfs, build-asset-boot-image-se]
+    strategy:
+      matrix:
+        asset:
+          - agent
+          - coco-guest-components
+          - pause-image
+    steps:
+      - uses: geekyeggo/delete-artifact@v5
+        if: ${{ inputs.stage == 'release' }}
+        with:
+          name: kata-artifacts-s390x-${{ matrix.asset}}${{ inputs.tarball-suffix }}
+
   build-asset-shim-v2:
     runs-on: s390x
-    needs: [build-asset, build-asset-rootfs]
+    needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
     steps:
       - name: Login to Kata Containers quay.io
         if: ${{ inputs.push-to-registry == 'yes' }}
@@ -253,7 +269,7 @@ jobs:
           make "${KATA_ASSET}-tarball"
           build_dir=$(readlink -f build)
           # store-artifact does not work with symlink
-          mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
+          mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
         env:
           KATA_ASSET: shim-v2
           TAR_OUTPUT: shim-v2.tar.gz
@@ -275,7 +291,11 @@ jobs:

   create-kata-tarball:
     runs-on: s390x
-    needs: [build-asset, build-asset-rootfs, build-asset-boot-image-se, build-asset-shim-v2]
+    needs:
+      - build-asset
+      - build-asset-rootfs
+      - build-asset-boot-image-se
+      - build-asset-shim-v2
     steps:
       - uses: actions/checkout@v4
         with:
```
.github/workflows/cargo-deny-runner.yaml (2 changed lines)

```diff
@@ -24,7 +24,7 @@ jobs:
       run: bash cargo-deny-generator.sh
       working-directory: ./.github/cargo-deny-composite-action/
       env:
-        GOPATH: ${{ runner.workspace }}/kata-containers
+        GOPATH: ${{ github.workspace }}/kata-containers
     - name: Run Action
       if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
       uses: ./.github/cargo-deny-composite-action
```
.github/workflows/ci-nightly-s390x.yaml (2 changed lines)

```diff
@@ -16,6 +16,6 @@ jobs:
     - name: Fetch a test result for {{ matrix.test_title }}
       run: |
         file_name="${TEST_TITLE}-$(date +%Y-%m-%d).log"
-        /home/${USER}/script/handle_test_log.sh download $file_name
+        "/home/${USER}/script/handle_test_log.sh" download "$file_name"
       env:
         TEST_TITLE: ${{ matrix.test_title }}
```
.github/workflows/ci-weekly.yaml (1 changed line)

```diff
@@ -83,4 +83,5 @@ jobs:
       commit-hash: ${{ inputs.commit-hash }}
       pr-number: ${{ inputs.pr-number }}
       target-branch: ${{ inputs.target-branch }}
+      tarball-suffix: -${{ inputs.tag }}
     secrets: inherit
```
.github/workflows/ci.yaml (55 changed lines)

```diff
@@ -135,6 +135,56 @@ jobs:
       platforms: linux/amd64, linux/s390x
       file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile

+  publish-csi-driver-amd64:
+    needs: build-kata-static-tarball-amd64
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.commit-hash }}
+          fetch-depth: 0
+
+      - name: Rebase atop of the latest target branch
+        run: |
+          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
+        env:
+          TARGET_BRANCH: ${{ inputs.target-branch }}
+
+      - name: get-kata-tarball
+        uses: actions/download-artifact@v4
+        with:
+          name: kata-static-tarball-amd64-${{ inputs.tag }}
+          path: kata-artifacts
+
+      - name: Install tools
+        run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-artifacts
+
+      - name: Copy binary into Docker context
+        run: |
+          # Copy to the location where the Dockerfile expects the binary.
+          mkdir -p src/tools/csi-kata-directvolume/bin/
+          cp /opt/kata/bin/csi-kata-directvolume src/tools/csi-kata-directvolume/bin/directvolplugin
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Login to Kata Containers ghcr.io
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Docker build and push
+        uses: docker/build-push-action@v5
+        with:
+          tags: ghcr.io/kata-containers/csi-kata-directvolume:${{ inputs.pr-number }}
+          push: true
+          context: src/tools/csi-kata-directvolume/
+          platforms: linux/amd64
+          file: src/tools/csi-kata-directvolume/Dockerfile
+
   run-kata-monitor-tests:
     if: ${{ inputs.skip-test != 'yes' }}
     needs: build-kata-static-tarball-amd64
@@ -173,7 +223,10 @@ jobs:

   run-kata-coco-tests:
     if: ${{ inputs.skip-test != 'yes' }}
-    needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
+    needs:
+      - publish-kata-deploy-payload-amd64
+      - build-and-publish-tee-confidential-unencrypted-image
+      - publish-csi-driver-amd64
     uses: ./.github/workflows/run-kata-coco-tests.yaml
     with:
       tarball-suffix: -${{ inputs.tag }}
```
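Once the new job has run, its image can be pulled for ad-hoc inspection; a sketch assuming a hypothetical PR number and public read access to the package:

```bash
# Tag scheme taken from the workflow above: the image is tagged with the
# PR number. 12345 is a hypothetical example, not a real PR.
docker pull ghcr.io/kata-containers/csi-kata-directvolume:12345
```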
.github/workflows/darwin-tests.yaml (2 changed lines)

```diff
@@ -16,7 +16,7 @@ jobs:
     runs-on: macos-latest
     steps:
       - name: Install Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
           go-version: 1.22.2
       - name: Checkout code
```
.github/workflows/docs-url-alive-check.yaml (10 changed lines)

```diff
@@ -12,15 +12,15 @@ jobs:
       target_branch: ${{ github.base_ref }}
     steps:
       - name: Install Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
           go-version: 1.22.2
         env:
-          GOPATH: ${{ runner.workspace }}/kata-containers
+          GOPATH: ${{ github.workspace }}/kata-containers
       - name: Set env
         run: |
-          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
-          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+          echo "GOPATH=${{ github.workspace }}" >> "$GITHUB_ENV"
+          echo "${{ github.workspace }}/bin" >> "$GITHUB_PATH"
       - name: Checkout code
         uses: actions/checkout@v4
         with:
@@ -29,4 +29,4 @@ jobs:
       # docs url alive check
       - name: Docs URL Alive Check
         run: |
-          cd ${GOPATH}/src/github.com/${{ github.repository }} && make docs-url-alive-check
+          cd "${GOPATH}/src/github.com/${{ github.repository }}" && make docs-url-alive-check
```
```diff
@@ -20,9 +20,9 @@ jobs:
       run: |
         pushd tools/packaging/kata-deploy/runtimeclasses/
         echo "::group::Combine runtime classes"
-        for runtimeClass in `find . -type f \( -name "*.yaml" -and -not -name "kata-runtimeClasses.yaml" \) | sort`; do
+        for runtimeClass in $(find . -type f \( -name "*.yaml" -and -not -name "kata-runtimeClasses.yaml" \) | sort); do
           echo "Adding ${runtimeClass} to the resultingRuntimeClasses.yaml"
-          cat ${runtimeClass} >> resultingRuntimeClasses.yaml;
+          cat "${runtimeClass}" >> resultingRuntimeClasses.yaml;
         done
         echo "::endgroup::"
         echo "::group::Displaying the content of resultingRuntimeClasses.yaml"
```
```diff
@@ -31,7 +31,7 @@ jobs:
       run: |
         # Clone into a temporary directory to avoid overwriting
         # any existing github directory.
-        pushd $(mktemp -d) &>/dev/null
+        pushd "$(mktemp -d)" &>/dev/null
         git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
         sudo install hub-util.sh /usr/local/bin
         popd &>/dev/null
@@ -72,9 +72,9 @@ jobs:
        project_type="org"
        project_column="In progress"

-        for issue_url in $(echo "$linked_issue_urls")
+        for issue_url in $linked_issue_urls
         do
-          issue=$(echo "$issue_url"| awk -F\/ '{print $NF}' || true)
+          issue=$(echo "$issue_url"| awk -F/ '{print $NF}' || true)

           [ -z "$issue" ] && {
             echo "::error::Cannot determine issue number from $issue_url for PR $pr"
```
```diff
@@ -62,5 +62,5 @@ jobs:
       id: build-and-push-kata-payload
       run: |
         ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-          $(pwd)/kata-static.tar.xz \
+          "$(pwd)"/kata-static.tar.xz \
           ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
```
```diff
@@ -28,7 +28,7 @@ jobs:
     steps:
       - name: Adjust a permission for repo
         run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
+          sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"

       - uses: actions/checkout@v4
         with:
@@ -66,6 +66,5 @@ jobs:
       id: build-and-push-kata-payload
       run: |
         ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-          $(pwd)/kata-static.tar.xz \
+          "$(pwd)"/kata-static.tar.xz \
           ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
```
```diff
@@ -28,12 +28,12 @@ jobs:
     steps:
       - name: Prepare the self-hosted runner
         run: |
-          ${HOME}/scripts/prepare_runner.sh
-          sudo rm -rf $GITHUB_WORKSPACE/*
+          "${HOME}/scripts/prepare_runner.sh"
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - name: Adjust a permission for repo
         run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
+          sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"

       - uses: actions/checkout@v4
         with:
@@ -71,5 +71,5 @@ jobs:
       id: build-and-push-kata-payload
       run: |
         ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-          $(pwd)/kata-static.tar.xz \
+          "$(pwd)"/kata-static.tar.xz \
           ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
```
```diff
@@ -62,5 +62,5 @@ jobs:
       id: build-and-push-kata-payload
       run: |
         ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-          $(pwd)/kata-static.tar.xz \
+          "$(pwd)"/kata-static.tar.xz \
           ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
```
.github/workflows/release-amd64.yaml (12 changed lines)

```diff
@@ -42,18 +42,18 @@ jobs:
       run: |
         # We need to do such trick here as the format of the $GITHUB_REF
         # is "refs/tags/<tag>"
-        tag=$(echo $GITHUB_REF | cut -d/ -f3-)
+        tag=$(echo "$GITHUB_REF" | cut -d/ -f3-)
         if [ "${tag}" = "main" ]; then
           tag=$(./tools/packaging/release/release.sh release-version)
-          tags=(${tag} "latest")
+          tags=("${tag}" "latest")
         else
-          tags=(${tag})
+          tags=("${tag}")
         fi
-        for tag in ${tags[@]}; do
+        for tag in "${tags[@]}"; do
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
         done
```
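The array fixes in this hunk guard against the same word splitting at assignment time; a sketch with a hypothetical tag value containing a space:

```bash
#!/usr/bin/env bash
# Why tags=("${tag}" "latest") needs quotes: unquoted array members are
# word-split. The tag value below is hypothetical, for illustration only.
tag="3.8.0 rc1"
unquoted=(${tag} "latest")   # 3 elements: 3.8.0 / rc1 / latest
quoted=("${tag}" "latest")   # 2 elements: "3.8.0 rc1" / latest
echo "unquoted: ${#unquoted[@]} elements, quoted: ${#quoted[@]} elements"
```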
.github/workflows/release-arm64.yaml (12 changed lines)

```diff
@@ -42,18 +42,18 @@ jobs:
       run: |
         # We need to do such trick here as the format of the $GITHUB_REF
         # is "refs/tags/<tag>"
-        tag=$(echo $GITHUB_REF | cut -d/ -f3-)
+        tag=$(echo "$GITHUB_REF" | cut -d/ -f3-)
         if [ "${tag}" = "main" ]; then
           tag=$(./tools/packaging/release/release.sh release-version)
-          tags=(${tag} "latest")
+          tags=("${tag}" "latest")
         else
-          tags=(${tag})
+          tags=("${tag}")
         fi
-        for tag in ${tags[@]}; do
+        for tag in "${tags[@]}"; do
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
         done
```
.github/workflows/release-ppc64le.yaml (16 changed lines)

```diff
@@ -20,8 +20,8 @@ jobs:
     steps:
       - name: Prepare the self-hosted runner
         run: |
-          bash ${HOME}/scripts/prepare_runner.sh
-          sudo rm -rf $GITHUB_WORKSPACE/*
+          bash "${HOME}/scripts/prepare_runner.sh"
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - name: Login to Kata Containers docker.io
         uses: docker/login-action@v3
@@ -47,18 +47,18 @@ jobs:
       run: |
         # We need to do such trick here as the format of the $GITHUB_REF
         # is "refs/tags/<tag>"
-        tag=$(echo $GITHUB_REF | cut -d/ -f3-)
+        tag=$(echo "$GITHUB_REF" | cut -d/ -f3-)
         if [ "${tag}" = "main" ]; then
           tag=$(./tools/packaging/release/release.sh release-version)
-          tags=(${tag} "latest")
+          tags=("${tag}" "latest")
         else
-          tags=(${tag})
+          tags=("${tag}")
         fi
-        for tag in ${tags[@]}; do
+        for tag in "${tags[@]}"; do
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
         done
```
.github/workflows/release-s390x.yaml (12 changed lines)

```diff
@@ -42,18 +42,18 @@ jobs:
       run: |
         # We need to do such trick here as the format of the $GITHUB_REF
         # is "refs/tags/<tag>"
-        tag=$(echo $GITHUB_REF | cut -d/ -f3-)
+        tag=$(echo "$GITHUB_REF" | cut -d/ -f3-)
         if [ "${tag}" = "main" ]; then
           tag=$(./tools/packaging/release/release.sh release-version)
-          tags=(${tag} "latest")
+          tags=("${tag}" "latest")
         else
-          tags=(${tag})
+          tags=("${tag}")
         fi
-        for tag in ${tags[@]}; do
+        for tag in "${tags[@]}"; do
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
           ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
+            "$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
             "${tag}-${{ inputs.target-arch }}"
         done
```
.github/workflows/release.yaml (17 changed lines)

```diff
@@ -175,6 +175,23 @@ jobs:
         env:
           GH_TOKEN: ${{ github.token }}

+  upload-helm-chart-tarball:
+    needs: release
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Install helm
+        uses: azure/setup-helm@v4.2.0
+        id: install
+
+      - name: Generate and upload helm chart tarball
+        run: |
+          ./tools/packaging/release/release.sh upload-helm-chart-tarball
+        env:
+          GH_TOKEN: ${{ github.token }}
+
   publish-release:
     needs: [ build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le, publish-multi-arch-images, upload-multi-arch-static-tarball, upload-versions-yaml, upload-cargo-vendored-tarball, upload-libseccomp-tarball ]
     runs-on: ubuntu-22.04
```
```diff
@@ -30,12 +30,12 @@ jobs:
       KATA_HYPERVISOR: ${{ matrix.vmm }}
     steps:
       - name: Adjust a permission for repo
-        run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE
+        run: sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"

       - name: Prepare the self-hosted runner
         run: |
-          bash ${HOME}/scripts/prepare_runner.sh cri-containerd
-          sudo rm -rf $GITHUB_WORKSPACE/*
+          bash "${HOME}/scripts/prepare_runner.sh" cri-containerd
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - uses: actions/checkout@v4
         with:
@@ -62,6 +62,6 @@ jobs:

       - name: Run cri-containerd tests
         run: bash tests/integration/cri-containerd/gha-run.sh run

       - name: Cleanup actions for the self hosted runner
-        run: ${HOME}/scripts/cleanup_runner.sh
+        run: bash "${HOME}/scripts/cleanup_runner.sh"
```

```diff
@@ -86,11 +86,11 @@ jobs:

       - name: Install `bats`
         run: bash tests/integration/kubernetes/gha-run.sh install-bats

       - name: Run tests
         timeout-minutes: 30
         run: bash tests/integration/kubernetes/gha-run.sh run-tests

       - name: Collect artifacts ${{ matrix.vmm }}
         if: always()
         run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
@@ -99,7 +99,7 @@ jobs:
       - name: Archive artifacts ${{ matrix.vmm }}
         uses: actions/upload-artifact@v4
         with:
-          name: k8s-tests-${{ matrix.vmm }}-${{ matrix.snapshotter }}-${{ matrix.k8s }}-${{ matrix.instance }}-${{ inputs.tag }}
+          name: k8s-tests-${{ matrix.vmm }}-${{ matrix.snapshotter }}-${{ matrix.k8s }}-${{ inputs.tag }}
           path: /tmp/artifacts
           retention-days: 1
```
.github/workflows/run-k8s-tests-on-ppc64le.yaml (14 changed lines)

```diff
@@ -44,9 +44,9 @@ jobs:
       TARGET_ARCH: "ppc64le"
     steps:
       - name: Prepare the self-hosted runner
-        run: |
-          bash ${HOME}/scripts/prepare_runner.sh kubernetes
-          sudo rm -rf $GITHUB_WORKSPACE/*
+        run: |
+          bash "${HOME}/scripts/prepare_runner.sh" kubernetes
+          sudo rm -rf "$GITHUB_WORKSPACE"/*

       - uses: actions/checkout@v4
         with:
@@ -62,13 +62,13 @@ jobs:
       - name: Install golang
         run: |
           ./tests/install_go.sh -f -p
-          echo "/usr/local/go/bin" >> $GITHUB_PATH
+          echo "/usr/local/go/bin" >> "$GITHUB_PATH"

       - name: Prepare the runner for k8s cluster creation
-        run: bash ${HOME}/scripts/k8s_cluster_cleanup.sh
+        run: bash "${HOME}/scripts/k8s_cluster_cleanup.sh"

       - name: Create k8s cluster using kubeadm
-        run: bash ${HOME}/scripts/k8s_cluster_create.sh
+        run: bash "${HOME}/scripts/k8s_cluster_create.sh"

       - name: Deploy Kata
         timeout-minutes: 10
@@ -79,4 +79,4 @@ jobs:
         run: bash tests/integration/kubernetes/gha-run.sh run-tests

       - name: Delete cluster and post cleanup actions
-        run: bash ${HOME}/scripts/k8s_cluster_cleanup.sh
+        run: bash "${HOME}/scripts/k8s_cluster_cleanup.sh"
```
.github/workflows/run-k8s-tests-on-zvsi.yaml (6 changed lines)

```diff
@@ -88,13 +88,13 @@ jobs:
         TARGET_BRANCH: ${{ inputs.target-branch }}

       - name: Set SNAPSHOTTER to empty if overlayfs
-        run: echo "SNAPSHOTTER=" >> $GITHUB_ENV
+        run: echo "SNAPSHOTTER=" >> "$GITHUB_ENV"
         if: ${{ matrix.snapshotter == 'overlayfs' }}

       - name: Set KBS and KBS_INGRESS if qemu-coco-dev
         run: |
-          echo "KBS=true" >> $GITHUB_ENV
-          echo "KBS_INGRESS=nodeport" >> $GITHUB_ENV
+          echo "KBS=true" >> "$GITHUB_ENV"
+          echo "KBS_INGRESS=nodeport" >> "$GITHUB_ENV"
         if: ${{ matrix.vmm == 'qemu-coco-dev' }}

       # qemu-runtime-rs only works with overlayfs
```
```diff
@@ -21,6 +21,9 @@ on:
       required: false
       type: string
       default: ""
+    tarball-suffix:
+      required: false
+      type: string

 jobs:
   # Generate jobs for testing CoCo on non-TEE environments
@@ -40,7 +43,6 @@ jobs:
       DOCKER_REPO: ${{ inputs.repo }}
       DOCKER_TAG: ${{ inputs.tag }}
       GH_PR_NUMBER: ${{ inputs.pr-number }}
-      KATA_HOST_OS: ${{ matrix.host_os }}
       KATA_HYPERVISOR: ${{ matrix.vmm }}
       # Some tests rely on that variable to run (or not)
       KBS: "true"
```
.github/workflows/run-kata-coco-tests.yaml (32 changed lines)

```diff
@@ -97,6 +97,10 @@ jobs:
         timeout-minutes: 10
         run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client

+      - name: Deploy CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
+
       - name: Run tests
         timeout-minutes: 100
         run: bash tests/integration/kubernetes/gha-run.sh run-tests
@@ -113,6 +117,10 @@ jobs:
         if: ${{ always() && matrix.machine != 'tdx-no-attestation' }}
         run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs

+      - name: Delete CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
+
   run-k8s-tests-on-sev:
     strategy:
       fail-fast: false
@@ -159,10 +167,18 @@ jobs:
         timeout-minutes: 10
         run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev

+      - name: Deploy CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
+
       - name: Run tests
         timeout-minutes: 50
         run: bash tests/integration/kubernetes/gha-run.sh run-tests

+      - name: Delete CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
+
       - name: Delete kata-deploy
         if: always()
         run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev
@@ -172,9 +188,6 @@ jobs:
         run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter

   run-k8s-tests-sev-snp:
-    # Skipping SNP tests to unblock the CI.
-    # Will revert after issue is fixed.
-    if: false
     strategy:
       fail-fast: false
       matrix:
@@ -234,6 +247,10 @@ jobs:
         timeout-minutes: 10
         run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client

+      - name: Deploy CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
+
       - name: Run tests
         timeout-minutes: 50
         run: bash tests/integration/kubernetes/gha-run.sh run-tests
@@ -250,6 +267,10 @@ jobs:
         if: always()
         run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs

+      - name: Delete CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
+
   # Generate jobs for testing CoCo on non-TEE environments
   run-k8s-tests-coco-nontee:
     strategy:
@@ -267,7 +288,6 @@ jobs:
       DOCKER_REPO: ${{ inputs.repo }}
       DOCKER_TAG: ${{ inputs.tag }}
       GH_PR_NUMBER: ${{ inputs.pr-number }}
-      KATA_HOST_OS: ${{ matrix.host_os }}
       KATA_HYPERVISOR: ${{ matrix.vmm }}
       # Some tests rely on that variable to run (or not)
       KBS: "true"
@@ -341,6 +361,10 @@ jobs:
         timeout-minutes: 10
         run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client

+      - name: Deploy CSI driver
+        timeout-minutes: 5
+        run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
+
       - name: Run tests
         timeout-minutes: 80
         run: bash tests/integration/kubernetes/gha-run.sh run-tests
```
.github/workflows/static-checks.yaml (14 changed lines)

```diff
@@ -31,8 +31,8 @@ jobs:
       run: |
         kernel_dir="tools/packaging/kernel/"
         kernel_version_file="${kernel_dir}kata_config_version"
-        modified_files=$(git diff --name-only origin/$GITHUB_BASE_REF..HEAD)
-        if git diff --name-only origin/$GITHUB_BASE_REF..HEAD "${kernel_dir}" | grep "${kernel_dir}"; then
+        modified_files=$(git diff --name-only origin/"$GITHUB_BASE_REF"..HEAD)
+        if git diff --name-only origin/"$GITHUB_BASE_REF"..HEAD "${kernel_dir}" | grep "${kernel_dir}"; then
           echo "Kernel directory has changed, checking if $kernel_version_file has been updated"
           if echo "$modified_files" | grep -v "README.md" | grep "${kernel_dir}" >>"/dev/null"; then
             echo "$modified_files" | grep "$kernel_version_file" >>/dev/null || ( echo "Please bump version in $kernel_version_file" && exit 1)
@@ -107,19 +107,19 @@ jobs:
           path: ./src/github.com/${{ github.repository }}
       - name: Install yq
         run: |
-          cd ${GOPATH}/src/github.com/${{ github.repository }}
+          cd "${GOPATH}/src/github.com/${{ github.repository }}"
           ./ci/install_yq.sh
         env:
           INSTALL_IN_GOPATH: false
       - name: Install golang
         run: |
-          cd ${GOPATH}/src/github.com/${{ github.repository }}
+          cd "${GOPATH}/src/github.com/${{ github.repository }}"
           ./tests/install_go.sh -f -p
-          echo "/usr/local/go/bin" >> $GITHUB_PATH
+          echo "/usr/local/go/bin" >> "$GITHUB_PATH"
       - name: Install system dependencies
         run: |
           sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
       - name: Run check
         run: |
-          export PATH=${PATH}:${GOPATH}/bin
-          cd ${GOPATH}/src/github.com/${{ github.repository }} && ${{ matrix.cmd }}
+          export PATH="${PATH}:${GOPATH}/bin"
+          cd "${GOPATH}/src/github.com/${{ github.repository }}" && ${{ matrix.cmd }}
```
```diff
@@ -627,7 +627,7 @@ the following steps (using rootfs or initrd image).
 >
 > Look for `INIT_PROCESS=systemd` in the `config.sh` osbuilder rootfs config file
 > to verify an osbuilder distro supports systemd for the distro you want to build rootfs for.
-> For an example, see the [Clear Linux config.sh file](../tools/osbuilder/rootfs-builder/clearlinux/config.sh).
+> For an example, see the [Ubuntu config.sh file](../tools/osbuilder/rootfs-builder/ubuntu/config.sh).
 >
 > For a non-systemd-based distro, create an equivalent system
 > service using that distro’s init system syntax. Alternatively, you can build a distro
```
```diff
@@ -135,7 +135,7 @@ See also the [process overview](README.md#process-overview).

 | Image type | Default distro | Init daemon | Reason | Notes |
 |-|-|-|-|-|
-| [image](background.md#root-filesystem-image) | [Clear Linux](https://clearlinux.org) (for x86_64 systems)| systemd | Minimal and highly optimized | systemd offers flexibility |
+| [image](background.md#root-filesystem-image) | [Ubuntu](https://ubuntu.com) (for x86_64 systems) | systemd | Fully tested in our CI | systemd offers flexibility |
 | [initrd](#initrd-image) | [Alpine Linux](https://alpinelinux.org) | Kata [agent](README.md#agent) (as no systemd support) | Security hardened and tiny C library |

 See also:
```
````diff
@@ -29,16 +29,16 @@ __SNP-specific steps:__
 - Build the SNP-specific kernel as shown below (see this [guide](../../tools/packaging/kernel/README.md#build-kata-containers-kernel) for more information)
   ```bash
   $ pushd kata-containers/tools/packaging/
-  $ ./kernel/build-kernel.sh -a x86_64 -x snp setup
-  $ ./kernel/build-kernel.sh -a x86_64 -x snp build
-  $ sudo -E PATH="${PATH}" ./kernel/build-kernel.sh -x snp install
+  $ ./kernel/build-kernel.sh -a x86_64 -x setup
+  $ ./kernel/build-kernel.sh -a x86_64 -x build
+  $ sudo -E PATH="${PATH}" ./kernel/build-kernel.sh -x install
   $ popd
   ```
 - Build a current OVMF capable of SEV-SNP:
   ```bash
   $ pushd kata-containers/tools/packaging/static-build/ovmf
-  $ ./build.sh
-  $ tar -xvf edk2-x86_64.tar.gz
+  $ ovmf_build=sev ./build.sh
+  $ tar -xvf edk2-sev.tar.gz
   $ popd
   ```
 - Build a custom QEMU
@@ -106,7 +106,7 @@ sev_snp_guest = true
 ```
 - Configure an OVMF (add path)
   ```toml
-  firmware = "/path/to/kata-containers/tools/packaging/static-build/ovmf/opt/kata/share/ovmf/OVMF.fd"
+  firmware = "/path/to/kata-containers/tools/packaging/static-build/ovmf/opt/kata/share/ovmf/AMDSEV.fd"
   ```
 - SNP attestation (add cert-chain to default path or add the path with cert-chain)
   ```toml
````
```diff
@@ -233,7 +233,7 @@ pub fn init_rootfs(
     // bind may be only specified in the oci spec options -> flags update r#type
     let m = &{
         let mut mbind = m.clone();
-        if mbind.typ().is_none() && flags & MsFlags::MS_BIND == MsFlags::MS_BIND {
+        if is_none_mount_type(mbind.typ()) && flags & MsFlags::MS_BIND == MsFlags::MS_BIND {
             mbind.set_typ(Some("bind".to_string()));
         }
         mbind
@@ -397,6 +397,13 @@ fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) ->
     Ok(())
 }

+fn is_none_mount_type(typ: &Option<String>) -> bool {
+    match typ {
+        Some(t) => t == "none",
+        None => true,
+    }
+}
+
 fn mount_cgroups(
     cfd_log: RawFd,
     m: &Mount,
```
```diff
@@ -1192,9 +1192,8 @@ mod tests {
     );
     spec.set_annotations(Some(annotations));

-    // create a file in /tmp/cdi with nvidia.json content
-    let cdi_dir = PathBuf::from("/tmp/cdi");
-    let cdi_file = cdi_dir.join("kata.json");
+    let temp_dir = tempdir().expect("Failed to create temporary directory");
+    let cdi_file = temp_dir.path().join("kata.json");

     let cdi_version = "0.6.0";
     let kind = "kata.com/gpu";
@@ -1242,10 +1241,10 @@ mod tests {
     }}"#
     );

-    fs::create_dir_all(&cdi_dir).unwrap();
-    fs::write(&cdi_file, cdi_content).unwrap();
+    fs::write(&cdi_file, cdi_content).expect("Failed to write CDI file");

-    let res = handle_cdi_devices(&logger, &mut spec, "/tmp/cdi", 0).await;
+    let res =
+        handle_cdi_devices(&logger, &mut spec, temp_dir.path().to_str().unwrap(), 0).await;
     println!("modfied spec {:?}", spec);
     assert!(res.is_ok(), "{}", res.err().unwrap());
@@ -1261,7 +1260,7 @@ mod tests {

     let env = spec.process().as_ref().unwrap().env().as_ref().unwrap();

-    // find string TEST_OUTER_ENV in evn
+    // find string TEST_OUTER_ENV in env
     let outer_env = env.iter().find(|e| e.starts_with("TEST_OUTER_ENV"));
     assert!(outer_env.is_some(), "TEST_OUTER_ENV not found in env");
```
```diff
@@ -519,14 +519,13 @@ async fn launch_guest_component_procs(logger: &Logger, config: &AgentConfig) ->
 async fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<()> {
     launch_guest_component_procs(logger, config).await?;

-    fs::write(OCICRYPT_CONFIG_PATH, OCICRYPT_CONFIG.as_bytes())?;
-    env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
-
-    // If a CDH socket exists, initialize the CDH client
+    // If a CDH socket exists, initialize the CDH client and enable ocicrypt
     match tokio::fs::metadata(CDH_SOCKET).await {
         Ok(md) => {
             if md.file_type().is_socket() {
                 cdh::init_cdh_client(CDH_SOCKET_URI).await?;
+                fs::write(OCICRYPT_CONFIG_PATH, OCICRYPT_CONFIG.as_bytes())?;
+                env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
             } else {
                 debug!(logger, "File {} is not a socket", CDH_SOCKET);
             }
```
```diff
@@ -53,6 +53,7 @@ use std::time::Instant;
 use lazy_static::lazy_static;
 use nix::mount::{mount, MntFlags, MsFlags};
 use nix::{unistd, NixPath};
+use oci_spec::runtime as oci;

 use crate::fs::is_symlink;
 use crate::sl;
@@ -799,8 +800,20 @@ pub fn get_mount_options(options: &Option<Vec<String>>) -> Vec<String> {
     }
 }

-pub fn get_mount_type(typ: &Option<String>) -> String {
-    typ.clone().unwrap_or("bind".to_string())
+pub fn get_mount_type(m: &oci::Mount) -> String {
+    m.typ()
+        .clone()
+        .map(|typ| {
+            if typ.as_str() == "none" {
+                if let Some(opts) = m.options() {
+                    if opts.iter().any(|opt| opt == "bind" || opt == "rbind") {
+                        return "bind".to_string();
+                    }
+                }
+            }
+            typ
+        })
+        .unwrap_or("bind".to_string())
 }

 #[cfg(test)]
```
```diff
@@ -580,7 +580,7 @@ static-checks-build: $(GENERATED_FILES)
 $(TARGET): $(GENERATED_FILES) $(TARGET_PATH)

 $(TARGET_PATH): $(SOURCES) | show-summary
-	@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE) $(EXTRA_RUSTFEATURES)
+	@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) $(if $(findstring release,$(BUILD_TYPE)),--release) $(EXTRA_RUSTFEATURES)

 $(GENERATED_FILES): %: %.in
 	@sed \
```
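The Makefile change matters because cargo only has a flag for the release profile; expanding `--$(BUILD_TYPE)` breaks whenever `BUILD_TYPE=debug`. A quick check:

```bash
# cargo accepts --release but has no --debug flag (debug is the default
# profile), so the old --$(BUILD_TYPE) expansion failed for debug builds.
cargo build --release   # fine
cargo build --debug     # error: unexpected argument '--debug' found
```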
```diff
@@ -438,7 +438,7 @@ impl VfioDevice {
         let mut hostdev: HostDevice = self
             .set_vfio_config(iommu_devs_path.clone(), device)
             .context("set vfio config failed")?;
-        let dev_prefix = self.get_vfio_prefix();
+        let dev_prefix = format!("{}_{}", self.get_vfio_prefix(), &vfio_group);
         hostdev.hostdev_id = make_device_nameid(&dev_prefix, index, MAX_DEV_ID_SIZE);

         self.devices.push(hostdev);
```
```diff
@@ -369,6 +369,88 @@ impl ToQemuParams for Cpu {
     }
 }

+/// Error type for CCW Subchannel operations
+#[derive(Debug)]
+enum CcwError {
+    DeviceAlreadyExists(String), // Error when trying to add an existing device
+    #[allow(dead_code)]
+    DeviceNotFound(String), // Error when trying to remove a nonexistent device
+}
+
+/// Represents a CCW subchannel for managing devices
+#[derive(Debug)]
+struct CcwSubChannel {
+    devices: HashMap<String, u32>, // Maps device IDs to slot indices
+    addr: u32,                     // Subchannel address
+    next_slot: u32,                // Next available slot index
+}
+
+impl CcwSubChannel {
+    fn new() -> Self {
+        Self {
+            devices: HashMap::new(),
+            addr: 0,
+            next_slot: 0,
+        }
+    }
+
+    /// Adds a device to the subchannel.
+    ///
+    /// # Arguments
+    /// - `dev_id`: device ID to add
+    ///
+    /// # Returns
+    /// - `Result<u32, CcwError>`: slot index of the added device
+    ///   or an error if the device already exists
+    fn add_device(&mut self, dev_id: &str) -> Result<u32, CcwError> {
+        if self.devices.contains_key(dev_id) {
+            Err(CcwError::DeviceAlreadyExists(dev_id.to_owned()))
+        } else {
+            let slot = self.next_slot;
+            self.devices.insert(dev_id.to_owned(), slot);
+            self.next_slot += 1;
+            Ok(slot)
+        }
+    }
+
+    /// Removes a device from the subchannel by its ID.
+    ///
+    /// # Arguments
+    /// - `dev_id`: device ID to remove
+    ///
+    /// # Returns
+    /// - `Result<(), CcwError>`: Ok(()) if the device was removed
+    ///   or an error if the device was not found
+    #[allow(dead_code)]
+    fn remove_device(&mut self, dev_id: &str) -> Result<(), CcwError> {
+        if self.devices.remove(dev_id).is_some() {
+            Ok(())
+        } else {
+            Err(CcwError::DeviceNotFound(dev_id.to_owned()))
+        }
+    }
+
+    /// Formats the CCW address for a given slot
+    ///
+    /// # Arguments
+    /// - `slot`: slot index
+    ///
+    /// # Returns
+    /// - `String`: formatted CCW address (e.g. `fe.0.0000`)
+    fn address_format_ccw(&self, slot: u32) -> String {
+        format!("fe.{:x}.{:04x}", self.addr, slot)
+    }
+
+    /// Sets the address of the subchannel.
+    /// # Arguments
+    /// - `addr`: subchannel address to set
+    #[allow(dead_code)]
+    fn set_addr(&mut self, addr: u32) -> &mut Self {
+        self.addr = addr;
+        self
+    }
+}
```
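The `address_format_ccw` scheme above is what produces the `devno` strings threaded into the virtio devices below; the same formatting can be reproduced in shell (a sketch, using subchannel set `fe` and `addr` 0 as in the Rust defaults):

```bash
# Reproduces format!("fe.{:x}.{:04x}", addr, slot) from CcwSubChannel.
addr=0
for slot in 0 1 2; do
    printf 'fe.%x.%04x\n' "$addr" "$slot"
done
# prints fe.0.0000, fe.0.0001, fe.0.0002: one unique devno per device slot
```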
#[derive(Debug)]
|
||||
struct Machine {
|
||||
r#type: String,
|
||||
@@ -657,10 +739,16 @@ struct DeviceVhostUserFs {
|
||||
queue_size: u64,
|
||||
romfile: String,
|
||||
iommu_platform: bool,
|
||||
devno: Option<String>,
|
||||
}
|
||||
|
||||
impl DeviceVhostUserFs {
|
||||
fn new(chardev: &str, tag: &str, bus_type: VirtioBusType) -> DeviceVhostUserFs {
|
||||
fn new(
|
||||
chardev: &str,
|
||||
tag: &str,
|
||||
bus_type: VirtioBusType,
|
||||
devno: Option<String>,
|
||||
) -> DeviceVhostUserFs {
|
||||
DeviceVhostUserFs {
|
||||
bus_type,
|
||||
chardev: chardev.to_owned(),
|
||||
@@ -668,6 +756,7 @@ impl DeviceVhostUserFs {
|
||||
queue_size: 0,
|
||||
romfile: String::new(),
|
||||
iommu_platform: false,
|
||||
devno,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -715,6 +804,9 @@ impl ToQemuParams for DeviceVhostUserFs {
|
||||
if self.iommu_platform {
|
||||
params.push("iommu_platform=on".to_owned());
|
||||
}
|
||||
if let Some(devno) = &self.devno {
|
||||
params.push(format!("devno={}", devno));
|
||||
}
|
||||
Ok(vec!["-device".to_owned(), params.join(",")])
|
||||
}
|
||||
}
|
||||
@@ -835,15 +927,17 @@ struct DeviceVirtioBlk {
|
||||
id: String,
|
||||
config_wce: bool,
|
||||
share_rw: bool,
|
||||
devno: Option<String>,
|
||||
}
|
||||
|
||||
impl DeviceVirtioBlk {
|
||||
fn new(id: &str, bus_type: VirtioBusType) -> DeviceVirtioBlk {
|
||||
fn new(id: &str, bus_type: VirtioBusType, devno: Option<String>) -> DeviceVirtioBlk {
|
||||
DeviceVirtioBlk {
|
||||
bus_type,
|
||||
id: id.to_owned(),
|
||||
config_wce: false,
|
||||
share_rw: true,
|
||||
devno,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -877,7 +971,9 @@ impl ToQemuParams for DeviceVirtioBlk {
|
||||
params.push("share-rw=off".to_owned());
|
||||
}
|
||||
params.push(format!("serial=image-{}", self.id));
|
||||
|
||||
if let Some(devno) = &self.devno {
|
||||
params.push(format!("devno={}", devno));
|
||||
}
|
||||
Ok(vec!["-device".to_owned(), params.join(",")])
|
||||
}
|
||||
}
|
||||
@@ -888,16 +984,23 @@ struct VhostVsock {
    guest_cid: u32,
    disable_modern: bool,
    iommu_platform: bool,
    devno: Option<String>,
}

impl VhostVsock {
    fn new(vhostfd: tokio::fs::File, guest_cid: u32, bus_type: VirtioBusType) -> VhostVsock {
    fn new(
        vhostfd: tokio::fs::File,
        guest_cid: u32,
        bus_type: VirtioBusType,
        devno: Option<String>,
    ) -> VhostVsock {
        VhostVsock {
            bus_type,
            vhostfd,
            guest_cid,
            disable_modern: false,
            iommu_platform: false,
            devno,
        }
    }

@@ -923,6 +1026,9 @@ impl ToQemuParams for VhostVsock {
        if self.iommu_platform {
            params.push("iommu_platform=on".to_owned());
        }
        if let Some(devno) = &self.devno {
            params.push(format!("devno={}", devno));
        }
        params.push(format!("vhostfd={}", self.vhostfd.as_raw_fd()));
        params.push(format!("guest-cid={}", self.guest_cid));
@@ -1154,14 +1260,16 @@ struct DeviceVirtioSerial {
    id: String,
    bus_type: VirtioBusType,
    iommu_platform: bool,
    devno: Option<String>,
}

impl DeviceVirtioSerial {
    fn new(id: &str, bus_type: VirtioBusType) -> DeviceVirtioSerial {
    fn new(id: &str, bus_type: VirtioBusType, devno: Option<String>) -> DeviceVirtioSerial {
        DeviceVirtioSerial {
            id: id.to_owned(),
            bus_type,
            iommu_platform: false,
            devno,
        }
    }

@@ -1180,6 +1288,9 @@ impl ToQemuParams for DeviceVirtioSerial {
        if self.iommu_platform {
            params.push("iommu_platform=on".to_owned());
        }
        if let Some(devno) = &self.devno {
            params.push(format!("devno={}", devno));
        }
        Ok(vec!["-device".to_owned(), params.join(",")])
    }
}
@@ -1520,21 +1631,30 @@ struct DeviceVirtioScsi {
    id: String,
    disable_modern: bool,
    iothread: String,
    iommu_platform: bool,
    devno: Option<String>,
}

impl DeviceVirtioScsi {
    fn new(id: &str, disable_modern: bool, bus_type: VirtioBusType) -> Self {
    fn new(id: &str, disable_modern: bool, bus_type: VirtioBusType, devno: Option<String>) -> Self {
        DeviceVirtioScsi {
            bus_type,
            id: id.to_owned(),
            disable_modern,
            iothread: "".to_owned(),
            iommu_platform: false,
            devno,
        }
    }

    fn set_iothread(&mut self, iothread: &str) {
        self.iothread = iothread.to_owned();
    }

    fn set_iommu_platform(&mut self, iommu_platform: bool) -> &mut Self {
        self.iommu_platform = iommu_platform;
        self
    }
}

#[async_trait]
@@ -1549,6 +1669,12 @@ impl ToQemuParams for DeviceVirtioScsi {
        if !self.iothread.is_empty() {
            params.push(format!("iothread={}", self.iothread));
        }
        if self.iommu_platform {
            params.push("iommu_platform=on".to_owned());
        }
        if let Some(devno) = &self.devno {
            params.push(format!("devno={}", devno));
        }
        Ok(vec!["-device".to_owned(), params.join(",")])
    }
}
@@ -1622,10 +1748,15 @@ pub struct QemuCmdLine<'a> {
    knobs: Knobs,

    devices: Vec<Box<dyn ToQemuParams>>,
    ccw_subchannel: Option<CcwSubChannel>,
}

impl<'a> QemuCmdLine<'a> {
    pub fn new(id: &str, config: &'a HypervisorConfig) -> Result<QemuCmdLine<'a>> {
        let ccw_subchannel = match bus_type(config) {
            VirtioBusType::Ccw => Some(CcwSubChannel::new()),
            _ => None,
        };
        let mut qemu_cmd_line = QemuCmdLine {
            id: id.to_string(),
            config,

@@ -1637,6 +1768,7 @@ impl<'a> QemuCmdLine<'a> {
            qmp_socket: QmpSocket::new(MonitorProtocol::Qmp)?,
            knobs: Knobs::new(config),
            devices: Vec::new(),
            ccw_subchannel,
        };

        if config.device_info.enable_iommu {
@@ -1703,8 +1835,20 @@ impl<'a> QemuCmdLine<'a> {
    }

    fn add_scsi_controller(&mut self) {
        let mut virtio_scsi =
            DeviceVirtioScsi::new("scsi0", should_disable_modern(), bus_type(self.config));
        let devno = get_devno_ccw(&mut self.ccw_subchannel, "scsi0");
        let mut virtio_scsi = DeviceVirtioScsi::new(
            "scsi0",
            should_disable_modern(),
            bus_type(self.config),
            devno,
        );

        if self.config.device_info.enable_iommu_platform
            && bus_type(self.config) == VirtioBusType::Ccw
        {
            virtio_scsi.set_iommu_platform(true);
        }

        if self.config.enable_iothreads {
            let iothread_id = "scsi-io-thread";
            let iothread = ObjectIoThread::new(iothread_id);

@@ -1731,8 +1875,8 @@ impl<'a> QemuCmdLine<'a> {
        self.devices.push(Box::new(virtiofsd_socket_chardev));

        let bus_type = bus_type(self.config);

        let mut virtiofs_device = DeviceVhostUserFs::new(chardev_name, mount_tag, bus_type);
        let devno = get_devno_ccw(&mut self.ccw_subchannel, chardev_name);
        let mut virtiofs_device = DeviceVhostUserFs::new(chardev_name, mount_tag, bus_type, devno);
        virtiofs_device.set_queue_size(queue_size);
        if self.config.device_info.enable_iommu_platform && bus_type == VirtioBusType::Ccw {
            virtiofs_device.set_iommu_platform(true);

@@ -1762,13 +1906,16 @@ impl<'a> QemuCmdLine<'a> {
    pub fn add_vsock(&mut self, vhostfd: tokio::fs::File, guest_cid: u32) -> Result<()> {
        clear_cloexec(vhostfd.as_raw_fd()).context("clearing O_CLOEXEC failed on vsock fd")?;

        let mut vhost_vsock_pci = VhostVsock::new(vhostfd, guest_cid, bus_type(self.config));
        let devno = get_devno_ccw(&mut self.ccw_subchannel, "vsock-0");
        let mut vhost_vsock_pci = VhostVsock::new(vhostfd, guest_cid, bus_type(self.config), devno);

        if !self.config.disable_nesting_checks && should_disable_modern() {
            vhost_vsock_pci.set_disable_modern(true);
        }

        if self.config.device_info.enable_iommu_platform {
        if self.config.device_info.enable_iommu_platform
            && bus_type(self.config) == VirtioBusType::Ccw
        {
            vhost_vsock_pci.set_iommu_platform(true);
        }
@@ -1809,9 +1956,11 @@ impl<'a> QemuCmdLine<'a> {
    pub fn add_block_device(&mut self, device_id: &str, path: &str) -> Result<()> {
        self.devices
            .push(Box::new(BlockBackend::new(device_id, path)));
        let devno = get_devno_ccw(&mut self.ccw_subchannel, device_id);
        self.devices.push(Box::new(DeviceVirtioBlk::new(
            device_id,
            bus_type(self.config),
            devno,
        )));
        Ok(())
    }

@@ -1836,7 +1985,8 @@ impl<'a> QemuCmdLine<'a> {
    }

    pub fn add_console(&mut self, console_socket_path: &str) {
        let mut serial_dev = DeviceVirtioSerial::new("serial0", bus_type(self.config));
        let devno = get_devno_ccw(&mut self.ccw_subchannel, "serial0");
        let mut serial_dev = DeviceVirtioSerial::new("serial0", bus_type(self.config), devno);
        if self.config.device_info.enable_iommu_platform
            && bus_type(self.config) == VirtioBusType::Ccw
        {

@@ -1909,3 +2059,15 @@ pub fn get_network_device(

    Ok((netdev, virtio_net_device))
}

fn get_devno_ccw(ccw_subchannel: &mut Option<CcwSubChannel>, device_name: &str) -> Option<String> {
    ccw_subchannel.as_mut().and_then(|subchannel| {
        subchannel.add_device(device_name).map_or_else(
            |err| {
                info!(sl!(), "failed to add device to subchannel: {:?}", err);
                None
            },
            |slot| Some(subchannel.address_format_ccw(slot)),
        )
    })
}
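`get_devno_ccw` ties the two halves of the `CcwSubChannel` together: `add_device` hands out the next free slot and `address_format_ccw` renders it as a `fe.<addr>.<slot>` CCW address. A self-contained sketch of that allocation scheme follows (a simplified restatement using a plain String error rather than the actual `CcwError` type):

use std::collections::HashMap;

// Simplified restatement of the CcwSubChannel logic shown above.
struct SubChannel {
    addr: u32,
    next_slot: u32,
    devices: HashMap<String, u32>,
}

impl SubChannel {
    fn new() -> Self {
        SubChannel { addr: 0, next_slot: 0, devices: HashMap::new() }
    }

    fn add_device(&mut self, dev_id: &str) -> Result<u32, String> {
        if self.devices.contains_key(dev_id) {
            return Err(format!("device {} already exists", dev_id));
        }
        let slot = self.next_slot;
        self.devices.insert(dev_id.to_owned(), slot);
        self.next_slot += 1;
        Ok(slot)
    }

    fn address_format_ccw(&self, slot: u32) -> String {
        // "fe" is the channel subsystem image id QEMU reserves for virtio-ccw devices.
        format!("fe.{:x}.{:04x}", self.addr, slot)
    }
}

fn main() {
    let mut sc = SubChannel::new();
    let slot = sc.add_device("scsi0").unwrap();
    assert_eq!(sc.address_format_ccw(slot), "fe.0.0000");
    let slot = sc.add_device("vsock-0").unwrap();
    assert_eq!(sc.address_format_ccw(slot), "fe.0.0001");
}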
@@ -120,7 +120,7 @@ impl ShareFsMount for VirtiofsShareMount {
                guest_path,
                storages,
            });
        } else if get_mount_type(config.mount.typ()).as_str() == mount::KATA_EPHEMERAL_VOLUME_TYPE {
        } else if get_mount_type(&config.mount).as_str() == mount::KATA_EPHEMERAL_VOLUME_TYPE {
            // refer to the golang `handleEphemeralStorage` code at
            // https://github.com/kata-containers/kata-containers/blob/9516286f6dd5cfd6b138810e5d7c9e01cf6fc043/src/runtime/virtcontainers/kata_agent.go#L1354

@@ -98,7 +98,7 @@ pub(crate) async fn handle_direct_volume(
}

pub(crate) fn is_direct_volume(m: &oci::Mount) -> Result<bool> {
    let mnt_type = get_mount_type(m.typ());
    let mnt_type = get_mount_type(m);
    let mount_type = mnt_type.as_str();

    // Filter the non-bind volume and non-direct-vol volume

@@ -125,7 +125,7 @@ impl SPDKVolume {
            .context("generate host-guest shared path failed")?;
        storage.mount_point = guest_path.clone();

        if get_mount_type(m.typ()).as_str() != "bind" {
        if get_mount_type(m).as_str() != "bind" {
            storage.fs_type = mount_info.fs_type.clone();
        } else {
            storage.fs_type = DEFAULT_VOLUME_FS_TYPE.to_string();

@@ -80,7 +80,7 @@ impl VfioVolume {
            .context("generate host-guest shared path failed")?;
        storage.mount_point = guest_path.clone();

        if get_mount_type(m.typ()).as_str() != "bind" {
        if get_mount_type(m).as_str() != "bind" {
            storage.fs_type = mount_info.fs_type.clone();
        } else {
            storage.fs_type = DEFAULT_VOLUME_FS_TYPE.to_string();

@@ -308,8 +308,8 @@ impl Volume for ShareFsVolume {
}

pub(crate) fn is_share_fs_volume(m: &oci::Mount) -> bool {
    (get_mount_type(m.typ()).as_str() == "bind"
        || get_mount_type(m.typ()).as_str() == mount::KATA_EPHEMERAL_VOLUME_TYPE)
    let mount_type = get_mount_type(m);
    (mount_type == "bind" || mount_type == mount::KATA_EPHEMERAL_VOLUME_TYPE)
        && !is_host_device(&get_mount_path(&Some(m.destination().clone())))
        && !is_system_mount(&get_mount_path(m.source()))
}

@@ -115,5 +115,5 @@ impl Volume for ShmVolume {

pub(crate) fn is_shm_volume(m: &oci::Mount) -> bool {
    get_mount_path(&Some(m.destination().clone())).as_str() == "/dev/shm"
        && get_mount_type(m.typ()).as_str() != KATA_EPHEMERAL_DEV_TYPE
        && get_mount_type(m).as_str() != KATA_EPHEMERAL_DEV_TYPE
}
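The recurring change across these hunks is a single refactor: `get_mount_type` now takes the whole `oci::Mount` instead of the result of `m.typ()`. A hedged sketch of what such a signature change might look like (the struct layout and the "bind" fallback are assumptions for illustration, not the actual kata helper):

// Hypothetical sketch of the refactor: get_mount_type takes the whole
// mount and resolves a missing type itself. The "bind" fallback is an
// assumption for illustration only.
pub struct Mount {
    typ: Option<String>,
}

fn get_mount_type(m: &Mount) -> String {
    m.typ.clone().unwrap_or_else(|| "bind".to_owned())
}

fn main() {
    let m = Mount { typ: None };
    assert_eq!(get_mount_type(&m), "bind");
    let m = Mount { typ: Some("virtiofs".to_owned()) };
    assert_eq!(get_mount_type(&m), "virtiofs");
}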
@@ -128,11 +128,13 @@ impl Process {

    pub fn pre_fifos_open(&mut self) -> Result<()> {
        if let Some(ref stdout) = self.stdout {
            self.stdout_r = Some(open_fifo_read(stdout)?);
            self.stdout_r = Some(open_fifo_read(stdout).context("open stdout")?);
        }

        if let Some(ref stderr) = self.stderr {
            self.stderr_r = Some(open_fifo_read(stderr)?);
        if !self.terminal {
            if let Some(ref stderr) = self.stderr {
                self.stderr_r = Some(open_fifo_read(stderr).context("open stderr")?);
            }
        }

        Ok(())
@@ -9,13 +9,13 @@ jobs:
    runs-on: ${{ matrix.os }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
        uses: actions/checkout@v4
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        uses: golangci/golangci-lint-action@v6
        with:
          version: latest
          args: -c .golangci.yml -v
@@ -513,8 +513,14 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net

    // Create the VM memory config via the constructor to ensure default values are properly assigned
    clh.vmconfig.Memory = chclient.NewMemoryConfig(int64((utils.MemUnit(clh.config.MemorySize) * utils.MiB).ToBytes()))
    // shared memory should be enabled if using vhost-user (kata uses virtiofsd)
    clh.vmconfig.Memory.Shared = func(b bool) *bool { return &b }(true)
    // Memory config "shared" is to be enabled when using vhost_user backends, e.g. virtio-fs,
    // or when using HugePages.
    // If such features are disabled, turn off the shared memory config.
    if clh.config.SharedFS == config.NoSharedFS && !clh.config.HugePages {
        clh.vmconfig.Memory.Shared = func(b bool) *bool { return &b }(false)
    } else {
        clh.vmconfig.Memory.Shared = func(b bool) *bool { return &b }(true)
    }
    // Enable hugepages if needed
    clh.vmconfig.Memory.Hugepages = func(b bool) *bool { return &b }(clh.config.HugePages)
    if !clh.config.ConfidentialGuest {
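The new branch enables shared guest memory whenever a shared filesystem or HugePages is in use, and disables it only when both are off. The same decision restated as a single predicate, in a hedged Rust sketch (function and parameter names are illustrative; the Go code above is the authoritative logic):

// Illustrative restatement of the Cloud Hypervisor memory.shared decision:
// shared memory is required by vhost-user backends (virtio-fs) and HugePages.
fn memory_shared(shared_fs_disabled: bool, hugepages: bool) -> bool {
    !(shared_fs_disabled && !hugepages)
}

fn main() {
    assert!(memory_shared(false, false)); // virtio-fs in use -> shared
    assert!(memory_shared(true, true));   // HugePages -> shared
    assert!(!memory_shared(true, false)); // neither -> not shared
}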
@@ -1627,7 +1633,7 @@ func (clh *cloudHypervisor) getDiskRateLimiterConfig() *chclient.RateLimiterConf
}

func (clh *cloudHypervisor) addNet(e Endpoint) error {
    clh.Logger().WithField("endpoint-type", e).Debugf("Adding Endpoint of type %v", e)
    clh.Logger().WithField("endpoint", e).Debugf("Adding Endpoint of type %v", e.Type())

    mac := e.HardwareAddr()
    netPair := e.NetworkPair()
@@ -41,6 +41,9 @@ const (
var defaultDialTimeout = 30 * time.Second

var hybridVSockPort uint32
var hybridVSockErrors uint32 = 0

const hybridVSockErrorsSkip uint32 = 128

var agentClientFields = logrus.Fields{
    "name": "agent-client",

@@ -425,9 +428,16 @@ func HybridVSockDialer(sock string, timeout time.Duration) (net.Conn, error) {
        case err = <-errChan:
            if err != nil {
                conn.Close()
                agentClientLog.WithField("Error", err).Debug("HybridVsock trivial handshake failed")
                return nil, err

                // With full debug logging enabled there might be around 1,500 redials in a tight loop, so
                // skip logging some of these failures to avoid flooding the log.
                errorsCount := hybridVSockErrors
                if errorsCount%hybridVSockErrorsSkip == 0 {
                    agentClientLog.WithField("Error", err).WithField("count", errorsCount).Debug("HybridVsock trivial handshake failed")
                }
                hybridVSockErrors = errorsCount + 1

                return nil, err
            }
            return conn, nil
        case <-time.After(handshakeTimeout):
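The counter-and-modulo pattern above emits only every 128th handshake failure to the debug log. A standalone Rust sketch of the same rate-limiting idea (using an atomic counter for safety; the Go code above uses a plain package-level variable):

use std::sync::atomic::{AtomicU32, Ordering};

// Log only every Nth occurrence of a repetitive failure to avoid
// flooding the log during tight redial loops.
static ERRORS: AtomicU32 = AtomicU32::new(0);
const SKIP: u32 = 128;

fn log_sampled(msg: &str) {
    let count = ERRORS.fetch_add(1, Ordering::Relaxed);
    if count % SKIP == 0 {
        eprintln!("{} (occurrence #{})", msg, count);
    }
}

fn main() {
    for _ in 0..300 {
        log_sampled("handshake failed"); // prints at #0, #128, #256
    }
}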
1 src/tools/csi-kata-directvolume/.gitignore vendored Normal file

@@ -0,0 +1 @@
bin/
@@ -56,7 +56,7 @@ ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH))
# Specific packages can be excluded from each of the tests below by setting the *_FILTER_CMD variables
# to something like "| grep -v 'github.com/kubernetes-csi/project/pkg/foobar'". See usage below.

build-%: check-go-version-go
build-%:
	mkdir -p bin
	CGO_ENABLED=0 GOOS=linux go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$*
	if [ "$$ARCH" = "amd64" ]; then \

@@ -92,9 +92,6 @@ push: $(CMDS:%=push-%)
clean:
	-rm -rf bin

test: check-go-version-go

.PHONY: test-vet
test: test-vet
test-vet:

@@ -111,12 +108,3 @@ test-fmt:
		gofmt -d $$files; \
		false; \
	fi

# Targets in the makefile can depend on check-go-version-<path to go binary>
# to trigger a warning if the x.y version of that binary does not match
# what the project uses. Make ensures that this is only checked once per
# invocation.
.PHONY: check-go-version-%
check-go-version-%:
	./release-tools/verify-go-version.sh "$*"
@@ -1,35 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2019 The Kubernetes Authors.
#
# SPDX-License-Identifier: Apache-2.0
#

GO="$1"

if [ ! "$GO" ]; then
	echo >&2 "usage: $0 <path to go binary>"
	exit 1
fi

die () {
	echo "ERROR: $*"
	exit 1
}

version=$("$GO" version) || die "determining version of $GO failed"
# shellcheck disable=SC2001
majorminor=$(echo "$version" | sed -e 's/.*go\([0-9]*\)\.\([0-9]*\).*/\1.\2/')

if [ "$majorminor" != "$expected" ]; then
	cat >&2 <<EOF

======================================================
WARNING

  Compile the Project with Go version v$majorminor !

======================================================

EOF
fi

@@ -305,7 +305,6 @@
        "oci_version": "1.1.0"
    },
    "cluster_config": {
        "default_namespace": "default",
        "pause_container_image": "mcr.microsoft.com/oss/kubernetes/pause:3.6"
    },
    "request_defaults": {
@@ -7,8 +7,6 @@ package agent_policy
import future.keywords.in
import future.keywords.every

import input

# Default values, returned by OPA when rules cannot be evaluated to true.
default AddARPNeighborsRequest := false
default AddSwapRequest := false

@@ -51,6 +49,10 @@ default WriteStreamRequest := false
# them and inspect OPA logs for the root cause of a failure.
default AllowRequestsFailingPolicy := false

# Constants
S_NAME_KEY = "io.kubernetes.cri.sandbox-name"
S_NAMESPACE_KEY = "io.kubernetes.cri.sandbox-namespace"

CreateContainerRequest:= {"ops": ops, "allowed": true} {
    # Check if the input request should be rejected even before checking the
    # policy_data.containers information.

@@ -64,9 +66,9 @@ CreateContainerRequest:= {"ops": ops, "allowed": true} {
    ops_builder := []

    # check sandbox name
    sandbox_name = i_oci.Annotations["io.kubernetes.cri.sandbox-name"]
    sandbox_name = i_oci.Annotations[S_NAME_KEY]
    add_sandbox_name_to_state := state_allows("sandbox_name", sandbox_name)
    ops := concat_op_if_not_null(ops_builder, add_sandbox_name_to_state)
    ops_builder1 := concat_op_if_not_null(ops_builder, add_sandbox_name_to_state)

    # Check if any element from the policy_data.containers array allows the input request.
    some p_container in policy_data.containers

@@ -79,6 +81,13 @@ CreateContainerRequest:= {"ops": ops, "allowed": true} {

    p_oci := p_container.OCI

    # check namespace
    p_namespace := p_oci.Annotations[S_NAMESPACE_KEY]
    i_namespace := i_oci.Annotations[S_NAMESPACE_KEY]
    print("CreateContainerRequest: p_namespace =", p_namespace, "i_namespace =", i_namespace)
    add_namespace_to_state := allow_namespace(p_namespace, i_namespace)
    ops := concat_op_if_not_null(ops_builder1, add_namespace_to_state)

    print("CreateContainerRequest: p Version =", p_oci.Version, "i Version =", i_oci.Version)
    p_oci.Version == i_oci.Version
@@ -129,6 +138,18 @@ allow_create_container_input {
    print("allow_create_container_input: true")
}

allow_namespace(p_namespace, i_namespace) = add_namespace {
    p_namespace == i_namespace
    add_namespace := null
    print("allow_namespace 1: input namespace matches policy data")
}

allow_namespace(p_namespace, i_namespace) = add_namespace {
    p_namespace == ""
    print("allow_namespace 2: no namespace found on policy data")
    add_namespace := state_allows("namespace", i_namespace)
}

# value hasn't been seen before, save it to state
state_allows(key, value) = action {
    state := get_state()

@@ -156,7 +177,8 @@ get_state() = state {
}

get_state_path(key) = path {
    path := concat("/", ["", key]) # prepend "/" to key
    # prepend "/pstate/" to key
    path := concat("/", ["/pstate", key])
}

# Helper functions to conditionally concatenate if op is not null

@@ -208,16 +230,14 @@ allow_anno_key(i_key, p_oci) {
    print("allow_anno_key 2: true")
}

# Get the value of the "io.kubernetes.cri.sandbox-name" annotation and
# Get the value of the S_NAME_KEY annotation and
# correlate it with other annotations and process fields.
allow_by_anno(p_oci, i_oci, p_storages, i_storages) {
    print("allow_by_anno 1: start")

    s_name := "io.kubernetes.cri.sandbox-name"
    not p_oci.Annotations[S_NAME_KEY]

    not p_oci.Annotations[s_name]

    i_s_name := i_oci.Annotations[s_name]
    i_s_name := i_oci.Annotations[S_NAME_KEY]
    print("allow_by_anno 1: i_s_name =", i_s_name)

    allow_by_sandbox_name(p_oci, i_oci, p_storages, i_storages, i_s_name)

@@ -227,10 +247,8 @@ allow_by_anno(p_oci, i_oci, p_storages, i_storages) {
allow_by_anno(p_oci, i_oci, p_storages, i_storages) {
    print("allow_by_anno 2: start")

    s_name := "io.kubernetes.cri.sandbox-name"

    p_s_name := p_oci.Annotations[s_name]
    i_s_name := i_oci.Annotations[s_name]
    p_s_name := p_oci.Annotations[S_NAME_KEY]
    i_s_name := i_oci.Annotations[S_NAME_KEY]
    print("allow_by_anno 2: i_s_name =", i_s_name, "p_s_name =", p_s_name)

    allow_sandbox_name(p_s_name, i_s_name)

@@ -242,14 +260,9 @@ allow_by_anno(p_oci, i_oci, p_storages, i_storages) {
allow_by_sandbox_name(p_oci, i_oci, p_storages, i_storages, s_name) {
    print("allow_by_sandbox_name: start")

    s_namespace := "io.kubernetes.cri.sandbox-namespace"
    i_namespace := i_oci.Annotations[S_NAMESPACE_KEY]

    p_namespace := p_oci.Annotations[s_namespace]
    i_namespace := i_oci.Annotations[s_namespace]
    print("allow_by_sandbox_name: p_namespace =", p_namespace, "i_namespace =", i_namespace)
    p_namespace == i_namespace

    allow_by_container_types(p_oci, i_oci, s_name, p_namespace)
    allow_by_container_types(p_oci, i_oci, s_name, i_namespace)
    allow_by_bundle_or_sandbox_id(p_oci, i_oci, p_storages, i_storages)
    allow_process(p_oci, i_oci, s_name)
@@ -26,6 +26,9 @@ pub struct ObjectMeta {

    #[serde(skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub uid: Option<String>,
}

impl ObjectMeta {

@@ -388,8 +388,6 @@ pub struct CommonData {
/// Configuration from "kubectl config".
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ClusterConfig {
    default_namespace: String,

    /// Pause container image reference.
    pub pause_container_image: String,
}

@@ -532,15 +530,7 @@ impl AgentPolicy {
        let mut root = c_settings.Root.clone();
        root.Readonly = yaml_container.read_only_root_filesystem();

        let namespace = match resource.get_namespace() {
            Some(ns) if !ns.is_empty() => ns,
            _ => self
                .config
                .settings
                .cluster_config
                .default_namespace
                .clone(),
        };
        let namespace = resource.get_namespace().unwrap_or_default();

        let use_host_network = resource.use_host_network();
        let annotations = get_container_annotations(
@@ -358,7 +358,6 @@ fn get_server_socket(sandbox_id: &str) -> anyhow::Result<String> {
}

fn do_run_exec(sandbox_id: &str, dbg_console_vport: u32) -> anyhow::Result<()> {
    // sandbox_id MUST be a long ID.
    let server_url = get_server_socket(sandbox_id).context("get debug console socket URL")?;
    if server_url.is_empty() {
        return Err(anyhow!("server url is empty."));
@@ -156,7 +156,9 @@ create_mediated_device() {
    _APID=${APQN//.*}
    _APQI=${APQN#*.}
    APID=$(echo ${_APID} | sed 's/^0*//')
    APID=${APID:-0}
    APQI=$(echo ${_APQI} | sed 's/^0*//')
    APQI=${APQI:-0}

    # Release the device from the host
    pushd ${sys_bus_base}
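The shell above splits an APQN such as `05.0012` into the adapter (`APID`) and queue index (`APQI`), strips leading zeros, and falls back to `0` when a field is all zeros. The same parsing in a hedged Rust sketch (the sample APQN values are illustrative; real ones come from the host's AP bus):

// Split an APQN of the form "<apid>.<apqi>" and strip leading zeros,
// mirroring the ${APQN//.*} / ${APQN#*.} / sed 's/^0*//' shell logic.
fn parse_apqn(apqn: &str) -> Option<(String, String)> {
    let (apid, apqi) = apqn.split_once('.')?;
    let strip = |s: &str| {
        let t = s.trim_start_matches('0');
        if t.is_empty() { "0".to_owned() } else { t.to_owned() }
    };
    Some((strip(apid), strip(apqi)))
}

fn main() {
    assert_eq!(parse_apqn("05.0012"), Some(("5".to_owned(), "12".to_owned())));
    assert_eq!(parse_apqn("00.0000"), Some(("0".to_owned(), "0".to_owned())));
}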
@@ -578,6 +578,7 @@ function main() {
        install-kbs-client) install_kbs_client ;;
        install-kubectl) install_kubectl ;;
        get-cluster-credentials) get_cluster_credentials ;;
        deploy-csi-driver) return 0 ;;
        deploy-kata) deploy_kata ;;
        deploy-kata-aks) deploy_kata "aks" ;;
        deploy-kata-kcli) deploy_kata "kcli" ;;

@@ -599,6 +600,7 @@ function main() {
        cleanup-garm) cleanup "garm" ;;
        cleanup-zvsi) cleanup "zvsi" ;;
        cleanup-snapshotter) cleanup_snapshotter ;;
        delete-csi-driver) return 0 ;;
        delete-coco-kbs) delete_coco_kbs ;;
        delete-cluster) cleanup "aks" ;;
        delete-cluster-kcli) delete_cluster_kcli ;;
@@ -9,6 +9,8 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"

setup() {
    [ "${KATA_HYPERVISOR}" = "qemu-coco-dev" ] || skip "Test not stable on qemu-coco-dev. See issue #10616"

    get_pod_config_dir
    job_name="job-pi-test"
    yaml_file="${pod_config_dir}/job.yaml"

@@ -38,6 +40,8 @@ setup() {
}

teardown() {
    [ "${KATA_HYPERVISOR}" = "qemu-coco-dev" ] || skip "Test not ready yet for ${KATA_HYPERVISOR}"

    # Debugging information
    kubectl describe pod "$pod_name"
    kubectl describe jobs/"$job_name"

@@ -15,7 +15,6 @@ setup() {
    pod_yaml="${pod_config_dir}/pod-cpu-defaults.yaml"

    policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
    set_namespace_to_policy_settings "${policy_settings_dir}" "${namespace_name}"
    auto_generate_policy "${policy_settings_dir}" "${pod_yaml}"
}
@@ -7,6 +7,7 @@ apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: policy-pod
|
||||
uid: policy-pod-uid
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
runtimeClassName: kata
|
||||
|
||||
@@ -155,9 +155,6 @@ create_common_genpolicy_settings() {
|
||||
|
||||
cp "${default_genpolicy_settings_dir}/genpolicy-settings.json" "${genpolicy_settings_dir}"
|
||||
cp "${default_genpolicy_settings_dir}/rules.rego" "${genpolicy_settings_dir}"
|
||||
|
||||
# Set the default namespace of Kata CI tests in the genpolicy settings.
|
||||
set_namespace_to_policy_settings "${genpolicy_settings_dir}" "${TEST_CLUSTER_NAMESPACE}"
|
||||
}
|
||||
|
||||
# If auto-generated policy testing is enabled, make a copy of the common genpolicy settings
|
||||
@@ -273,21 +270,6 @@ add_copy_from_guest_to_policy_settings() {
|
||||
add_exec_to_policy_settings "${policy_settings_dir}" "${exec_command[@]}"
|
||||
}
|
||||
|
||||
# Change genpolicy settings to use a pod namespace different than "default".
|
||||
set_namespace_to_policy_settings() {
|
||||
local -r settings_dir="$1"
|
||||
local -r namespace="$2"
|
||||
|
||||
auto_generate_policy_enabled || return 0
|
||||
|
||||
info "${settings_dir}/genpolicy-settings.json: namespace: ${namespace}"
|
||||
jq --arg namespace "${namespace}" \
|
||||
'.cluster_config.default_namespace |= $namespace' \
|
||||
"${settings_dir}/genpolicy-settings.json" > \
|
||||
"${settings_dir}/new-genpolicy-settings.json"
|
||||
mv "${settings_dir}/new-genpolicy-settings.json" "${settings_dir}/genpolicy-settings.json"
|
||||
}
|
||||
|
||||
hard_coded_policy_tests_enabled() {
|
||||
# CI is testing hard-coded policies just on a the platforms listed here. Outside of CI,
|
||||
# users can enable testing of the same policies (plus the auto-generated policies) by
|
||||
|
||||
@@ -160,7 +160,7 @@ description = "measure container latency"
checkvar = ".\"latency\".Results | .[] | .latency.Result"
checktype = "mean"
midval = 0.75
minpercent = 20.0
minpercent = 25.0
maxpercent = 20.0

[[metric]]

@@ -212,5 +212,5 @@ description = "iperf"
checkvar = ".\"network-iperf3\".Results | .[] | .jitter.Result"
checktype = "mean"
midval = 0.044
minpercent = 50.0
minpercent = 60.0
maxpercent = 50.0

@@ -199,8 +199,8 @@ description = "measure container parallel bandwidth using iperf3"
checkvar = ".\"network-iperf3\".Results | .[] | .parallel.Result"
checktype = "mean"
midval = 52644229340.0
minpercent = 20.0
maxpercent = 20.0
minpercent = 25.0
maxpercent = 25.0

[[metric]]
name = "network-iperf3"

@@ -211,6 +211,6 @@ description = "iperf"
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .jitter.Result"
checktype = "mean"
midval = 0.041
minpercent = 50.0
maxpercent = 50.0
midval = 0.040
minpercent = 60.0
maxpercent = 60.0
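These checkmetrics baselines appear to accept a measured mean between `midval * (1 - minpercent/100)` and `midval * (1 + maxpercent/100)`, so the edits above simply widen the tolerated band. A small sketch of that bounds check (field names follow the TOML above; the formula itself is an assumption about how checkmetrics consumes these values):

// Hedged sketch of the midval/minpercent/maxpercent tolerance band.
struct Metric {
    midval: f64,
    minpercent: f64,
    maxpercent: f64,
}

impl Metric {
    fn in_range(&self, mean: f64) -> bool {
        let lo = self.midval * (1.0 - self.minpercent / 100.0);
        let hi = self.midval * (1.0 + self.maxpercent / 100.0);
        mean >= lo && mean <= hi
    }
}

fn main() {
    // Values from the network-iperf3 jitter entry above.
    let jitter = Metric { midval: 0.040, minpercent: 60.0, maxpercent: 60.0 };
    assert!(jitter.in_range(0.05));  // within the +60% bound (0.064)
    assert!(!jitter.in_range(0.10)); // above the band
}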
@@ -80,7 +80,7 @@ filesystem components to generate an initrd.
3. When generating an image, the initrd is extracted to obtain the base rootfs for
   the image.

Ubuntu is the default distro for building the rootfs; to use a different one, you can set `DISTRO=alpine|clearlinux|debian|ubuntu|cbl-mariner`.
Ubuntu is the default distro for building the rootfs; to use a different one, you can set `DISTRO=alpine|debian|ubuntu|cbl-mariner`.
For example, `make USE_DOCKER=true DISTRO=alpine rootfs` will make an Alpine rootfs using Docker.

### Rootfs creation
@@ -1,40 +0,0 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_REGISTRY=docker.io
FROM ${IMAGE_REGISTRY}/fedora:30

@SET_PROXY@

RUN dnf -y update && dnf install -y \
    autoconf \
    automake \
    binutils \
    chrony \
    coreutils \
    curl \
    curl \
    gcc \
    gcc-c++ \
    git \
    glibc-common \
    glibc-devel \
    glibc-headers \
    glibc-static \
    glibc-utils \
    libstdc++-devel \
    libstdc++-static \
    m4 \
    make \
    pkgconfig \
    sed \
    systemd \
    tar \
    vim \
    which && \
    dnf clean all

# This will install the proper packages to build Kata components
@INSTALL_RUST@
@@ -1,31 +0,0 @@
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

OS_NAME="Clear"
REPO_NAME="clear"

OS_VERSION=${OS_VERSION:-latest}

clr_url="https://download.clearlinux.org"

# resolve version
[ "${OS_VERSION}" = "latest" ] && OS_VERSION=$(curl -sL "${clr_url}/latest")

BASE_URL="${clr_url}/releases/${OS_VERSION}/${REPO_NAME}/${ARCH}/os/"

PACKAGES="libudev0-shim kmod-bin iptables-bin"

# Optional packages:
# systemd: An init system that will start kata-agent if kata-agent
#          itself is not configured as init process.
[ "$AGENT_INIT" = "no" ] && PACKAGES+=" systemd chrony util-linux-bin" || true

# Init process must be one of {systemd,kata-agent}
INIT_PROCESS=systemd
# List of zero or more architectures to exclude from build,
# as reported by `uname -m`
ARCH_EXCLUDE_LIST=( aarch64 ppc64le s390x )

[ "$SECCOMP" = "yes" ] && PACKAGES+=" libseccomp" || true
339 tools/osbuilder/rootfs-builder/nvidia/nvidia_chroot.sh Normal file

@@ -0,0 +1,339 @@
#!/usr/bin/env bash
#
# Copyright (c) 2024 NVIDIA Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -xe

shopt -s nullglob
shopt -s extglob

run_file_name=$2
run_fm_file_name=$3
arch_target=$4
driver_version="$5"
driver_type="open"
supported_gpu_devids="/supported-gpu.devids"

APT_INSTALL="apt -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' -yqq --no-install-recommends install"

export DEBIAN_FRONTEND=noninteractive

install_nvidia_ctk() {
	echo "chroot: Installing NVIDIA GPU container runtime"
	apt list nvidia-container-toolkit-base -a
	# Base gives a nvidia-ctk and the nvidia-container-runtime
	eval "${APT_INSTALL}" nvidia-container-toolkit-base
}

install_nvidia_fabricmanager() {
	# if run_fm_file_name exists, run it
	if [ -f /"${run_fm_file_name}" ]; then
		install_nvidia_fabricmanager_from_run_file
	else
		install_nvidia_fabricmanager_from_distribution
	fi
}

install_nvidia_fabricmanager_from_run_file() {
	echo "chroot: Install NVIDIA fabricmanager from run file"
	pushd / >> /dev/null
	chmod +x "${run_fm_file_name}"
	./"${run_fm_file_name}" --nox11
	popd >> /dev/null
}

install_nvidia_fabricmanager_from_distribution() {
	echo "chroot: Install NVIDIA fabricmanager from distribution"
	eval "${APT_INSTALL}" nvidia-fabricmanager-"${driver_version}" libnvidia-nscq-"${driver_version}"
	apt-mark hold nvidia-fabricmanager-"${driver_version}" libnvidia-nscq-"${driver_version}"
}

build_nvidia_drivers() {
	echo "chroot: Build NVIDIA drivers"
	pushd "${driver_source_files}" >> /dev/null

	local kernel_version
	for version in /lib/modules/*; do
		kernel_version=$(basename "${version}")
		echo "chroot: Building GPU modules for: ${kernel_version}"
		cp /boot/System.map-"${kernel_version}" /lib/modules/"${kernel_version}"/build/System.map

		if [ "${arch_target}" == "aarch64" ]; then
			ln -sf /lib/modules/"${kernel_version}"/build/arch/arm64 /lib/modules/"${kernel_version}"/build/arch/aarch64
		fi

		if [ "${arch_target}" == "x86_64" ]; then
			ln -sf /lib/modules/"${kernel_version}"/build/arch/x86 /lib/modules/"${kernel_version}"/build/arch/amd64
		fi

		make -j "$(nproc)" CC=gcc SYSSRC=/lib/modules/"${kernel_version}"/build > /dev/null
		make INSTALL_MOD_STRIP=1 -j "$(nproc)" CC=gcc SYSSRC=/lib/modules/"${kernel_version}"/build modules_install
		make -j "$(nproc)" CC=gcc SYSSRC=/lib/modules/"${kernel_version}"/build clean > /dev/null

	done
	# Save the modules for later so that a linux-image purge does not remove them
	tar cvfa /lib/modules.save_from_purge.tar.zst /lib/modules
	popd >> /dev/null
}

install_userspace_components() {
	if [ ! -f /"${run_file_name}" ]; then
		echo "chroot: Skipping NVIDIA userspace runfile components installation"
		return
	fi

	pushd /NVIDIA-* >> /dev/null
	# if aarch64 we need to remove --no-install-compat32-libs
	if [ "${arch_target}" == "aarch64" ]; then
		./nvidia-installer --no-kernel-modules --no-systemd --no-nvidia-modprobe -s --x-prefix=/root
	else
		./nvidia-installer --no-kernel-modules --no-systemd --no-nvidia-modprobe -s --x-prefix=/root --no-install-compat32-libs
	fi
	popd >> /dev/null
}

prepare_run_file_drivers() {
	if [ "${driver_version}" == "latest" ]; then
		driver_version=""
		echo "chroot: Resetting driver version not supported with run-file"
	elif [ "${driver_version}" == "lts" ]; then
		driver_version=""
		echo "chroot: Resetting driver version not supported with run-file"
	fi

	echo "chroot: Prepare NVIDIA run file drivers"
	pushd / >> /dev/null
	chmod +x "${run_file_name}"
	./"${run_file_name}" -x

	mkdir -p /usr/share/nvidia/rim/

	# Sooner or later RIM files will only be available remotely
	RIMFILE=$(ls NVIDIA-*/RIM_GH100PROD.swidtag)
	if [ -e "${RIMFILE}" ]; then
		cp NVIDIA-*/RIM_GH100PROD.swidtag /usr/share/nvidia/rim/.
	fi
	popd >> /dev/null
}

prepare_distribution_drivers() {
	if [ "${driver_version}" == "latest" ]; then
		driver_version=$(apt-cache search --names-only 'nvidia-headless-no-dkms-.?.?.?-open' | awk '{ print $1 }' | tail -n 1 | cut -d'-' -f5)
	elif [ "${driver_version}" == "lts" ]; then
		driver_version="550"
	fi

	echo "chroot: Prepare NVIDIA distribution drivers"
	eval "${APT_INSTALL}" nvidia-headless-no-dkms-"${driver_version}-${driver_type}" \
		libnvidia-cfg1-"${driver_version}" \
		nvidia-compute-utils-"${driver_version}" \
		nvidia-utils-"${driver_version}" \
		nvidia-kernel-common-"${driver_version}" \
		nvidia-imex-"${driver_version}" \
		libnvidia-compute-"${driver_version}" \
		libnvidia-compute-"${driver_version}" \
		libnvidia-gl-"${driver_version}" \
		libnvidia-extra-"${driver_version}" \
		libnvidia-decode-"${driver_version}" \
		libnvidia-fbc1-"${driver_version}" \
		libnvidia-encode-"${driver_version}"
}

prepare_nvidia_drivers() {
	local driver_source_dir=""

	if [ -f /"${run_file_name}" ]; then
		prepare_run_file_drivers

		for source_dir in /NVIDIA-*; do
			if [ -d "${source_dir}" ]; then
				driver_source_files="${source_dir}"/kernel-${driver_type}
				driver_source_dir="${source_dir}"
				break
			fi
		done
		get_supported_gpus_from_run_file "${driver_source_dir}"

	else
		prepare_distribution_drivers

		for source_dir in /usr/src/nvidia*; do
			if [ -d "${source_dir}" ]; then
				driver_source_files="${source_dir}"
				driver_source_dir="${source_dir}"
				break
			fi
		done
		get_supported_gpus_from_distro_drivers "${driver_source_dir}"
	fi
}

install_build_dependencies() {
	echo "chroot: Install NVIDIA drivers build dependencies"
	eval "${APT_INSTALL}" make gcc gawk kmod libvulkan1 pciutils jq zstd linuxptp
}
setup_apt_repositories() {
	echo "chroot: Setup APT repositories"
	mkdir -p /var/cache/apt/archives/partial
	mkdir -p /var/log/apt
	mkdir -p /var/lib/dpkg/info
	mkdir -p /var/lib/dpkg/updates
	mkdir -p /var/lib/dpkg/alternatives
	mkdir -p /var/lib/dpkg/triggers
	mkdir -p /var/lib/dpkg/parts
	touch /var/lib/dpkg/status
	rm -f /etc/apt/sources.list.d/*

	if [ "${arch_target}" == "aarch64" ]; then
		cat <<-'CHROOT_EOF' > /etc/apt/sources.list.d/jammy.list
			deb http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
			deb http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
			deb http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
			deb http://ports.ubuntu.com/ubuntu-ports/ jammy-backports main restricted universe multiverse
		CHROOT_EOF
	else
		cat <<-'CHROOT_EOF' > /etc/apt/sources.list.d/noble.list
			deb http://us.archive.ubuntu.com/ubuntu/ jammy main restricted universe multiverse
			deb http://us.archive.ubuntu.com/ubuntu/ jammy-updates main restricted universe multiverse
			deb http://us.archive.ubuntu.com/ubuntu/ jammy-security main restricted universe multiverse
			deb http://us.archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse
		CHROOT_EOF
	fi

	apt update

	eval "${APT_INSTALL}" curl gpg ca-certificates

	curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
	curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list |
		sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' |
		tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

	apt update
}

install_kernel_dependencies() {
	dpkg -i /linux-*deb
}

get_supported_gpus_from_run_file() {
	local source_dir="$1"
	local supported_gpus_json="${source_dir}"/supported-gpus/supported-gpus.json

	jq . < "${supported_gpus_json}" | grep '"devid"' | awk '{ print $2 }' | tr -d ',"' > ${supported_gpu_devids}
}

get_supported_gpus_from_distro_drivers() {
	local supported_gpus_json=/usr/share/doc/nvidia-kernel-common-"${driver_version}"/supported-gpus.json

	jq . < "${supported_gpus_json}" | grep '"devid"' | awk '{ print $2 }' | tr -d ',"' > ${supported_gpu_devids}
}

export_driver_version() {
	for modules_version in /lib/modules/*; do
		modinfo "${modules_version}"/kernel/drivers/video/nvidia.ko | grep ^version | awk '{ print $2 }' > /nvidia_driver_version
		break
	done
}

install_nvidia_dcgm() {
	curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb
	dpkg -i cuda-keyring_1.0-1_all.deb && rm -f cuda-keyring_1.0-1_all.deb

	if [ "${arch_target}" == "aarch64" ]; then
		cat <<-'CHROOT_EOF' > /etc/apt/sources.list.d/cuda.list
			deb [signed-by=/usr/share/keyrings/cuda-archive-keyring.gpg] https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/ /
		CHROOT_EOF
	else
		cat <<-'CHROOT_EOF' > /etc/apt/sources.list.d/cuda.list
			deb [signed-by=/usr/share/keyrings/cuda-archive-keyring.gpg] https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /
		CHROOT_EOF
	fi
	apt update
	eval "${APT_INSTALL}" datacenter-gpu-manager
}

cleanup_rootfs() {
	echo "chroot: Cleanup NVIDIA GPU rootfs"

	apt-mark hold libstdc++6 libzstd1 libgnutls30 pciutils
	# noble=libgnutls30t64

	if [ -n "${driver_version}" ]; then
		apt-mark hold libnvidia-cfg1-"${driver_version}" \
			nvidia-compute-utils-"${driver_version}" \
			nvidia-utils-"${driver_version}" \
			nvidia-kernel-common-"${driver_version}" \
			nvidia-imex-"${driver_version}" \
			libnvidia-compute-"${driver_version}" \
			libnvidia-compute-"${driver_version}" \
			libnvidia-gl-"${driver_version}" \
			libnvidia-extra-"${driver_version}" \
			libnvidia-decode-"${driver_version}" \
			libnvidia-fbc1-"${driver_version}" \
			libnvidia-encode-"${driver_version}" \
			libnvidia-nscq-"${driver_version}" \
			linuxptp libnftnl11
	fi

	kernel_headers=$(dpkg --get-selections | cut -f1 | grep linux-headers)
	linux_images=$(dpkg --get-selections | cut -f1 | grep linux-image)
	for i in ${kernel_headers} ${linux_images}; do
		apt purge -yqq "${i}"
	done

	apt purge -yqq jq make gcc wget libc6-dev git xz-utils curl gpg \
		python3-pip software-properties-common ca-certificates \
		linux-libc-dev nuitka python3-minimal cuda-keyring

	if [ -n "${driver_version}" ]; then
		apt purge -yqq nvidia-headless-no-dkms-"${driver_version}-${driver_type}" \
			nvidia-kernel-source-"${driver_version}-${driver_type}" -yqq
	fi

	apt autoremove -yqq

	apt clean
	apt autoclean

	for modules_version in /lib/modules/*; do
		ln -sf "${modules_version}" /lib/modules/"$(uname -r)"
		touch "${modules_version}"/modules.order
		touch "${modules_version}"/modules.builtin
		depmod -a
	done

	rm -rf /etc/apt/sources.list* /var/lib/apt /var/log/apt /var/cache/debconf
	rm -f /usr/bin/nvidia-ngx-updater /usr/bin/nvidia-container-runtime
	rm -f /var/log/{nvidia-installer.log,dpkg.log,alternatives.log}

	# Clear and regenerate the ld cache
	rm -f /etc/ld.so.cache
	ldconfig

	tar xvf /lib/modules.save_from_purge.tar.zst -C /
}

# Start of script
echo "chroot: Setup NVIDIA GPU rootfs stage one"

setup_apt_repositories
install_kernel_dependencies
install_build_dependencies
prepare_nvidia_drivers
build_nvidia_drivers
install_userspace_components
install_nvidia_fabricmanager
install_nvidia_ctk
export_driver_version
install_nvidia_dcgm
cleanup_rootfs
348 tools/osbuilder/rootfs-builder/nvidia/nvidia_rootfs.sh Normal file

@@ -0,0 +1,348 @@
#!/usr/bin/env bash
#
# Copyright (c) 2024 NVIDIA Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e
set -x

readonly BUILD_DIR="/kata-containers/tools/packaging/kata-deploy/local-build/build/"
# catch errors and then assign
script_dir="$(dirname "$(readlink -f "$0")")"
readonly SCRIPT_DIR="${script_dir}/nvidia"

# This will control how much output the initrd/image will produce
DEBUG=""

setup_nvidia-nvrc() {
	local TARGET="nvidia-nvrc"
	local PROJECT="nvrc"
	local TARGET_BUILD_DIR="${BUILD_DIR}/${TARGET}/builddir"
	local TARGET_DEST_DIR="${BUILD_DIR}/${TARGET}/destdir"
	local TARBALL="${BUILD_DIR}/kata-static-${TARGET}.tar.zst"

	mkdir -p "${TARGET_BUILD_DIR}"
	mkdir -p "${TARGET_DEST_DIR}/bin"

	pushd "${TARGET_BUILD_DIR}" > /dev/null || exit 1

	rm -rf "${PROJECT}"
	git clone https://github.com/NVIDIA/${PROJECT}.git

	pushd "${PROJECT}" > /dev/null || exit 1

	cargo build --release --target=x86_64-unknown-linux-musl
	cp target/x86_64-unknown-linux-musl/release/NVRC ../../destdir/bin/.

	popd > /dev/null || exit 1

	tar cvfa "${TARBALL}" -C ../destdir .
	tar tvf "${TARBALL}"

	popd > /dev/null || exit 1
}

setup_nvidia-gpu-admin-tools() {
	local TARGET="nvidia-gpu-admin-tools"
	local TARGET_GIT="https://github.com/NVIDIA/gpu-admin-tools"
	local TARGET_BUILD_DIR="${BUILD_DIR}/${TARGET}/builddir"
	local TARGET_DEST_DIR="${BUILD_DIR}/${TARGET}/destdir"
	local TARBALL="${BUILD_DIR}/kata-static-${TARGET}.tar.zst"

	mkdir -p "${TARGET_BUILD_DIR}"
	mkdir -p "${TARGET_DEST_DIR}/sbin"

	pushd "${TARGET_BUILD_DIR}" > /dev/null || exit 1

	rm -rf "$(basename ${TARGET_GIT})"
	git clone ${TARGET_GIT}

	rm -rf dist
	# Installed via pipx local python environment
	"${HOME}"/local/bin/pyinstaller -s -F gpu-admin-tools/nvidia_gpu_tools.py

	cp dist/nvidia_gpu_tools ../destdir/sbin/.

	tar cvfa "${TARBALL}" -C ../destdir .
	tar tvf "${TARBALL}"

	popd > /dev/null || exit 1
}

setup_nvidia-dcgm-exporter() {
	local TARGET="nvidia-dcgm-exporter"
	local TARGET_BUILD_DIR="${BUILD_DIR}/${TARGET}/builddir"
	local TARGET_DEST_DIR="${BUILD_DIR}/${TARGET}/destdir"
	local TARBALL="${BUILD_DIR}/kata-static-${TARGET}.tar.zst"

	mkdir -p "${TARGET_BUILD_DIR}"
	mkdir -p "${TARGET_DEST_DIR}/bin"
	mkdir -p "${TARGET_DEST_DIR}/etc"

	pushd "${TARGET_BUILD_DIR}" > /dev/null || exit 1

	local dex="dcgm-exporter"

	rm -rf "${dex}"
	git clone https://github.com/NVIDIA/${dex}
	make -C ${dex} binary

	mkdir -p ../destdir/bin
	mkdir -p ../destdir/etc/${dex}

	cp ${dex}/cmd/${dex}/${dex} ../destdir/bin/.
	cp ${dex}/etc/*.csv ../destdir/etc/${dex}/.

	tar cvfa "${TARBALL}" -C ../destdir .
	tar tvf "${TARBALL}"

	popd > /dev/null || exit 1
}

setup_nvidia_gpu_rootfs_stage_one() {
	if [ -e "${BUILD_DIR}/kata-static-nvidia-gpu-rootfs-stage-one.tar.zst" ]; then
		info "nvidia: GPU rootfs stage one already exists"
		return
	fi

	pushd "${ROOTFS_DIR:?}" >> /dev/null

	local rootfs_type=${1:-""}

	info "nvidia: Setup GPU rootfs type=$rootfs_type"

	for component in "nvidia-gpu-admin-tools" "nvidia-dcgm-exporter" "nvidia-nvrc"; do
		if [ ! -e "${BUILD_DIR}/kata-static-${component}.tar.zst" ]; then
			setup_${component}
		fi
	done

	cp "${SCRIPT_DIR}/nvidia_chroot.sh" ./nvidia_chroot.sh

	chmod +x ./nvidia_chroot.sh

	local appendix=""
	if [ "$rootfs_type" == "confidential" ]; then
		appendix="-${rootfs_type}"
	fi
	if echo "$NVIDIA_GPU_STACK" | grep -q '\<dragonball\>'; then
		appendix="-dragonball-experimental"
	fi

	# We need the kernel packages for building the drivers cleanly; they will be
	# uninstalled and removed from the rootfs once the build finishes.
	tar -xvf ${BUILD_DIR}/kata-static-kernel-nvidia-gpu"${appendix}"-headers.tar.xz -C .

	# If we find a locally downloaded run file, build the kernel modules
	# with it, otherwise use the distribution packages. Run files may have
	# more recent drivers available than the distribution packages.
	local run_file_name="nvidia-driver.run"
	if [ -f ${BUILD_DIR}/${run_file_name} ]; then
		cp -L ${BUILD_DIR}/${run_file_name} ./${run_file_name}
	fi

	local run_fm_file_name="nvidia-fabricmanager.run"
	if [ -f ${BUILD_DIR}/${run_fm_file_name} ]; then
		cp -L ${BUILD_DIR}/${run_fm_file_name} ./${run_fm_file_name}
	fi

	mount --rbind /dev ./dev
	mount --make-rslave ./dev
	mount -t proc /proc ./proc

	local driver_version="latest"
	if echo "$NVIDIA_GPU_STACK" | grep -q '\<latest\>'; then
		driver_version="latest"
	elif echo "$NVIDIA_GPU_STACK" | grep -q '\<lts\>'; then
		driver_version="lts"
	fi

	chroot . /bin/bash -c "/nvidia_chroot.sh $(uname -r) ${run_file_name} ${run_fm_file_name} ${ARCH} ${driver_version}"

	umount -R ./dev
	umount ./proc

	rm ./nvidia_chroot.sh
	rm ./*.deb

	tar cfa "${BUILD_DIR}"/kata-static-rootfs-nvidia-gpu-stage-one.tar.zst --remove-files -- *

	popd >> /dev/null

	pushd "${BUILD_DIR}" >> /dev/null
	curl -LO https://github.com/upx/upx/releases/download/v4.2.4/upx-4.2.4-amd64_linux.tar.xz
	tar xvf upx-4.2.4-amd64_linux.tar.xz
	popd >> /dev/null
}

chisseled_iptables() {
|
||||
echo "nvidia: chisseling iptables"
|
||||
cp -a "${stage_one}"/usr/sbin/xtables-nft-multi sbin/.
|
||||
|
||||
ln -s ../sbin/xtables-nft-multi sbin/iptables-restore
|
||||
ln -s ../sbin/xtables-nft-multi sbin/iptables-save
|
||||
|
||||
libdir="lib/x86_64-linux-gnu"
|
||||
cp -a "${stage_one}"/${libdir}/libmnl.so.0* lib/.
|
||||
|
||||
libdir="usr/lib/x86_64-linux-gnu"
|
||||
cp -a "${stage_one}"/${libdir}/libnftnl.so.11* lib/.
|
||||
cp -a "${stage_one}"/${libdir}/libxtables.so.12* lib/.
|
||||
}
|
||||
|
||||
chisseled_nvswitch() {
|
||||
echo "nvidia: chisseling NVSwitch"
|
||||
echo "nvidia: not implemented yet"
|
||||
exit 1
|
||||
}
|
||||
|
||||
chisseled_dcgm() {
|
||||
echo "nvidia: chisseling DCGM"
|
||||
|
||||
mkdir -p etc/dcgm-exporter
|
||||
libdir="lib/x86_64-linux-gnu"
|
||||
|
||||
cp -a "${stage_one}"/usr/${libdir}/libdcgm.* ${libdir}/.
|
||||
cp -a "${stage_one}"/${libdir}/libgcc_s.so.1* ${libdir}/.
|
||||
cp -a "${stage_one}"/usr/bin/nv-hostengine bin/.
|
||||
|
||||
tar xvf "${BUILD_DIR}"/kata-static-nvidia-dcgm-exporter.tar.zst -C .
|
||||
}
|
||||
|
||||
# copute always includes utility per default
|
||||
chisseled_compute() {
|
||||
echo "nvidia: chisseling GPU"
|
||||
|
||||
cp -a "${stage_one}"/nvidia_driver_version .
|
||||
|
||||
tar xvf "${BUILD_DIR}"/kata-static-nvidia-gpu-admin-tools.tar.zst -C .
|
||||
|
||||
cp -a "${stage_one}"/lib/modules/* lib/modules/.
|
||||
|
||||
libdir="lib/x86_64-linux-gnu"
|
||||
cp -a "${stage_one}"/${libdir}/libdl.so.2* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libz.so.1* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libpthread.so.0* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libresolv.so.2* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libc.so.6* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libm.so.6* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/librt.so.1* lib/x86_64-linux-gnu/.
|
||||
|
||||
libdir="lib64"
|
||||
cp -aL "${stage_one}"/${libdir}/ld-linux-x86-64.so.* lib64/.
|
||||
|
||||
libdir="usr/lib/x86_64-linux-gnu"
|
||||
cp -a "${stage_one}"/${libdir}/libnvidia-ml.so.* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libcuda.so.* lib/x86_64-linux-gnu/.
|
||||
cp -a "${stage_one}"/${libdir}/libnvidia-cfg.so.* lib/x86_64-linux-gnu/.
|
||||
|
||||
# basich GPU admin tools
|
||||
cp -a "${stage_one}"/usr/bin/nvidia-persistenced bin/.
|
||||
cp -a "${stage_one}"/usr/bin/nvidia-smi bin/.
|
||||
cp -a "${stage_one}"/usr/bin/nvidia-ctk bin/.
|
||||
cp -a "${stage_one}"/usr/bin/nvidia-cdi-hook bin/.
|
||||
ln -s ../bin usr/bin
|
||||
}
|
||||
|
||||
chisseled_gpudirect() {
|
||||
echo "nvidia: chisseling GPUDirect"
|
||||
echo "nvidia: not implemented yet"
|
||||
exit 1
|
||||
}
|
||||
|
||||
chisseled_init() {
|
||||
echo "nvidia: chisseling init"
|
||||
tar xvf "${BUILD_DIR}"/kata-static-busybox.tar.xz -C .
|
||||
|
||||
mkdir -p dev etc proc run/cdi sys tmp usr var lib/modules lib/firmware \
|
||||
usr/share/nvidia lib/x86_64-linux-gnu lib64
|
||||
|
||||
ln -sf ../run var/run
|
||||
|
||||
tar xvf "${BUILD_DIR}"/kata-static-nvidia-nvrc.tar.zst -C .
|
||||
|
||||
ln -sf /bin/NVRC init
|
||||
|
||||
cp -a "${stage_one}"/sbin/init sbin/.
|
||||
cp -a "${stage_one}"/etc/kata-opa etc/.
|
||||
cp -a "${stage_one}"/etc/resolv.conf etc/.
|
||||
cp -a "${stage_one}"/supported-gpu.devids .
|
||||
|
||||
cp -a "${stage_one}"/lib/firmware/nvidia lib/firmware/.
|
||||
cp -a "${stage_one}"/sbin/ldconfig.real sbin/ldconfig
|
||||
}
|
||||
|
||||
compress_rootfs() {
|
||||
echo "nvidia: compressing rootfs"
|
||||
|
||||
# For some unobvious reason libc has executable bit set
|
||||
# clean this up otherwise the find -executable will not work correctly
|
||||
find . -type f -name "*.so.*" | while IFS= read -r file; do
|
||||
chmod -x "${file}"
|
||||
strip "${file}"
|
||||
done
|
||||
|
||||
find . -type f -executable | while IFS= read -r file; do
|
||||
strip "${file}"
|
||||
${BUILD_DIR}/upx-4.2.4-amd64_linux/upx --best --lzma "${file}"
|
||||
done
|
||||
|
||||
# While I was playing with compression the executable flag on
|
||||
# /lib64/ld-linux-x86-64.so.2 was lost...
|
||||
# Since this is the program interpreter, it needs to be executable
|
||||
# as well.. sigh
|
||||
chmod +x lib64/ld-linux-x86-64.so.2
|
||||
|
||||
}
|
||||
|
||||
toggle_debug() {
|
||||
if echo "$NVIDIA_GPU_STACK" | grep -q '\<debug\>'; then
|
||||
export DEBUG="true"
|
||||
fi
|
||||
}

setup_nvidia_gpu_rootfs_stage_two() {
    readonly stage_one="${BUILD_DIR:?}/rootfs-${VARIANT}-stage-one"
    readonly stage_two="${ROOTFS_DIR:?}"
    readonly stack="${NVIDIA_GPU_STACK:?}"

    echo "nvidia: chisseling the following stack components: $stack"

    [ -e "${stage_one}" ] && rm -rf "${stage_one}"
    [ ! -e "${stage_one}" ] && mkdir -p "${stage_one}"

    tar -C "${stage_one}" -xf "${BUILD_DIR}"/kata-static-rootfs-nvidia-gpu-stage-one.tar.zst

    pushd "${stage_two}" >> /dev/null

    toggle_debug
    chisseled_init
    chisseled_iptables

    IFS=',' read -r -a stack_components <<< "$NVIDIA_GPU_STACK"

    for component in "${stack_components[@]}"; do
        if [ "$component" = "compute" ]; then
            echo "nvidia: processing \"compute\" component"
            chisseled_compute
        elif [ "$component" = "dcgm" ]; then
            echo "nvidia: processing DCGM component"
            chisseled_dcgm
        elif [ "$component" = "nvswitch" ]; then
            echo "nvidia: processing NVSwitch component"
            chisseled_nvswitch
        elif [ "$component" = "gpudirect" ]; then
            echo "nvidia: processing GPUDirect component"
            chisseled_gpudirect
        fi
    done

    compress_rootfs

    chroot . ldconfig

    popd >> /dev/null
}

@@ -43,6 +43,10 @@ if [[ "${AGENT_POLICY}" == "yes" ]]; then
    agent_policy_file="$(readlink -f -v "${AGENT_POLICY_FILE:-"${script_dir}/../../../src/kata-opa/allow-all.rego"}")"
fi

NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-""}
nvidia_rootfs="${script_dir}/nvidia/nvidia_rootfs.sh"
source "$nvidia_rootfs"

# For cross build
CROSS_BUILD=${CROSS_BUILD:-false}
BUILDX=""
@@ -516,6 +520,7 @@ build_rootfs_distro()
        --env EXTRA_PKGS="${EXTRA_PKGS}" \
        --env OSBUILDER_VERSION="${OSBUILDER_VERSION}" \
        --env OS_VERSION="${OS_VERSION}" \
        --env VARIANT="${VARIANT}" \
        --env INSIDE_CONTAINER=1 \
        --env SECCOMP="${SECCOMP}" \
        --env SELINUX="${SELINUX}" \
@@ -525,6 +530,7 @@ build_rootfs_distro()
        --env HOME="/root" \
        --env AGENT_POLICY="${AGENT_POLICY}" \
        --env CONFIDENTIAL_GUEST="${CONFIDENTIAL_GUEST}" \
        --env NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK}" \
        -v "${repo_dir}":"/kata-containers" \
        -v "${ROOTFS_DIR}":"/rootfs" \
        -v "${script_dir}/../scripts":"/scripts" \
@@ -795,9 +801,6 @@ detect_host_distro()
        "*suse*")
            distro="suse"
            ;;
        "clear-linux-os")
            distro="clearlinux"
            ;;
        *)
            distro="$ID"
            ;;
@@ -822,6 +825,18 @@ main()

    init="${ROOTFS_DIR}/sbin/init"
    setup_rootfs

    if [ "${VARIANT}" = "nvidia-gpu" ]; then
        setup_nvidia_gpu_rootfs_stage_one
        setup_nvidia_gpu_rootfs_stage_two
        return $?
    fi

    if [ "${VARIANT}" = "nvidia-gpu-confidential" ]; then
        setup_nvidia_gpu_rootfs_stage_one "confidential"
        setup_nvidia_gpu_rootfs_stage_two "confidential"
        return $?
    fi
}

main $*
@@ -50,6 +50,7 @@ RUN apt-get update && \
        xz-utils \
        pip \
        python3-dev \
        libclang-dev \
        zstd && \
    apt-get clean && rm -rf /var/lib/apt/lists/ && \
    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain ${RUST_TOOLCHAIN}
@@ -12,7 +12,6 @@ for distro in $(${sdir}/../rootfs-builder/rootfs.sh -l); do
    distros+=("${distro}")
done
test_distros=()
test_distros+=("clearlinux")
test_distros+=("ubuntu")

skipForRustDistros=()
@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: "3.11.0"
version: "3.12.0"

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "3.11.0"
appVersion: "3.12.0"
@@ -1,22 +1,38 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-{{ .Values.env.multiInstallSuffix }}
{{- else }}
  name: {{ .Chart.Name }}
{{- end }}
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
{{- if .Values.env.multiInstallSuffix }}
      name: {{ .Chart.Name }}-{{ .Values.env.multiInstallSuffix }}
{{- else }}
      name: {{ .Chart.Name }}
{{- end }}
  template:
    metadata:
      labels:
{{- if .Values.env.multiInstallSuffix }}
        name: {{ .Chart.Name }}-{{ .Values.env.multiInstallSuffix }}
{{- else }}
        name: {{ .Chart.Name }}
{{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 6 }}
      {{- end }}
{{- if .Values.env.multiInstallSuffix }}
      serviceAccountName: {{ .Chart.Name }}-sa-{{ .Values.env.multiInstallSuffix }}
{{- else }}
      serviceAccountName: {{ .Chart.Name }}-sa
{{- end }}
      hostPID: true
      containers:
        - name: kube-kata
@@ -50,6 +66,8 @@ spec:
            value: {{ .Values.env.pullTypeMapping | quote }}
          - name: INSTALLATION_PREFIX
            value: {{ .Values.env.installationPrefix | quote }}
          - name: MULTI_INSTALL_SUFFIX
            value: {{ .Values.env.multiInstallSuffix | quote }}
          {{- with .Values.env.hostOS }}
          - name: HOST_OS
            value: {{ . | quote }}
@@ -2,13 +2,21 @@
apiVersion: v1
kind: ServiceAccount
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-sa-{{ .Values.env.multiInstallSuffix }}
{{- else }}
  name: {{ .Chart.Name }}-sa
{{- end }}
  namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-role-{{ .Values.env.multiInstallSuffix }}
{{- else }}
  name: {{ .Chart.Name }}-role
{{- end }}
rules:
  - apiGroups: [""]
    resources: ["nodes"]
@@ -20,12 +28,24 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-rb-{{ .Values.env.multiInstallSuffix }}
{{- else }}
  name: {{ .Chart.Name }}-rb
{{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-role-{{ .Values.env.multiInstallSuffix }}
{{- else }}
  name: {{ .Chart.Name }}-role
{{- end }}
subjects:
  - kind: ServiceAccount
{{- if .Values.env.multiInstallSuffix }}
    name: {{ .Chart.Name }}-sa-{{ .Values.env.multiInstallSuffix }}
{{- else }}
    name: {{ .Chart.Name }}-sa
{{- end }}
    namespace: {{ .Release.Namespace }}
@@ -1,7 +1,11 @@
apiVersion: v1
kind: ServiceAccount
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-sa-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
  name: {{ .Chart.Name }}-sa-cleanup
{{- end }}
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": post-delete
@@ -11,7 +15,11 @@ metadata:
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-role-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
  name: {{ .Chart.Name }}-role-cleanup
{{- end }}
  annotations:
    "helm.sh/hook": post-delete
    "helm.sh/hook-weight": "-2"
@@ -23,11 +31,18 @@ rules:
  - apiGroups: ["node.k8s.io"]
    resources: ["runtimeclasses"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-rb-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
  name: {{ .Chart.Name }}-rb-cleanup
{{- end }}
  annotations:
    "helm.sh/hook": post-delete
    "helm.sh/hook-weight": "-1"
@@ -35,16 +50,28 @@ metadata:
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-role-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
  name: {{ .Chart.Name }}-role-cleanup
{{- end }}
subjects:
  - kind: ServiceAccount
{{- if .Values.env.multiInstallSuffix }}
    name: {{ .Chart.Name }}-sa-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
    name: {{ .Chart.Name }}-sa-cleanup
{{- end }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
{{- if .Values.env.multiInstallSuffix }}
  name: {{ .Chart.Name }}-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
  name: {{ .Chart.Name }}-cleanup
{{- end }}
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": post-delete
@@ -56,7 +83,11 @@ spec:
      labels:
        role: cleanup
    spec:
{{- if .Values.env.multiInstallSuffix }}
      serviceAccountName: {{ .Chart.Name }}-sa-{{ .Values.env.multiInstallSuffix }}-cleanup
{{- else }}
      serviceAccountName: {{ .Chart.Name }}-sa-cleanup
{{- end }}
      hostPID: true
      containers:
        - name: kube-kata-cleanup
@@ -90,6 +121,10 @@ spec:
            value: {{ .Values.env.pullTypeMapping | quote }}
          - name: HELM_POST_DELETE_HOOK
            value: "true"
          - name: INSTALLATION_PREFIX
            value: {{ .Values.env.installationPrefix | quote }}
          - name: MULTI_INSTALL_SUFFIX
            value: {{ .Values.env.multiInstallSuffix | quote }}
          {{- with .Values.env.hostOS }}
          - name: HOST_OS
            value: {{ . | quote }}
@@ -18,3 +18,4 @@ env:
  pullTypeMapping: ""
  installationPrefix: ""
  hostOS: ""
  multiInstallSuffix: ""

@@ -52,6 +52,8 @@ spec:
            value: ""
          - name: INSTALLATION_PREFIX
            value: ""
          - name: MULTI_INSTALL_SUFFIX
            value: ""
        securityContext:
          privileged: true
        volumeMounts:
@@ -9,4 +9,4 @@ spec:
  volumes:
    - name: containerd-conf
      hostPath:
        path: /etc/k0s/containerd.d/
        path: /etc/k0s/
@@ -16,6 +16,9 @@ rules:
  - apiGroups: ["node.k8s.io"]
    resources: ["runtimeclasses"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -54,6 +54,11 @@ define BUILD
	$(MK_DIR)/kata-deploy-binaries-in-docker.sh $(if $(V),,-s) --build=$1
endef

define DUMMY
	$(call BUILD,"dummy")
	mv $(MK_DIR)/build/kata-static-dummy.tar.xz $(MK_DIR)/build/kata-static-$(patsubst %-tarball,%,$1).tar.xz
endef
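
# A hypothetical consumer of the DUMMY define (target name illustrative): a
# component that cannot be built on a given platform could still satisfy its
# tarball target with the renamed dummy artefact, e.g.
#
# some-unbuildable-tarball:
#	$(call DUMMY,$@)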

kata-tarball: | all-parallel merge-builds

copy-scripts-for-the-agent-build:
@@ -80,8 +85,9 @@ agent-tarball: copy-scripts-for-the-agent-build
agent-ctl-tarball: copy-scripts-for-the-tools-build
	${MAKE} $@-build

BUSYBOX_CONF_FILE ?= busybox.nvidia.conf
busybox-tarball:
	${MAKE} $@-build
	${MAKE} BUSYBOX_CONF_FILE=${BUSYBOX_CONF_FILE} $@-build

coco-guest-components-tarball:
	${MAKE} $@-build
@@ -92,6 +98,9 @@ cloud-hypervisor-tarball:
cloud-hypervisor-glibc-tarball:
	${MAKE} $@-build

csi-kata-directvolume-tarball: copy-scripts-for-the-tools-build
	${MAKE} $@-build

firecracker-tarball:
	${MAKE} $@-build
@@ -163,6 +172,17 @@ rootfs-initrd-tarball: agent-tarball

runk-tarball: copy-scripts-for-the-tools-build
	${MAKE} $@-build
rootfs-nvidia-gpu-image-tarball: agent-tarball busybox-tarball
	${MAKE} $@-build

rootfs-nvidia-gpu-initrd-tarball: agent-tarball busybox-tarball
	${MAKE} $@-build

rootfs-nvidia-gpu-confidential-image-tarball: agent-tarball busybox-tarball pause-image-tarball coco-guest-components-tarball kernel-nvidia-gpu-confidential-tarball
	${MAKE} $@-build

rootfs-nvidia-gpu-confidential-initrd-tarball: agent-tarball busybox-tarball pause-image-tarball coco-guest-components-tarball kernel-nvidia-gpu-confidential-tarball
	${MAKE} $@-build

shim-v2-tarball:
	${MAKE} $@-build
@@ -57,6 +57,9 @@ RUN apt-get update && \
        cpio \
        gcc \
        unzip \
        git \
        make \
        wget \
        xz-utils && \
    if [ "${ARCH}" != "$(uname -m)" ] && [ "${ARCH}" == "s390x" ]; then \
        apt-get install -y --no-install-recommends \
@@ -102,6 +102,7 @@ MEASURED_ROOTFS="${MEASURED_ROOTFS:-}"
PULL_TYPE="${PULL_TYPE:-default}"
USE_CACHE="${USE_CACHE:-}"
BUSYBOX_CONF_FILE=${BUSYBOX_CONF_FILE:-}
NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK:-}"

docker run \
    -v $HOME/.docker:/root/.docker \
@@ -131,6 +132,7 @@ docker run \
    --env PULL_TYPE="${PULL_TYPE}" \
    --env USE_CACHE="${USE_CACHE}" \
    --env BUSYBOX_CONF_FILE="${BUSYBOX_CONF_FILE}" \
    --env NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK}" \
    --env AA_KBC="${AA_KBC:-}" \
    --env HKD_PATH="$(realpath "${HKD_PATH:-}" 2> /dev/null || true)" \
    --env SE_KERNEL_PARAMS="${SE_KERNEL_PARAMS:-}" \
@@ -99,6 +99,7 @@ options:
    coco-guest-components
    cloud-hypervisor
    cloud-hypervisor-glibc
    csi-kata-directvolume
    firecracker
    genpolicy
    kata-ctl
@@ -373,7 +374,7 @@ install_image() {
    os_name="$(get_from_kata_deps ".assets.image.architecture.${ARCH}.${variant}.name")"
    os_version="$(get_from_kata_deps ".assets.image.architecture.${ARCH}.${variant}.version")"

    if [ "${variant}" == "confidential" ]; then
    if [[ "${variant}" == *confidential ]]; then
        export COCO_GUEST_COMPONENTS_TARBALL="$(get_coco_guest_components_tarball_path)"
        export PAUSE_IMAGE_TARBALL="$(get_pause_image_tarball_path)"
    fi
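    # Note: the glob match above pulls the CoCo components in for the plain
    # "confidential" variant as well as any "*-confidential" one, e.g.
    # "nvidia-gpu-confidential".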

@@ -448,7 +449,7 @@ install_initrd() {
    os_name="$(get_from_kata_deps ".assets.initrd.architecture.${ARCH}.${variant}.name")"
    os_version="$(get_from_kata_deps ".assets.initrd.architecture.${ARCH}.${variant}.version")"

    if [ "${variant}" == "confidential" ]; then
    if [[ "${variant}" == *confidential ]]; then
        export COCO_GUEST_COMPONENTS_TARBALL="$(get_coco_guest_components_tarball_path)"
        export PAUSE_IMAGE_TARBALL="$(get_pause_image_tarball_path)"
    fi
@@ -470,35 +471,57 @@ install_initrd_confidential() {
    install_initrd "confidential"
}
#Instal NVIDIA GPU image
# For all nvidia_gpu targets we can customize the stack that is enabled
# in the VM by setting the NVIDIA_GPU_STACK= environment variable
#
# latest | lts -> use the latest and greatest driver or lts release
# debug        -> enable debugging support
# compute      -> enable the compute GPU stack, includes utility
# graphics     -> enable the graphics GPU stack, includes compute
# dcgm         -> enable the DCGM stack + DCGM exporter
# nvswitch     -> enable DGX like systems
# gpudirect    -> enable use-cases like GPUDirect RDMA, GPUDirect GDS
# dragonball   -> enable dragonball support
#
# The full stack can be enabled by setting all the options like:
#
# NVIDIA_GPU_STACK="latest,compute,dcgm,nvswitch,gpudirect"
#
# Install NVIDIA GPU image
install_image_nvidia_gpu() {
    export AGENT_POLICY="yes"
    export AGENT_INIT="yes"
    export EXTRA_PKGS="apt udev"
    export EXTRA_PKGS="apt"
    NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-"latest,compute,dcgm"}
    install_image "nvidia-gpu"
}
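
# An illustrative invocation (target name taken from the Makefile wiring
# above): build a GPU image with the LTS driver and debug output enabled:
#
#   NVIDIA_GPU_STACK="lts,compute,debug" make rootfs-nvidia-gpu-image-tarball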

#Install NVIDIA GPU initrd
# Install NVIDIA GPU initrd
install_initrd_nvidia_gpu() {
    export AGENT_POLICY="yes"
    export AGENT_INIT="yes"
    export EXTRA_PKGS="apt udev"
    export EXTRA_PKGS="apt"
    NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-"latest,compute,dcgm"}
    install_initrd "nvidia-gpu"
}

#Instal NVIDIA GPU confidential image
# Install NVIDIA GPU confidential image
install_image_nvidia_gpu_confidential() {
    export AGENT_POLICY="yes"
    export AGENT_INIT="yes"
    export EXTRA_PKGS="apt udev"
    export EXTRA_PKGS="apt"
    # TODO: export MEASURED_ROOTFS=yes
    NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-"latest,compute"}
    install_image "nvidia-gpu-confidential"
}

#Install NVIDIA GPU confidential initrd
# Install NVIDIA GPU confidential initrd
install_initrd_nvidia_gpu_confidential() {
    export AGENT_POLICY="yes"
    export AGENT_INIT="yes"
    export EXTRA_PKGS="apt udev"
    export EXTRA_PKGS="apt"
    # TODO: export MEASURED_ROOTFS=yes
    NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-"latest,compute"}
    install_initrd "nvidia-gpu-confidential"
}
@@ -1000,6 +1023,7 @@ install_tools_helper() {

    tool_binary=${tool}
    [ ${tool} = "agent-ctl" ] && tool_binary="kata-agent-ctl"
    [ ${tool} = "csi-kata-directvolume" ] && tool_binary="directvolplugin"
    [ ${tool} = "trace-forwarder" ] && tool_binary="kata-trace-forwarder"
    binary=$(find ${repo_root_dir}/src/tools/${tool}/ -type f -name ${tool_binary})

@@ -1021,6 +1045,7 @@ install_tools_helper() {

    info "Install static ${tool_binary}"
    mkdir -p "${destdir}/opt/kata/bin/"
    [ ${tool} = "csi-kata-directvolume" ] && tool_binary="csi-kata-directvolume"
    install -D --mode ${binary_permissions} ${binary} "${destdir}/opt/kata/bin/${tool_binary}"
}

@@ -1032,6 +1057,10 @@ install_genpolicy() {
    install_tools_helper "genpolicy"
}

install_csi_kata_directvolume() {
    install_tools_helper "csi-kata-directvolume"
}

install_kata_ctl() {
    install_tools_helper "kata-ctl"
}
@@ -1109,6 +1138,8 @@ handle_build() {

        cloud-hypervisor-glibc) install_clh_glibc ;;

        csi-kata-directvolume) install_csi_kata_directvolume ;;

        firecracker) install_firecracker ;;

        genpolicy) install_genpolicy ;;
@@ -1122,6 +1153,7 @@ handle_build() {
        kernel-confidential) install_kernel_confidential ;;

        kernel-dragonball-experimental) install_kernel_dragonball_experimental ;;

        kernel-nvidia-gpu-dragonball-experimental) install_kernel_nvidia_gpu_dragonball_experimental ;;

        kernel-nvidia-gpu) install_kernel_nvidia_gpu ;;
@@ -1168,6 +1200,10 @@ handle_build() {

        virtiofsd) install_virtiofsd ;;

        dummy)
            tar cvfJ ${final_tarball_path} --files-from /dev/null
            ;;

        *)
            die "Invalid build target ${build_target}"
            ;;
@@ -1323,6 +1359,7 @@ main() {
        agent-ctl
        cloud-hypervisor
        coco-guest-components
        csi-kata-directvolume
        firecracker
        genpolicy
        kata-ctl
@@ -1342,6 +1379,7 @@ main() {
        shim-v2
        trace-forwarder
        virtiofsd
        dummy
    )
    silent=false
    while getopts "hs-:" opt; do
@@ -14,6 +14,7 @@ crio_drop_in_conf_file_debug="${crio_drop_in_conf_dir}/100-debug"
containerd_conf_file="/etc/containerd/config.toml"
containerd_conf_file_backup="${containerd_conf_file}.bak"
containerd_conf_tmpl_file=""
use_containerd_drop_in_conf_file="false"

IFS=' ' read -a shims <<< "$SHIMS"
default_shim="$DEFAULT_SHIM"
@@ -44,6 +45,14 @@ if [ -n "${INSTALLATION_PREFIX}" ]; then
    # as, otherwise, we'd have it doubled there, as: `/foo/bar//opt/kata`
    dest_dir="${INSTALLATION_PREFIX}${default_dest_dir}"
fi

MULTI_INSTALL_SUFFIX="${MULTI_INSTALL_SUFFIX:-}"
if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
    dest_dir="${dest_dir}-${MULTI_INSTALL_SUFFIX}"
    crio_drop_in_conf_file="${crio_drop_in_conf_file}-${MULTI_INSTALL_SUFFIX}"
fi
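# For illustration (values assumed): INSTALLATION_PREFIX=/usr/local together
# with MULTI_INSTALL_SUFFIX=blue yields dest_dir=/usr/local/opt/kata-blue.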
containerd_drop_in_conf_file="${dest_dir}/containerd/config.d/kata-deploy.toml"

# Here, again, there's no `/` between /host and ${dest_dir}, otherwise we'd have it
# doubled here as well, as: `/host//opt/kata`
host_install_dir="/host${dest_dir}"
@@ -80,10 +89,29 @@ function create_runtimeclasses() {

    for shim in "${shims[@]}"; do
        echo "Creating the kata-${shim} runtime class"
        if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
            sed -i -e "s|kata-${shim}|kata-${shim}-${MULTI_INSTALL_SUFFIX}|g" /opt/kata-artifacts/runtimeclasses/kata-${shim}.yaml
        fi
        kubectl apply -f /opt/kata-artifacts/runtimeclasses/kata-${shim}.yaml

        if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
            # Move the file back to its original state, as the deletion is done
            # differently in the helm and in the kata-deploy daemonset case, meaning
            # that we should assume those files are always as they were during the
            # time the image was built
            sed -i -e "s|kata-${shim}-${MULTI_INSTALL_SUFFIX}|kata-${shim}|g" /opt/kata-artifacts/runtimeclasses/kata-${shim}.yaml
        fi
    done
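    # Example (suffix value assumed): with MULTI_INSTALL_SUFFIX=blue the
    # applied runtime class for the qemu shim ends up named kata-qemu-blue.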

    if [[ "${CREATE_DEFAULT_RUNTIMECLASS}" == "true" ]]; then
        if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
            warn "CREATE_DEFAULT_RUNTIMECLASS is being ignored!"
            warn "multi installation does not support creating a default runtime class"

            return
        fi

        echo "Creating the kata runtime class for the default shim (an alias for kata-${default_shim})"
        cp /opt/kata-artifacts/runtimeclasses/kata-${default_shim}.yaml /tmp/kata.yaml
        sed -i -e 's/name: kata-'${default_shim}'/name: kata/g' /tmp/kata.yaml
@@ -97,11 +125,20 @@ function delete_runtimeclasses() {

    for shim in "${shims[@]}"; do
        echo "Deleting the kata-${shim} runtime class"
        if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
            sed -i -e "s|kata-${shim}|kata-${shim}-${MULTI_INSTALL_SUFFIX}|g" /opt/kata-artifacts/runtimeclasses/kata-${shim}.yaml
        fi
        kubectl delete -f /opt/kata-artifacts/runtimeclasses/kata-${shim}.yaml
    done

    if [[ "${CREATE_DEFAULT_RUNTIMECLASS}" == "true" ]]; then
        if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
            # There's nothing to be done here, as a default runtime class is never created
            # for multi installations
            return
        fi

        echo "Deleting the kata runtime class for the default shim (an alias for kata-${default_shim})"
        cp /opt/kata-artifacts/runtimeclasses/kata-${default_shim}.yaml /tmp/kata.yaml
        sed -i -e 's/name: kata-'${default_shim}'/name: kata/g' /tmp/kata.yaml
@@ -140,6 +177,37 @@ function get_container_runtime() {
    fi
}
function is_containerd_capable_of_using_drop_in_files() {
    local runtime="$1"

    if [ "$runtime" == "crio" ]; then
        # This should never happen but better be safe than sorry
        echo "false"
        return
    fi

    if [[ "$runtime" =~ ^(k0s-worker|k0s-controller)$ ]]; then
        # k0s does the work of using drop-in files better than any other "k8s distro", so
        # we don't mess with what's already being done correctly.
        echo "false"
        return
    fi

    local version_major=$(kubectl get node $NODE_NAME -o jsonpath='{.status.nodeInfo.containerRuntimeVersion}' | grep -oE '[0-9]+\.[0-9]+' | cut -d'.' -f1)
    if [ $version_major -lt 2 ]; then
        # Only containerd 2.0 does the merge of the plugins section from different snippets,
        # instead of overwriting the whole section, which makes things considerably more
        # complicated for us to deal with.
        #
        # It's been discussed with the containerd community, and the patch needed will **NOT** be
        # backported to the 1.7 release, as that would break the behaviour of an existing release.
        echo "false"
        return
    fi

    echo "true"
}
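
# A sketch (paths assumed) of the wiring this check enables: the main
# containerd config merely imports the kata-deploy drop-in, which carries the
# runtime tables written by configure_containerd_runtime below, e.g.
#
#   /etc/containerd/config.toml:
#       imports = ["/opt/kata/containerd/config.d/kata-deploy.toml"]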

function get_kata_containers_config_path() {
    local shim="$1"

@@ -258,6 +326,10 @@ function adjust_qemu_cmdline() {
    # ${dest_dir}/opt/kata/share/kata-qemu/qemu
    # ${dest_dir}/opt/kata/share/kata-qemu-snp-experimental/qemu
    [[ "${shim}" =~ ^(qemu-snp|qemu-nvidia-snp)$ ]] && qemu_share=${shim}-experimental

    # Both qemu and qemu-coco-dev use exactly the same QEMU, so we can adjust
    # the shim on the qemu-coco-dev case to qemu
    [[ "${shim}" =~ ^(qemu|qemu-coco-dev)$ ]] && qemu_share="qemu"

    qemu_binary=$(tomlq '.hypervisor.qemu.path' ${config_path} | tr -d \")
    qemu_binary_script="${qemu_binary}-installation-prefix"
@@ -352,13 +424,13 @@ function install_artifacts() {
        esac
    fi

    if [ -n "${INSTALLATION_PREFIX}" ]; then
    if [ "${dest_dir}" != "${default_dest_dir}" ]; then
        # We could always do this sed, regardless, but I have a strong preference
        # on not touching the configuration files unless extremely needed
        sed -i -e "s|${default_dest_dir}|${dest_dir}|g" "${kata_config_file}"

        # Let's only adjust qemu_cmdline for the QEMUs that we build and ship ourselves
        [[ "${shim}" =~ ^(qemu|qemu-snp|qemu-nvidia-gpu|qemu-nvidia-gpu-snp|qemu-sev|qemu-se)$ ]] && \
        [[ "${shim}" =~ ^(qemu|qemu-snp|qemu-nvidia-gpu|qemu-nvidia-gpu-snp|qemu-sev|qemu-se|qemu-coco-dev)$ ]] && \
            adjust_qemu_cmdline "${shim}" "${kata_config_file}"
    fi
done
@@ -410,7 +482,11 @@ function configure_cri_runtime() {

function configure_crio_runtime() {
    local shim="${1}"
    local runtime="kata-${shim}"
    local adjusted_shim_to_multi_install="${shim}"
    if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
        adjusted_shim_to_multi_install="${shim}-${MULTI_INSTALL_SUFFIX}"
    fi
    local runtime="kata-${adjusted_shim_to_multi_install}"
    local configuration="configuration-${shim}"

    local config_path=$(get_kata_containers_config_path "${shim}")
@@ -487,30 +563,47 @@ EOF

function configure_containerd_runtime() {
    local shim="$2"
    local runtime="kata-${shim}"
    local adjusted_shim_to_multi_install="${shim}"
    if [ -n "${MULTI_INSTALL_SUFFIX}" ]; then
        adjusted_shim_to_multi_install="${shim}-${MULTI_INSTALL_SUFFIX}"
    fi
    local runtime="kata-${adjusted_shim_to_multi_install}"
    local configuration="configuration-${shim}"
    local pluginid=cri
    local configuration_file="${containerd_conf_file}"

    # if we are running k0s auto containerd.toml generation, the base template is by default version 2
    # we can safely assume to reference the newer version of cri
    if grep -q "version = 2\>" $containerd_conf_file || [ "$1" == "k0s-worker" ] || [ "$1" == "k0s-controller" ]; then
    # Properly set the configuration file in case drop-in files are supported
    if [ $use_containerd_drop_in_conf_file = "true" ]; then
        configuration_file="/host${containerd_drop_in_conf_file}"
    fi

    local containerd_root_conf_file="$containerd_conf_file"
    if [[ "$1" =~ ^(k0s-worker|k0s-controller)$ ]]; then
        containerd_root_conf_file="/etc/containerd/containerd.toml"
    fi

    if grep -q "version = 2\>" $containerd_root_conf_file; then
        pluginid=\"io.containerd.grpc.v1.cri\"
    fi

    if grep -q "version = 3\>" $containerd_root_conf_file; then
        pluginid=\"io.containerd.cri.v1.runtime\"
    fi
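    # For illustration (paths assumed): with shim "qemu" against a containerd
    # v3 config, the tomlq calls below end up emitting a table like
    #
    #   [plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata-qemu]
    #     runtime_type = "io.containerd.kata-qemu.v2"
    #     privileged_without_host_devices = true
    #     pod_annotations = ["io.katacontainers.*"]
    #     [plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata-qemu.options]
    #       ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"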

    local runtime_table=".plugins.${pluginid}.containerd.runtimes.\"${runtime}\""
    local runtime_options_table="${runtime_table}.options"
    local runtime_type=\"io.containerd."${runtime}".v2\"
    local runtime_config_path=\"$(get_kata_containers_config_path "${shim}")/${configuration}.toml\"
    local runtime_path=\"$(get_kata_containers_runtime_path "${shim}")\"

    tomlq -i -t $(printf '%s.runtime_type=%s' ${runtime_table} ${runtime_type}) ${containerd_conf_file}
    tomlq -i -t $(printf '%s.runtime_path=%s' ${runtime_table} ${runtime_path}) ${containerd_conf_file}
    tomlq -i -t $(printf '%s.privileged_without_host_devices=true' ${runtime_table}) ${containerd_conf_file}
    tomlq -i -t $(printf '%s.pod_annotations=["io.katacontainers.*"]' ${runtime_table}) ${containerd_conf_file}
    tomlq -i -t $(printf '%s.ConfigPath=%s' ${runtime_options_table} ${runtime_config_path}) ${containerd_conf_file}
    tomlq -i -t $(printf '%s.runtime_type=%s' ${runtime_table} ${runtime_type}) ${configuration_file}
    tomlq -i -t $(printf '%s.runtime_path=%s' ${runtime_table} ${runtime_path}) ${configuration_file}
    tomlq -i -t $(printf '%s.privileged_without_host_devices=true' ${runtime_table}) ${configuration_file}
    tomlq -i -t $(printf '%s.pod_annotations=["io.katacontainers.*"]' ${runtime_table}) ${configuration_file}
    tomlq -i -t $(printf '%s.ConfigPath=%s' ${runtime_options_table} ${runtime_config_path}) ${configuration_file}

    if [ "${DEBUG}" == "true" ]; then
        tomlq -i -t '.debug.level = "debug"' ${containerd_conf_file}
        tomlq -i -t '.debug.level = "debug"' ${configuration_file}
    fi

    if [ -n "${SNAPSHOTTER_HANDLER_MAPPING}" ]; then
@@ -522,7 +615,7 @@ function configure_containerd_runtime() {
            fi

            value="${m#*$snapshotters_delimiter}"
            tomlq -i -t $(printf '%s.snapshotter="%s"' ${runtime_table} ${value}) ${containerd_conf_file}
            tomlq -i -t $(printf '%s.snapshotter="%s"' ${runtime_table} ${value}) ${configuration_file}
            break
        done
    fi
@@ -534,11 +627,16 @@ function configure_containerd() {

    mkdir -p /etc/containerd/

    if [ -f "$containerd_conf_file" ]; then
    # backup the config.toml only if a backup doesn't already exist (don't override the original)
    if [ $use_containerd_drop_in_conf_file = "false" ] && [ -f "$containerd_conf_file" ]; then
        # only backup in case drop-in files are not supported, and when doing the backup
        # only do it if a backup doesn't already exist (don't override the original)
        cp -n "$containerd_conf_file" "$containerd_conf_file_backup"
    fi

    if [ $use_containerd_drop_in_conf_file = "true" ]; then
        tomlq -i -t $(printf '.imports|=.+["%s"]' ${containerd_drop_in_conf_file}) ${containerd_conf_file}
    fi

    for shim in "${shims[@]}"; do
        configure_containerd_runtime "$1" $shim
    done
@@ -590,6 +688,14 @@ function cleanup_crio() {
}

function cleanup_containerd() {
    if [ $use_containerd_drop_in_conf_file = "true" ]; then
        # There's no need to remove the drop-in file, as it'll be removed as
        # part of the artefacts removal. Thus, simply remove the file from
        # the imports line of the containerd configuration and return.
        tomlq -i -t $(printf '.imports|=.-["%s"]' ${containerd_drop_in_conf_file}) ${containerd_conf_file}
        return
    fi

    rm -f $containerd_conf_file
    if [ -f "$containerd_conf_file_backup" ]; then
        mv "$containerd_conf_file_backup" "$containerd_conf_file"
@@ -673,6 +779,7 @@ function main() {
    echo "* AGENT_NO_PROXY: ${AGENT_NO_PROXY}"
    echo "* PULL_TYPE_MAPPING: ${PULL_TYPE_MAPPING}"
    echo "* INSTALLATION_PREFIX: ${INSTALLATION_PREFIX}"
    echo "* MULTI_INSTALL_SUFFIX: ${MULTI_INSTALL_SUFFIX}"
    echo "* HELM_POST_DELETE_HOOK: ${HELM_POST_DELETE_HOOK}"

    # script requires that user is root
@@ -693,14 +800,29 @@ function main() {
        # From 1.27.1 onwards k0s enables dynamic configuration of containerd CRI runtimes.
        # This works by k0s creating a special directory, /etc/k0s/containerd.d/, where the user can drop in partial containerd configuration snippets.
        # k0s will automatically pick up these files and add them to the containerd configuration imports list.
        containerd_conf_file="/etc/containerd/kata-containers.toml"
        containerd_conf_file="/etc/containerd/containerd.d/kata-containers.toml"
        if [ -n "$MULTI_INSTALL_SUFFIX" ]; then
            containerd_conf_file="/etc/containerd/containerd.d/kata-containers-$MULTI_INSTALL_SUFFIX.toml"
        fi
        containerd_conf_file_backup="${containerd_conf_tmpl_file}.bak"
    fi

    # only install / remove / update if we are dealing with CRIO or containerd
    if [[ "$runtime" =~ ^(crio|containerd|k3s|k3s-agent|rke2-agent|rke2-server|k0s-worker|k0s-controller)$ ]]; then
        if [ "$runtime" != "crio" ]; then
            containerd_snapshotter_version_check
            snapshotter_handler_mapping_validation_check

            use_containerd_drop_in_conf_file=$(is_containerd_capable_of_using_drop_in_files "$runtime")
            echo "Using containerd drop-in files: $use_containerd_drop_in_conf_file"

            if [[ ! "$runtime" =~ ^(k0s-worker|k0s-controller)$ ]]; then
                # We skip this check for k0s, as they handle things differently on their side
                if [ -n "$MULTI_INSTALL_SUFFIX" ] && [ $use_containerd_drop_in_conf_file = "false" ]; then
                    die "Multi installation can only be done if $runtime supports drop-in configuration files"
                fi
            fi
        fi

        case "$action" in
@@ -714,6 +836,7 @@ function main() {
            containerd_conf_file="${containerd_conf_tmpl_file}"
            containerd_conf_file_backup="${containerd_conf_tmpl_file}.bak"
        elif [[ "$runtime" =~ ^(k0s-worker|k0s-controller)$ ]]; then
            mkdir -p $(dirname "$containerd_conf_file")
            touch "$containerd_conf_file"
        elif [[ "$runtime" == "containerd" ]]; then
            if [ ! -f "$containerd_conf_file" ] && [ -d $(dirname "$containerd_conf_file") ] && [ -x $(command -v containerd) ]; then
@@ -721,6 +844,11 @@ function main() {
            fi
        fi

        if [ $use_containerd_drop_in_conf_file = "true" ]; then
            mkdir -p $(dirname "/host$containerd_drop_in_conf_file")
            touch "/host$containerd_drop_in_conf_file"
        fi

        install_artifacts
        configure_cri_runtime "$runtime"
        kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=true
@@ -731,16 +859,27 @@ function main() {
            containerd_conf_file="${containerd_conf_tmpl_file}"
        fi

        local kata_deploy_installations=$(kubectl -n kube-system get ds | grep kata-deploy | wc -l)

        if [ "${HELM_POST_DELETE_HOOK}" == "true" ]; then
            # Remove the label as the first thing, so we ensure no more kata-containers
            # pods would be scheduled here.
            kubectl label node "$NODE_NAME" katacontainers.io/kata-runtime-
            #
            # If we still have any other installation here, it means we'd break them by
            # removing the label, so we just don't do it.
            if [ $kata_deploy_installations -eq 0 ]; then
                kubectl label node "$NODE_NAME" katacontainers.io/kata-runtime-
            fi
        fi

        cleanup_cri_runtime "$runtime"
        if [ "${HELM_POST_DELETE_HOOK}" == "false" ]; then
            # The Confidential Containers operator relies on this label
            kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=cleanup
            # If we still have any other installation here, it means we'd break them by
            # removing the label, so we just don't do it.
            if [ $kata_deploy_installations -eq 0 ]; then
                # The Confidential Containers operator relies on this label
                kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=cleanup
            fi
        fi
        remove_artifacts
tools/packaging/qemu/patches/9.1.x/no_patches.txt (new empty file)
@@ -205,6 +205,16 @@ function _upload_libseccomp_tarball()

    gh release upload "${RELEASE_VERSION}" "${asc}"
}

function _upload_helm_chart_tarball()
{
    _check_required_env_var "GH_TOKEN"

    RELEASE_VERSION="$(_release_version)"

    helm package ${repo_root_dir}/tools/packaging/kata-deploy/helm-chart/kata-deploy
    gh release upload "${RELEASE_VERSION}" "kata-deploy-${RELEASE_VERSION}.tgz"
}
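
# An illustrative run (script name and token are assumed from context):
#   GH_TOKEN=<token> ./release.sh upload-helm-chart-tarball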

function main()
{
    action="${1:-}"
@@ -217,6 +227,7 @@ function main()
        upload-versions-yaml-file) _upload_versions_yaml_file ;;
        upload-vendored-code-tarball) _upload_vendored_code_tarball ;;
        upload-libseccomp-tarball) _upload_libseccomp_tarball ;;
        upload-helm-chart-tarball) _upload_helm_chart_tarball ;;
        publish-release) _publish_release ;;
        *) >&2 _die "Invalid argument" ;;
    esac
@@ -222,9 +222,6 @@ generate_qemu_options() {

    # Disabled options

    # Disable block migration in the main migration stream
    qemu_options+=(size:--disable-live-block-migration)

    # braille support not required
    qemu_options+=(size:--disable-brlapi)

@@ -397,6 +394,30 @@ generate_qemu_options() {
    qemu_options+=(size:--disable-vhdx)
    qemu_options+=(size:--disable-hv-balloon)

    # Disable various features based on the qemu_version
    if gt_eq "${qemu_version}" "9.1.0" ; then
        # Disable Query Processing Library support
        qemu_options+=(size:--disable-qpl)
        # Disable UADK Library support
        qemu_options+=(size:--disable-uadk)
        # Disable syscall buffer debugging support
        qemu_options+=(size:--disable-debug-remap)
    fi

    # Disable gio support
    qemu_options+=(size:--disable-gio)
    # Disable libdaxctl part of ndctl support
    qemu_options+=(size:--disable-libdaxctl)
    qemu_options+=(size:--disable-oss)

    # Building static binaries for aarch64 requires disabling PIE.
    # We get a GOT overflow, and the OS libraries are only built with fpic
    # and not with fPIC, which enables unlimited sized GOT tables.
    if [ "${static}" == "true" ] && [ "${arch}" == "aarch64" ]; then
        qemu_options+=(arch:"--disable-pie")
    fi

    #---------------------------------------------------------------------
    # Enabled options

@@ -409,6 +430,7 @@ generate_qemu_options() {

    # Support Linux AIO (native)
    qemu_options+=(size:--enable-linux-aio)
    qemu_options+=(size:--enable-linux-io-uring)

    # Support Ceph RADOS Block Device (RBD)
    [ -z "${static}" ] && qemu_options+=(functionality:--enable-rbd)
@@ -427,15 +449,14 @@ generate_qemu_options() {
    # for that architecture
    if [ "$arch" == x86_64 ]; then
        qemu_options+=(speed:--enable-avx2)
        qemu_options+=(speed:--enable-avx512f)
        # According to QEMU's nvdimm documentation: When 'pmem' is 'on' and QEMU is
        # built with libpmem support, QEMU will take necessary operations to guarantee
        # the persistence of its own writes to the vNVDIMM backend.
        qemu_options+=(functionality:--enable-libpmem)
        qemu_options+=(speed:--enable-avx512bw)
    else
        qemu_options+=(speed:--disable-avx2)
        qemu_options+=(functionality:--disable-libpmem)
    fi
    # We're disabling pmem support, as it is heavily broken with
    # Ubuntu's static build of QEMU
    qemu_options+=(functionality:--disable-libpmem)

    # Enable libc malloc_trim() for memory optimization.
    qemu_options+=(speed:--enable-malloc-trim)
@@ -2,7 +2,7 @@
# Copyright (c) 2020 Ant Group
#
# SPDX-License-Identifier: Apache-2.0
FROM ubuntu:20.04
FROM ubuntu:22.04

# CACHE_TIMEOUT: date to invalidate the cache; if the date changes, the image will be rebuilt.
# This is required to keep build dependencies with security fixes.
@@ -17,13 +17,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN if [ "${ARCH}" != "$(uname -m)" ]; then sed -i 's/^deb/deb [arch=amd64]/g' /etc/apt/sources.list && \
    dpkg --add-architecture "${DPKG_ARCH#:}" && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal main restricted" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal-updates main restricted" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal universe" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal-updates universe" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal multiverse" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal-updates multiverse" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ focal-backports main restricted universe multiverse" >> /etc/apt/sources.list; fi
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy main restricted" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy-updates main restricted" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy universe" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy-updates universe" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy multiverse" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy-updates multiverse" >> /etc/apt/sources.list && \
    echo "deb [arch=${DPKG_ARCH#:}] http://ports.ubuntu.com/ jammy-backports main restricted universe multiverse" >> /etc/apt/sources.list; fi
RUN apt-get update && apt-get upgrade -y && \
    apt-get --no-install-recommends install -y \
@@ -53,6 +53,7 @@ RUN apt-get update && apt-get upgrade -y && \
        libpixman-1-dev${DPKG_ARCH} \
        libselinux1-dev${DPKG_ARCH} \
        libtool${DPKG_ARCH} \
        liburing-dev${DPKG_ARCH} \
        make \
        ninja-build \
        pkg-config${DPKG_ARCH} \
@@ -62,6 +63,7 @@ RUN apt-get update && apt-get upgrade -y && \
        python3 \
        python3-dev \
        python3-venv \
        python3-tomli \
        rsync \
        zlib1g-dev${DPKG_ARCH} && \
    if [ "${ARCH}" != s390x ]; then apt-get install -y --no-install-recommends libpmem-dev${DPKG_ARCH}; fi && \
@@ -69,10 +71,3 @@ RUN apt-get update && apt-get upgrade -y && \
    if [ "${ARCH}" != "$(uname -m)" ]; then apt-get install --no-install-recommends -y gcc-"${GCC_ARCH}"-linux-gnu; fi && \
    apt-get clean && rm -rf /var/lib/apt/lists/

RUN git clone https://github.com/axboe/liburing/ ~/liburing && \
    cd ~/liburing && \
    git checkout tags/liburing-2.1 && \
    GCC_ARCH="${ARCH}" && if [ "${ARCH}" = "ppc64le" ]; then GCC_ARCH="powerpc64le"; fi && \
    if [ "${ARCH}" != "$(uname -m)" ]; then PREFIX="${GCC_ARCH}-linux-gnu"; fi && \
    ./configure --cc=${GCC_ARCH}-linux-gnu-gcc --cxx=${GCC_ARCH}-linux-gnu-cpp --prefix=/usr/${PREFIX}/ && \
    make && make install && ldconfig
@@ -59,9 +59,7 @@ ${container_engine} pull ${container_image} || ("${container_engine}" build \
    # No-op unless PUSH_TO_REGISTRY is exported as "yes"
    push_to_registry "${container_image}")

"${container_engine}" run \
    --rm \
    -i \
"${container_engine}" run --rm -i \
    --env BUILD_SUFFIX="${build_suffix}" \
    --env PKGVERSION="${PKGVERSION}" \
    --env QEMU_DESTDIR="${qemu_destdir}" \
@@ -71,7 +69,9 @@ ${container_engine} pull ${container_image} || ("${container_engine}" build \
    --env HYPERVISOR_NAME="${HYPERVISOR_NAME}" \
    --env QEMU_VERSION_NUM="${qemu_version}" \
    --env ARCH="${ARCH}" \
    -v "${repo_root_dir}:/root/kata-containers" \
    --user "$(id -u)":"$(id -g)" \
    -w "${PWD}" \
    -v "${repo_root_dir}:${repo_root_dir}" \
    -v "${PWD}":/share "${container_image}" \
    bash -c "/root/kata-containers/tools/packaging/static-build/qemu/build-qemu.sh"
    bash -c "${qemu_builder}"
@@ -8,7 +8,9 @@ set -o errexit
set -o nounset
set -o pipefail

kata_packaging_dir="/root/kata-containers/tools/packaging"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

kata_packaging_dir="${script_dir}/../.."
kata_packaging_scripts="${kata_packaging_dir}/scripts"

kata_static_build_dir="${kata_packaging_dir}/static-build"
tools/packaging/static-build/tools/.gitignore (new vendored file, 1 line)
@@ -0,0 +1 @@
install_libseccomp.sh
@@ -10,9 +10,12 @@ COPY install_libseccomp.sh /usr/bin/install_libseccomp.sh

ENV DEBIAN_FRONTEND=noninteractive

ENV GO_HOME="/opt"
ENV GOCACHE="${GO_HOME}/.cache"
ENV GOMODCACHE="${GO_HOME}/.modcache"
ENV RUSTUP_HOME="/opt/rustup"
ENV CARGO_HOME="/opt/cargo"
ENV PATH="/opt/cargo/bin/:${PATH}"
ENV PATH="/opt/cargo/bin/:/opt/go/bin:${PATH}"
ENV OPT_LIB="/opt/lib"

ENV LIBSECCOMP_LINK_TYPE=static
@@ -44,6 +47,11 @@ RUN apt-get update && \
# Tools are only built for x86_64
RUN rustup target add x86_64-unknown-linux-musl

RUN kernelname=$(uname -s | tr '[:upper:]' '[:lower:]'); \
    curl -OL "https://storage.googleapis.com/golang/go${GO_TOOLCHAIN}.${kernelname}-amd64.tar.gz" && \
    tar -C "${GO_HOME}" -xzf "go${GO_TOOLCHAIN}.${kernelname}-amd64.tar.gz" && \
    rm "go${GO_TOOLCHAIN}.${kernelname}-amd64.tar.gz"

# cmake looks for musl binutils
# For setting CMAKE_AR, find_program searches for musl-ar.
# Symlink to system ar.
Some files were not shown because too many files have changed in this diff.