Mirror of https://github.com/kata-containers/kata-containers.git (synced 2026-02-22 23:02:10 +00:00)
Compare commits: 3.24.0...fix-create (1 commit)
| Author | SHA1 | Date |
|---|---|---|
|  | 8c2e32f075 |  |
.github/actionlint.yaml (vendored) | 12

@@ -7,24 +7,20 @@
self-hosted-runner:
# Labels of self-hosted runner that linter should ignore
labels:
- amd64-nvidia-a100
- amd64-nvidia-h100-snp
- arm64-k8s
- ubuntu-22.04-arm
- garm-ubuntu-2004
- garm-ubuntu-2004-smaller
- garm-ubuntu-2204
- garm-ubuntu-2304
- garm-ubuntu-2304-smaller
- garm-ubuntu-2204-smaller
- ppc64le
- ppc64le-k8s
- ppc64le-small
- ubuntu-24.04-ppc64le
- ubuntu-24.04-s390x
- k8s-ppc64le
- metrics
- ppc64le
- riscv-builder
- sev-snp
- s390x
- s390x-large
- tdx
- ubuntu-24.04-arm
- amd64-nvidia-a100
.github/dependabot.yml (vendored) | 3

@@ -66,9 +66,6 @@ updates:
rustix:
patterns:
- rustix
slab:
patterns:
- slab
time:
patterns:
- time
.github/workflows/actionlint.yaml (vendored) | 9

@@ -3,16 +3,23 @@ name: Lint GHA workflows
on:
workflow_dispatch:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
paths:
- '.github/workflows/**'

permissions: {}

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
run-actionlint:
name: run-actionlint
env:
GH_TOKEN: ${{ github.token }}
runs-on: ubuntu-24.04
.github/workflows/basic-ci-amd64.yaml (vendored) | 59
@@ -17,7 +17,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-containerd-sandboxapi:
|
||||
name: run-containerd-sandboxapi
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
@@ -66,12 +65,11 @@ jobs:
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
run-containerd-stability:
|
||||
name: run-containerd-stability
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'cloud-hypervisor', 'dragonball', 'qemu', 'qemu-runtime-rs']
|
||||
vmm: ['clh', 'cloud-hypervisor', 'dragonball', 'qemu', 'stratovirt']
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
@@ -109,7 +107,6 @@ jobs:
|
||||
run: bash tests/stability/gha-run.sh run
|
||||
|
||||
run-nydus:
|
||||
name: run-nydus
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
@@ -117,7 +114,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'qemu', 'dragonball', 'qemu-runtime-rs']
|
||||
vmm: ['clh', 'qemu', 'dragonball', 'stratovirt']
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
@@ -155,7 +152,6 @@ jobs:
|
||||
run: bash tests/integration/nydus/gha-run.sh run
|
||||
|
||||
run-runk:
|
||||
name: run-runk
|
||||
# Skip runk tests as we have no maintainers. TODO: Decide when to remove altogether
|
||||
if: false
|
||||
runs-on: ubuntu-22.04
|
||||
@@ -191,7 +187,6 @@ jobs:
|
||||
run: bash tests/integration/runk/gha-run.sh run
|
||||
|
||||
run-tracing:
|
||||
name: run-tracing
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -236,7 +231,6 @@ jobs:
|
||||
run: bash tests/functional/tracing/gha-run.sh run
|
||||
|
||||
run-vfio:
|
||||
name: run-vfio
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -279,8 +273,53 @@ jobs:
|
||||
timeout-minutes: 15
|
||||
run: bash tests/functional/vfio/gha-run.sh run
|
||||
|
||||
run-docker-tests:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# all the tests are not flaky, otherwise we'll fail them
|
||||
# all due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- clh
|
||||
- qemu
|
||||
- dragonball
|
||||
- cloud-hypervisor
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/docker/gha-run.sh install-dependencies
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/docker/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run docker smoke test
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/docker/gha-run.sh run
|
||||
|
||||
run-nerdctl-tests:
|
||||
name: run-nerdctl-tests
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# all the tests are not flaky, otherwise we'll fail them
|
||||
@@ -292,7 +331,6 @@ jobs:
|
||||
- dragonball
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
- qemu-runtime-rs
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
@@ -341,7 +379,6 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
run-kata-agent-apis:
|
||||
name: run-kata-agent-apis
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
.github/workflows/basic-ci-s390x.yaml (vendored) | 42
@@ -17,7 +17,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-containerd-sandboxapi:
|
||||
name: run-containerd-sandboxapi
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
@@ -66,7 +65,6 @@ jobs:
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
run-containerd-stability:
|
||||
name: run-containerd-stability
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -106,3 +104,43 @@ jobs:
|
||||
- name: Run containerd-stability tests
|
||||
timeout-minutes: 15
|
||||
run: bash tests/stability/gha-run.sh run
|
||||
|
||||
run-docker-tests:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# all the tests are not flaky, otherwise we'll fail them
|
||||
# all due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm: ['qemu']
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/docker/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/docker/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run docker smoke test
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/docker/gha-run.sh run
|
||||
|
||||
@@ -17,7 +17,6 @@ permissions: {}
|
||||
name: Build checks preview riscv64
|
||||
jobs:
|
||||
check:
|
||||
name: check
|
||||
runs-on: ${{ inputs.instance }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -124,11 +123,9 @@ jobs:
|
||||
echo "GITHUB_RUNNER_CI_NON_VIRT=true" >> "$GITHUB_ENV"
|
||||
- name: Running `${{ matrix.command }}` for ${{ matrix.component.name }}
|
||||
run: |
|
||||
cd "${COMPONENT_PATH}"
|
||||
${COMMAND}
|
||||
cd ${{ matrix.component.path }}
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
COMMAND: ${{ matrix.command }}
|
||||
COMPONENT_PATH: ${{ matrix.component.path }}
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
SKIP_GO_VERSION_CHECK: "1"
|
||||
|
||||
.github/workflows/build-checks.yaml (vendored) | 18
@@ -11,13 +11,7 @@ permissions: {}
|
||||
name: Build checks
|
||||
jobs:
|
||||
check:
|
||||
name: check
|
||||
runs-on: >-
|
||||
${{
|
||||
( contains(inputs.instance, 's390x') && matrix.component.name == 'runtime' ) && 's390x' ||
|
||||
( contains(inputs.instance, 'ppc64le') && (matrix.component.name == 'runtime' || matrix.component.name == 'agent') ) && 'ppc64le' ||
|
||||
inputs.instance
|
||||
}}
|
||||
runs-on: ${{ inputs.instance }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -52,7 +46,6 @@ jobs:
|
||||
path: src/libs
|
||||
needs:
|
||||
- rust
|
||||
- protobuf-compiler
|
||||
- name: agent-ctl
|
||||
path: src/tools/agent-ctl
|
||||
needs:
|
||||
@@ -63,7 +56,6 @@ jobs:
|
||||
path: src/tools/kata-ctl
|
||||
needs:
|
||||
- rust
|
||||
- protobuf-compiler
|
||||
- name: trace-forwarder
|
||||
path: src/tools/trace-forwarder
|
||||
needs:
|
||||
@@ -73,8 +65,6 @@ jobs:
|
||||
needs:
|
||||
- rust
|
||||
- protobuf-compiler
|
||||
instance:
|
||||
- ${{ inputs.instance }}
|
||||
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
@@ -136,11 +126,9 @@ jobs:
|
||||
echo "GITHUB_RUNNER_CI_NON_VIRT=true" >> "$GITHUB_ENV"
|
||||
- name: Running `${{ matrix.command }}` for ${{ matrix.component.name }}
|
||||
run: |
|
||||
cd "${COMPONENT_PATH}"
|
||||
eval "${COMMAND}"
|
||||
cd ${{ matrix.component.path }}
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
COMMAND: ${{ matrix.command }}
|
||||
COMPONENT_PATH: ${{ matrix.component.path }}
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
SKIP_GO_VERSION_CHECK: "1"
|
||||
|
||||
@@ -30,7 +30,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
name: build-asset
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -63,6 +62,7 @@ jobs:
|
||||
- qemu
|
||||
- qemu-snp-experimental
|
||||
- qemu-tdx-experimental
|
||||
- stratovirt
|
||||
- trace-forwarder
|
||||
- virtiofsd
|
||||
stage:
|
||||
@@ -96,6 +96,7 @@ jobs:
|
||||
- name: Build ${{ matrix.asset }}
|
||||
id: build
|
||||
run: |
|
||||
[[ "${KATA_ASSET}" == *"nvidia"* ]] && echo "KBUILD_SIGN_PIN=${{ secrets.KBUILD_SIGN_PIN }}" >> "${GITHUB_ENV}"
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
@@ -109,19 +110,16 @@ jobs:
|
||||
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
|
||||
|
||||
- name: Parse OCI image name and digest
|
||||
id: parse-oci-segments
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
run: |
|
||||
oci_image="$(<"build/${KATA_ASSET}-oci-image")"
|
||||
oci_image="$(<"build/${{ matrix.asset }}-oci-image")"
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: oras-project/setup-oras@22ce207df3b08e061f537244349aac6ae1d214f6 # v1.2.4
|
||||
- uses: oras-project/setup-oras@5c0b487ce3fe0ce3ab0d034e63669e426e294e4d # v1.2.2
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
version: "1.2.0"
|
||||
@@ -159,7 +157,6 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
name: build-asset-rootfs
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset
|
||||
permissions:
|
||||
@@ -171,8 +168,6 @@ jobs:
|
||||
- rootfs-image
|
||||
- rootfs-image-confidential
|
||||
- rootfs-image-mariner
|
||||
- rootfs-image-nvidia-gpu
|
||||
- rootfs-image-nvidia-gpu-confidential
|
||||
- rootfs-initrd
|
||||
- rootfs-initrd-confidential
|
||||
- rootfs-initrd-nvidia-gpu
|
||||
@@ -208,6 +203,7 @@ jobs:
|
||||
- name: Build ${{ matrix.asset }}
|
||||
id: build
|
||||
run: |
|
||||
[[ "${KATA_ASSET}" == *"nvidia"* ]] && echo "KBUILD_SIGN_PIN=${{ secrets.KBUILD_SIGN_PIN }}" >> "${GITHUB_ENV}"
|
||||
./tests/gha-adjust-to-use-prebuilt-components.sh kata-artifacts "${KATA_ASSET}"
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
@@ -222,7 +218,6 @@ jobs:
|
||||
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
@@ -234,7 +229,6 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts:
|
||||
name: remove-rootfs-binary-artifacts
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
@@ -252,7 +246,6 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts-for-release:
|
||||
name: remove-rootfs-binary-artifacts-for-release
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
@@ -266,7 +259,6 @@ jobs:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
build-asset-shim-v2:
|
||||
name: build-asset-shim-v2
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts, remove-rootfs-binary-artifacts-for-release]
|
||||
permissions:
|
||||
@@ -328,7 +320,6 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
name: create-kata-tarball
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
|
||||
permissions:
|
||||
|
||||
@@ -23,15 +23,12 @@ on:
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: false
|
||||
KBUILD_SIGN_PIN:
|
||||
required: true
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
name: build-asset
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@@ -47,10 +44,10 @@ jobs:
|
||||
- kernel
|
||||
- kernel-dragonball-experimental
|
||||
- kernel-nvidia-gpu
|
||||
- kernel-cca-confidential
|
||||
- nydus
|
||||
- ovmf
|
||||
- qemu
|
||||
- stratovirt
|
||||
- virtiofsd
|
||||
env:
|
||||
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
|
||||
@@ -90,19 +87,16 @@ jobs:
|
||||
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
|
||||
|
||||
- name: Parse OCI image name and digest
|
||||
id: parse-oci-segments
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
run: |
|
||||
oci_image="$(<"build/${KATA_ASSET}-oci-image")"
|
||||
oci_image="$(<"build/${{ matrix.asset }}-oci-image")"
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: oras-project/setup-oras@22ce207df3b08e061f537244349aac6ae1d214f6 # v1.2.4
|
||||
- uses: oras-project/setup-oras@5c0b487ce3fe0ce3ab0d034e63669e426e294e4d # v1.2.2
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
version: "1.2.0"
|
||||
@@ -140,8 +134,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
name: build-asset-rootfs
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -150,7 +143,6 @@ jobs:
|
||||
matrix:
|
||||
asset:
|
||||
- rootfs-image
|
||||
- rootfs-image-nvidia-gpu
|
||||
- rootfs-initrd
|
||||
- rootfs-initrd-nvidia-gpu
|
||||
steps:
|
||||
@@ -197,7 +189,6 @@ jobs:
|
||||
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
@@ -209,8 +200,7 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts:
|
||||
name: remove-rootfs-binary-artifacts
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -224,8 +214,7 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts-for-release:
|
||||
name: remove-rootfs-binary-artifacts-for-release
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -238,8 +227,7 @@ jobs:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
build-asset-shim-v2:
|
||||
name: build-asset-shim-v2
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts, remove-rootfs-binary-artifacts-for-release]
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -298,8 +286,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
name: create-kata-tarball
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
@@ -28,11 +28,10 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
name: build-asset
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ubuntu-24.04-ppc64le
|
||||
runs-on: ppc64le
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
@@ -88,8 +87,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
name: build-asset-rootfs
|
||||
runs-on: ubuntu-24.04-ppc64le
|
||||
runs-on: ppc64le
|
||||
needs: build-asset
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -155,7 +153,6 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts:
|
||||
name: remove-rootfs-binary-artifacts
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
@@ -169,8 +166,7 @@ jobs:
|
||||
name: kata-artifacts-ppc64le-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
build-asset-shim-v2:
|
||||
name: build-asset-shim-v2
|
||||
runs-on: ubuntu-24.04-ppc64le
|
||||
runs-on: ppc64le
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -229,8 +225,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
name: create-kata-tarball
|
||||
runs-on: ubuntu-24.04-ppc64le
|
||||
runs-on: ppc64le
|
||||
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
@@ -20,12 +20,14 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
name: build-asset
|
||||
runs-on: riscv-builder
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -38,6 +40,14 @@ jobs:
|
||||
- kernel
|
||||
- virtiofsd
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
@@ -71,5 +81,5 @@ jobs:
|
||||
with:
|
||||
name: kata-artifacts-riscv64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
|
||||
retention-days: 3
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
@@ -31,8 +31,7 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
name: build-asset
|
||||
runs-on: ubuntu-24.04-s390x
|
||||
runs-on: s390x
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@@ -91,10 +90,8 @@ jobs:
|
||||
- name: Parse OCI image name and digest
|
||||
id: parse-oci-segments
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
env:
|
||||
ASSET: ${{ matrix.asset }}
|
||||
run: |
|
||||
oci_image="$(<"build/${ASSET}-oci-image")"
|
||||
oci_image="$(<"build/${{ matrix.asset }}-oci-image")"
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
@@ -122,7 +119,6 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
name: build-asset-rootfs
|
||||
runs-on: s390x
|
||||
needs: build-asset
|
||||
permissions:
|
||||
@@ -190,7 +186,6 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-boot-image-se:
|
||||
name: build-asset-boot-image-se
|
||||
runs-on: s390x
|
||||
needs: [build-asset, build-asset-rootfs]
|
||||
permissions:
|
||||
@@ -240,7 +235,6 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts:
|
||||
name: remove-rootfs-binary-artifacts
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-asset-rootfs, build-asset-boot-image-se]
|
||||
strategy:
|
||||
@@ -256,8 +250,7 @@ jobs:
|
||||
name: kata-artifacts-s390x-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
build-asset-shim-v2:
|
||||
name: build-asset-shim-v2
|
||||
runs-on: ubuntu-24.04-s390x
|
||||
runs-on: s390x
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -318,8 +311,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
name: create-kata-tarball
|
||||
runs-on: ubuntu-24.04-s390x
|
||||
runs-on: s390x
|
||||
needs:
|
||||
- build-asset
|
||||
- build-asset-rootfs
|
||||
|
||||
.github/workflows/cargo-deny-runner.yaml (vendored) | 1

@@ -15,7 +15,6 @@ permissions: {}

jobs:
cargo-deny-runner:
name: cargo-deny-runner
runs-on: ubuntu-22.04

steps:
.github/workflows/ci-nightly-riscv.yaml (vendored) | 34
@@ -1,34 +0,0 @@
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 5 * * *'
|
||||
|
||||
name: Nightly CI for RISC-V
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-riscv:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/build-kata-static-tarball-riscv64.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ github.sha }}
|
||||
commit-hash: ${{ github.sha }}
|
||||
target-branch: ${{ github.ref_name }}
|
||||
|
||||
build-checks-preview:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
instance:
|
||||
- "riscv-builder"
|
||||
uses: ./.github/workflows/build-checks-preview-riscv64.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
.github/workflows/ci-nightly-s390x.yaml (vendored) | 4

@@ -8,7 +8,6 @@ permissions: {}

jobs:
check-internal-test-result:
name: check-internal-test-result
runs-on: s390x
strategy:
fail-fast: false
@@ -16,8 +15,7 @@ jobs:
test_title:
- kata-vfio-ap-e2e-tests
- cc-vfio-ap-e2e-tests
- cc-se-e2e-tests-go
- cc-se-e2e-tests-rs
- cc-se-e2e-tests
steps:
- name: Fetch a test result for {{ matrix.test_title }}
run: |
.github/workflows/ci-on-push.yaml (vendored) | 2

@@ -1,6 +1,6 @@
name: Kata Containers CI
on:
pull_request_target: # zizmor: ignore[dangerous-triggers] See #11332.
pull_request_target:
branches:
- 'main'
types:
.github/workflows/ci-weekly.yaml (vendored) | 1

@@ -66,7 +66,6 @@ jobs:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

build-and-publish-tee-confidential-unencrypted-image:
name: build-and-publish-tee-confidential-unencrypted-image
permissions:
contents: read
packages: write
.github/workflows/ci.yaml (vendored) | 43
@@ -86,8 +86,6 @@ jobs:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets:
|
||||
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
|
||||
|
||||
publish-kata-deploy-payload-arm64:
|
||||
needs: build-kata-static-tarball-arm64
|
||||
@@ -102,7 +100,7 @@ jobs:
|
||||
tag: ${{ inputs.tag }}-arm64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
runner: ubuntu-24.04-arm
|
||||
runner: ubuntu-22.04-arm
|
||||
arch: arm64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
@@ -134,6 +132,20 @@ jobs:
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
build-kata-static-tarball-riscv64:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/build-kata-static-tarball-riscv64.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
publish-kata-deploy-payload-s390x:
|
||||
needs: build-kata-static-tarball-s390x
|
||||
permissions:
|
||||
@@ -147,7 +159,7 @@ jobs:
|
||||
tag: ${{ inputs.tag }}-s390x
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
runner: ubuntu-24.04-s390x
|
||||
runner: s390x
|
||||
arch: s390x
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
@@ -165,13 +177,12 @@ jobs:
|
||||
tag: ${{ inputs.tag }}-ppc64le
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
runner: ubuntu-24.04-ppc64le
|
||||
runner: ppc64le
|
||||
arch: ppc64le
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
build-and-publish-tee-confidential-unencrypted-image:
|
||||
name: build-and-publish-tee-confidential-unencrypted-image
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@@ -213,7 +224,6 @@ jobs:
|
||||
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
|
||||
|
||||
publish-csi-driver-amd64:
|
||||
name: publish-csi-driver-amd64
|
||||
needs: build-kata-static-tarball-amd64
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -297,6 +307,18 @@ jobs:
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
run-k8s-tests-on-amd64:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-k8s-tests-on-amd64.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
run-k8s-tests-on-arm64:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: publish-kata-deploy-payload-arm64
|
||||
@@ -314,7 +336,6 @@ jobs:
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-k8s-tests-on-nvidia-gpu.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
@@ -416,11 +437,13 @@ jobs:
|
||||
{ containerd_version: lts, vmm: clh },
|
||||
{ containerd_version: lts, vmm: dragonball },
|
||||
{ containerd_version: lts, vmm: qemu },
|
||||
{ containerd_version: lts, vmm: stratovirt },
|
||||
{ containerd_version: lts, vmm: cloud-hypervisor },
|
||||
{ containerd_version: lts, vmm: qemu-runtime-rs },
|
||||
{ containerd_version: active, vmm: clh },
|
||||
{ containerd_version: active, vmm: dragonball },
|
||||
{ containerd_version: active, vmm: qemu },
|
||||
{ containerd_version: active, vmm: stratovirt },
|
||||
{ containerd_version: active, vmm: cloud-hypervisor },
|
||||
{ containerd_version: active, vmm: qemu-runtime-rs },
|
||||
]
|
||||
@@ -468,13 +491,13 @@ jobs:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
runner: ppc64le-small
|
||||
runner: ppc64le
|
||||
arch: ppc64le
|
||||
containerd_version: ${{ matrix.params.containerd_version }}
|
||||
vmm: ${{ matrix.params.vmm }}
|
||||
|
||||
run-cri-containerd-tests-arm64:
|
||||
if: false
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-arm64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
.github/workflows/cleanup-resources.yaml (vendored) | 1

@@ -8,7 +8,6 @@ permissions: {}

jobs:
cleanup-resources:
name: cleanup-resources
runs-on: ubuntu-22.04
permissions:
id-token: write # Used for OIDC access to log into Azure
.github/workflows/commit-message-check.yaml (vendored) | 2

@@ -41,7 +41,7 @@ jobs:
filter_out_pattern: '^Revert "|^Reapply "'

- name: DCO Check
uses: tim-actions/dco@f2279e6e62d5a7d9115b0cb8e837b777b1b02e21 # v1.1.0
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20 # master (2020-04-28)
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
.github/workflows/darwin-tests.yaml (vendored) | 12
@@ -15,17 +15,8 @@ concurrency:
|
||||
name: Darwin tests
|
||||
jobs:
|
||||
test:
|
||||
name: test
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Install Protoc
|
||||
run: |
|
||||
f=$(mktemp)
|
||||
curl -sSLo "$f" https://github.com/protocolbuffers/protobuf/releases/download/v28.2/protoc-28.2-osx-aarch_64.zip
|
||||
mkdir -p "$HOME/.local"
|
||||
unzip -d "$HOME/.local" "$f"
|
||||
echo "$HOME/.local/bin" >> "${GITHUB_PATH}"
|
||||
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -36,8 +27,5 @@ jobs:
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
|
||||
|
||||
- name: Install Rust
|
||||
run: ./tests/install_rust.sh
|
||||
|
||||
- name: Build utils
|
||||
run: ./ci/darwin-test.sh
|
||||
|
||||
.github/workflows/docs-url-alive-check.yaml (vendored) | 7
@@ -1,14 +1,12 @@
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 23 * * 0'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions: {}
|
||||
|
||||
name: Docs URL Alive Check
|
||||
jobs:
|
||||
test:
|
||||
name: test
|
||||
runs-on: ubuntu-22.04
|
||||
# don't run this action on forks
|
||||
if: github.repository_owner == 'kata-containers'
|
||||
@@ -17,12 +15,13 @@ jobs:
|
||||
steps:
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "GOPATH=${GITHUB_WORKSPACE}" >> "$GITHUB_ENV"
|
||||
echo "GOPATH=${{ github.workspace }}" >> "$GITHUB_ENV"
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
|
||||
- name: Install golang
|
||||
run: |
|
||||
@@ -31,4 +30,4 @@ jobs:
|
||||
|
||||
- name: Docs URL Alive Check
|
||||
run: |
|
||||
make docs-url-alive-check
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}" && make docs-url-alive-check
|
||||
|
||||
.github/workflows/gatekeeper-skipper.yaml (vendored) | 1

@@ -35,7 +35,6 @@ permissions: {}

jobs:
skipper:
name: skipper
runs-on: ubuntu-22.04
outputs:
skip_build: ${{ steps.skipper.outputs.skip_build }}
.github/workflows/gatekeeper.yaml (vendored) | 5
@@ -5,14 +5,12 @@ name: Gatekeeper
|
||||
# reporting the status.
|
||||
|
||||
on:
|
||||
pull_request_target: # zizmor: ignore[dangerous-triggers] See #11332.
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
- reopened
|
||||
- edited
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
permissions: {}
|
||||
|
||||
@@ -22,7 +20,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
gatekeeper:
|
||||
name: gatekeeper
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
actions: read
|
||||
|
||||
.github/workflows/govulncheck.yaml (vendored) | 8
@@ -7,7 +7,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
govulncheck:
|
||||
name: govulncheck
|
||||
runs-on: ubuntu-22.04
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -40,14 +39,11 @@ jobs:
|
||||
- name: Build runtime binaries
|
||||
run: |
|
||||
cd src/runtime
|
||||
make "${MAKE_TARGET}"
|
||||
make ${{ matrix.make_target }}
|
||||
env:
|
||||
MAKE_TARGET: ${{ matrix.make_target }}
|
||||
SKIP_GO_VERSION_CHECK: "1"
|
||||
|
||||
- name: Run govulncheck on ${{ matrix.binary }}
|
||||
env:
|
||||
BINARY: ${{ matrix.binary }}
|
||||
run: |
|
||||
cd src/runtime
|
||||
bash ../../tests/govulncheck-runner.sh "./${BINARY}"
|
||||
bash ../../tests/govulncheck-runner.sh "./${{ matrix.binary }}"
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
name: kata-runtime-classes-sync
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
@@ -16,7 +14,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
kata-deploy-runtime-classes-check:
|
||||
name: kata-deploy-runtime-classes-check
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
name: nydus-snapshotter-version-sync
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
permissions: {}
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
nydus-snapshotter-version-check:
|
||||
name: nydus-snapshotter-version-check
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Ensure nydus-snapshotter-version is in sync inside our repo
|
||||
run: |
|
||||
dockerfile_version=$(grep "ARG NYDUS_SNAPSHOTTER_VERSION" tools/packaging/kata-deploy/Dockerfile | cut -f2 -d'=')
|
||||
versions_version=$(yq ".externals.nydus-snapshotter.version | explode(.)" versions.yaml)
|
||||
if [[ "${dockerfile_version}" != "${versions_version}" ]]; then
|
||||
echo "nydus-snapshotter version must be the same in the following places: "
|
||||
echo "- versions.yaml: ${versions_version}"
|
||||
echo "- tools/packaging/kata-deploy/Dockerfile: ${dockerfile_version}"
|
||||
exit 1
|
||||
fi
|
||||
.github/workflows/payload-after-push.yaml (vendored) | 45
@@ -39,7 +39,6 @@ jobs:
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
|
||||
|
||||
build-assets-s390x:
|
||||
permissions:
|
||||
@@ -97,7 +96,7 @@ jobs:
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-arm64
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ubuntu-24.04-arm
|
||||
runner: ubuntu-22.04-arm
|
||||
arch: arm64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
@@ -131,13 +130,12 @@ jobs:
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-ppc64le
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ppc64le-small
|
||||
runner: ppc64le
|
||||
arch: ppc64le
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
publish-manifest:
|
||||
name: publish-manifest
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -162,42 +160,3 @@ jobs:
|
||||
env:
|
||||
KATA_DEPLOY_IMAGE_TAGS: "kata-containers-latest"
|
||||
KATA_DEPLOY_REGISTRIES: "quay.io/kata-containers/kata-deploy-ci"
|
||||
|
||||
upload-helm-chart-tarball:
|
||||
name: upload-helm-chart-tarball
|
||||
needs: publish-manifest
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
packages: write # needed to push the helm chart to ghcr.io
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0
|
||||
id: install
|
||||
|
||||
- name: Login to the OCI registries
|
||||
env:
|
||||
QUAY_DEPLOYER_USERNAME: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
echo "${QUAY_DEPLOYER_PASSWORD}" | helm registry login quay.io --username "${QUAY_DEPLOYER_USERNAME}" --password-stdin
|
||||
echo "${GITHUB_TOKEN}" | helm registry login ghcr.io --username "${GITHUB_ACTOR}" --password-stdin
|
||||
|
||||
- name: Push helm chart to the OCI registries
|
||||
run: |
|
||||
echo "Adjusting the Chart.yaml and values.yaml"
|
||||
yq eval '.version = "0.0.0-dev" | .appVersion = "0.0.0-dev"' -i tools/packaging/kata-deploy/helm-chart/kata-deploy/Chart.yaml
|
||||
yq eval '.image.reference = "quay.io/kata-containers/kata-deploy-ci" | .image.tag = "kata-containers-latest"' -i tools/packaging/kata-deploy/helm-chart/kata-deploy/values.yaml
|
||||
|
||||
echo "Generating the chart package"
|
||||
helm dependencies update tools/packaging/kata-deploy/helm-chart/kata-deploy
|
||||
helm package tools/packaging/kata-deploy/helm-chart/kata-deploy
|
||||
|
||||
echo "Pushing the chart to the OCI registries"
|
||||
helm push "kata-deploy-0.0.0-dev.tgz" oci://quay.io/kata-containers/kata-deploy-charts
|
||||
helm push "kata-deploy-0.0.0-dev.tgz" oci://ghcr.io/kata-containers/kata-deploy-charts
|
||||
|
||||
@@ -38,7 +38,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
name: kata-payload
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
.github/workflows/release-amd64.yaml (vendored) | 1

@@ -29,7 +29,6 @@ jobs:
attestations: write

kata-deploy:
name: kata-deploy
needs: build-kata-static-tarball-amd64
permissions:
contents: read
.github/workflows/release-arm64.yaml (vendored) | 6
@@ -8,8 +8,6 @@ on:
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
KBUILD_SIGN_PIN:
|
||||
required: true
|
||||
|
||||
permissions: {}
|
||||
|
||||
@@ -21,7 +19,6 @@ jobs:
|
||||
stage: release
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
@@ -29,12 +26,11 @@ jobs:
|
||||
attestations: write
|
||||
|
||||
kata-deploy:
|
||||
name: kata-deploy
|
||||
needs: build-kata-static-tarball-arm64
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ubuntu-24.04-arm
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
|
||||
.github/workflows/release-ppc64le.yaml (vendored) | 3

@@ -26,12 +26,11 @@ jobs:
attestations: write

kata-deploy:
name: kata-deploy
needs: build-kata-static-tarball-ppc64le
permissions:
contents: read
packages: write
runs-on: ubuntu-24.04-ppc64le
runs-on: ppc64le
steps:
- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
.github/workflows/release-s390x.yaml (vendored) | 3

@@ -30,12 +30,11 @@ jobs:

kata-deploy:
name: kata-deploy
needs: build-kata-static-tarball-s390x
permissions:
contents: read
packages: write
runs-on: ubuntu-24.04-s390x
runs-on: s390x
steps:
- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
.github/workflows/release.yaml (vendored) | 16
@@ -6,7 +6,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release create` command
|
||||
@@ -49,7 +48,6 @@ jobs:
|
||||
target-arch: arm64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
|
||||
|
||||
build-and-push-assets-s390x:
|
||||
needs: release
|
||||
@@ -79,7 +77,6 @@ jobs:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
publish-multi-arch-images:
|
||||
name: publish-multi-arch-images
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le]
|
||||
permissions:
|
||||
@@ -117,7 +114,6 @@ jobs:
|
||||
KATA_DEPLOY_REGISTRIES: "quay.io/kata-containers/kata-deploy ghcr.io/kata-containers/kata-deploy"
|
||||
|
||||
upload-multi-arch-static-tarball:
|
||||
name: upload-multi-arch-static-tarball
|
||||
needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le]
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
@@ -182,7 +178,6 @@ jobs:
|
||||
ARCHITECTURE: ppc64le
|
||||
|
||||
upload-versions-yaml:
|
||||
name: upload-versions-yaml
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
@@ -200,7 +195,6 @@ jobs:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
upload-cargo-vendored-tarball:
|
||||
name: upload-cargo-vendored-tarball
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
@@ -218,7 +212,6 @@ jobs:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
upload-libseccomp-tarball:
|
||||
name: upload-libseccomp-tarball
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
@@ -236,7 +229,6 @@ jobs:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
upload-helm-chart-tarball:
|
||||
name: upload-helm-chart-tarball
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
@@ -261,11 +253,10 @@ jobs:
|
||||
- name: Login to the OCI registries
|
||||
env:
|
||||
QUAY_DEPLOYER_USERNAME: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
GITHUB_TOKEN: ${{ github.token }}
|
||||
GITHUB_ACTOR: ${{ github.actor }}
|
||||
run: |
|
||||
echo "${QUAY_DEPLOYER_PASSWORD}" | helm registry login quay.io --username "${QUAY_DEPLOYER_USERNAME}" --password-stdin
|
||||
echo "${GITHUB_TOKEN}" | helm registry login ghcr.io --username "${GITHUB_ACTOR}" --password-stdin
|
||||
echo "${{ secrets.QUAY_DEPLOYER_PASSWORD }}" | helm registry login quay.io --username "${QUAY_DEPLOYER_USERNAME}" --password-stdin
|
||||
echo "${{ github.token }}" | helm registry login ghcr.io --username "${GITHUB_ACTOR}" --password-stdin
|
||||
|
||||
- name: Push helm chart to the OCI registries
|
||||
run: |
|
||||
@@ -274,7 +265,6 @@ jobs:
|
||||
helm push "kata-deploy-${release_version}.tgz" oci://ghcr.io/kata-containers/kata-deploy-charts
|
||||
|
||||
publish-release:
|
||||
name: publish-release
|
||||
needs: [ build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le, publish-multi-arch-images, upload-multi-arch-static-tarball, upload-versions-yaml, upload-cargo-vendored-tarball, upload-libseccomp-tarball ]
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
|
||||
.github/workflows/run-k8s-tests-on-aks.yaml (vendored) | 21
@@ -38,7 +38,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
name: run-k8s-tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -49,6 +48,7 @@ jobs:
|
||||
- dragonball
|
||||
- qemu
|
||||
- qemu-runtime-rs
|
||||
- stratovirt
|
||||
- cloud-hypervisor
|
||||
instance-type:
|
||||
- small
|
||||
@@ -58,13 +58,16 @@ jobs:
|
||||
vmm: clh
|
||||
instance-type: small
|
||||
genpolicy-pull-method: oci-distribution
|
||||
auto-generate-policy: yes
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
instance-type: small
|
||||
genpolicy-pull-method: containerd
|
||||
auto-generate-policy: yes
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
instance-type: normal
|
||||
auto-generate-policy: yes
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -78,8 +81,10 @@ jobs:
|
||||
KATA_HOST_OS: ${{ matrix.host_os }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
|
||||
GENPOLICY_PULL_METHOD: ${{ matrix.genpolicy-pull-method }}
|
||||
AUTO_GENERATE_POLICY: ${{ matrix.auto-generate-policy }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -135,25 +140,13 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh get-cluster-credentials
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-aks
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 60
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Refresh OIDC token in case access token expired
|
||||
if: always()
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
|
||||
|
||||
.github/workflows/run-k8s-tests-on-amd64.yaml (vendored, new file) | 114
@@ -0,0 +1,114 @@
|
||||
name: CI | Run kubernetes tests on amd64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-amd64:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- clh #cloud-hypervisor
|
||||
- dragonball
|
||||
- fc #firecracker
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
container_runtime:
|
||||
- containerd
|
||||
snapshotter:
|
||||
- devmapper
|
||||
k8s:
|
||||
- k3s
|
||||
include:
|
||||
- vmm: qemu
|
||||
container_runtime: crio
|
||||
snapshotter: ""
|
||||
k8s: k0s
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
KUBERNETES_EXTRA_PARAMS: ${{ matrix.container_runtime != 'crio' && '' || '--cri-socket remote:unix:///var/run/crio/crio.sock --kubelet-extra-args --cgroup-driver="systemd"' }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
CONTAINER_RUNTIME: ${{ matrix.container_runtime }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Configure CRI-O
|
||||
if: matrix.container_runtime == 'crio'
|
||||
run: bash tests/integration/kubernetes/gha-run.sh setup-crio
|
||||
|
||||
- name: Deploy ${{ matrix.k8s }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
|
||||
env:
|
||||
CONTAINER_RUNTIME: ${{ matrix.container_runtime }}
|
||||
|
||||
- name: Configure the ${{ matrix.snapshotter }} snapshotter
|
||||
if: matrix.snapshotter != ''
|
||||
run: bash tests/integration/kubernetes/gha-run.sh configure-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.snapshotter }}-${{ matrix.k8s }}-${{ inputs.tag }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
.github/workflows/run-k8s-tests-on-arm64.yaml (vendored) | 10
@@ -26,7 +26,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-on-arm64:
|
||||
name: run-k8s-tests-on-arm64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -42,6 +41,7 @@ jobs:
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
TARGET_ARCH: "aarch64"
|
||||
steps:
|
||||
@@ -58,7 +58,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Install `bats`
|
||||
@@ -68,10 +68,6 @@ jobs:
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
|
||||
@@ -86,5 +82,5 @@ jobs:
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 15
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
name: CI | Run NVIDIA GPU kubernetes tests on amd64
|
||||
name: CI | Run NVIDIA GPU kubernetes tests on arm64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: true
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
@@ -32,24 +29,23 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-nvidia-gpu-tests-on-amd64:
|
||||
name: run-${{ matrix.environment.name }}-tests-on-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
environment: [
|
||||
{ name: nvidia-gpu, vmm: qemu-nvidia-gpu, runner: amd64-nvidia-a100 },
|
||||
{ name: nvidia-gpu-snp, vmm: qemu-nvidia-gpu-snp, runner: amd64-nvidia-h100-snp },
|
||||
]
|
||||
runs-on: ${{ matrix.environment.runner }}
|
||||
vmm:
|
||||
- qemu-nvidia-gpu
|
||||
k8s:
|
||||
- kubeadm
|
||||
runs-on: amd64-nvidia-a100
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.environment.vmm }}
|
||||
KUBERNETES: kubeadm
|
||||
KBS: ${{ matrix.environment.name == 'nvidia-gpu-snp' && 'true' || 'false' }}
|
||||
K8S_TEST_HOST_TYPE: baremetal
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -63,68 +59,31 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-artifacts
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
if: matrix.environment.name != 'nvidia-gpu'
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
if: matrix.environment.name != 'nvidia-gpu'
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
env:
|
||||
NVIDIA_VERIFIER_MODE: remote
|
||||
KBS_INGRESS: nodeport
|
||||
|
||||
- name: Install `kbs-client`
|
||||
if: matrix.environment.name != 'nvidia-gpu'
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Run tests ${{ matrix.environment.vmm }}
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-nv-tests
|
||||
env:
|
||||
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Collect artifacts ${{ matrix.environment.vmm }}
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.environment.vmm }}
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: k8s-tests-${{ matrix.environment.vmm }}-kubeadm-${{ inputs.tag }}
|
||||
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.k8s }}-${{ inputs.tag }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 15
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: always() && matrix.environment.name != 'nvidia-gpu'
|
||||
run: |
|
||||
bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
19
.github/workflows/run-k8s-tests-on-ppc64le.yaml
vendored
@@ -26,7 +26,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
name: run-k8s-tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -34,7 +33,7 @@ jobs:
|
||||
- qemu
|
||||
k8s:
|
||||
- kubeadm
|
||||
runs-on: ppc64le-k8s
|
||||
runs-on: k8s-ppc64le
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@@ -43,6 +42,7 @@ jobs:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
TARGET_ARCH: "ppc64le"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
@@ -62,20 +62,19 @@ jobs:
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
|
||||
- name: Prepare the runner for k8s test suite
|
||||
run: bash "${HOME}/scripts/k8s_cluster_prepare.sh"
|
||||
- name: Prepare the runner for k8s cluster creation
|
||||
run: bash "${HOME}/scripts/k8s_cluster_cleanup.sh"
|
||||
|
||||
- name: Check if cluster is healthy to run the tests
|
||||
run: bash "${HOME}/scripts/k8s_cluster_check.sh"
|
||||
- name: Create k8s cluster using kubeadm
|
||||
run: bash "${HOME}/scripts/k8s_cluster_create.sh"
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-kubeadm
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
- name: Delete cluster and post cleanup actions
|
||||
run: bash "${HOME}/scripts/k8s_cluster_cleanup.sh"
|
||||
|
||||
14
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
@@ -29,7 +29,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
name: run-k8s-tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -46,9 +45,11 @@ jobs:
|
||||
include:
|
||||
- snapshotter: devmapper
|
||||
pull-type: default
|
||||
using-nfd: true
|
||||
deploy-cmd: configure-snapshotter
|
||||
- snapshotter: nydus
|
||||
pull-type: guest-pull
|
||||
using-nfd: false
|
||||
deploy-cmd: deploy-snapshotter
|
||||
exclude:
|
||||
- snapshotter: overlayfs
|
||||
@@ -74,6 +75,7 @@ jobs:
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: ${{ matrix.using-nfd }}
|
||||
TARGET_ARCH: "s390x"
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
@@ -103,13 +105,11 @@ jobs:
|
||||
# qemu-runtime-rs only works with overlayfs
|
||||
# See: https://github.com/kata-containers/kata-containers/issues/10066
|
||||
- name: Configure the ${{ matrix.snapshotter }} snapshotter
|
||||
env:
|
||||
DEPLOY_CMD: ${{ matrix.deploy-cmd }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh "${DEPLOY_CMD}"
|
||||
run: bash tests/integration/kubernetes/gha-run.sh ${{ matrix.deploy-cmd }}
|
||||
if: ${{ matrix.snapshotter != 'overlayfs' }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-zvsi
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
@@ -131,10 +131,6 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-zvsi
|
||||
|
||||
@@ -40,13 +40,11 @@ permissions: {}
|
||||
jobs:
|
||||
# Generate jobs for testing CoCo on non-TEE environments
|
||||
run-stability-k8s-tests-coco-nontee:
|
||||
name: run-stability-k8s-tests-coco-nontee
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu-coco-dev
|
||||
- qemu-coco-dev-runtime-rs
|
||||
snapshotter:
|
||||
- nydus
|
||||
pull-type:
|
||||
@@ -71,6 +69,7 @@ jobs:
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -140,18 +139,6 @@ jobs:
|
||||
timeout-minutes: 300
|
||||
run: bash tests/stability/gha-stability-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Refresh OIDC token in case access token expired
|
||||
if: always()
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
|
||||
|
||||
248
.github/workflows/run-kata-coco-tests.yaml
vendored
@@ -39,17 +39,17 @@ on:
|
||||
permissions: {}
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-on-tee:
|
||||
name: run-k8s-tests-on-tee
|
||||
run-k8s-tests-on-tdx:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- runner: tdx
|
||||
vmm: qemu-tdx
|
||||
- runner: sev-snp
|
||||
vmm: qemu-snp
|
||||
runs-on: ${{ matrix.runner }}
|
||||
vmm:
|
||||
- qemu-tdx
|
||||
snapshotter:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: tdx
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@@ -57,14 +57,15 @@ jobs:
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "true"
|
||||
KBS: "true"
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
KBS_INGRESS: "nodeport"
|
||||
SNAPSHOTTER: "nydus"
|
||||
PULL_TYPE: "guest-pull"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
GH_ITA_KEY: ${{ secrets.ITA_KEY }}
|
||||
ITA_KEY: ${{ secrets.ITA_KEY }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
@@ -79,9 +80,13 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Snapshotter
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
timeout-minutes: 10
|
||||
@@ -90,8 +95,6 @@ jobs:
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
env:
|
||||
ITA_KEY: ${{ env.KATA_HYPERVISOR == 'qemu-tdx' && env.GH_ITA_KEY || '' }}
|
||||
|
||||
- name: Install `kbs-client`
|
||||
timeout-minutes: 10
|
||||
@@ -105,19 +108,102 @@ jobs:
|
||||
timeout-minutes: 100
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx
|
||||
|
||||
- name: Delete Snapshotter
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
- name: Delete CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
|
||||
|
||||
run-k8s-tests-sev-snp:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu-snp
|
||||
snapshotter:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: sev-snp
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBECONFIG: /home/kata/.kube/config
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "false"
|
||||
KBS: "true"
|
||||
KBS_INGRESS: "nodeport"
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
[[ "${KATA_HYPERVISOR}" == "qemu-tdx" ]] && echo "ITA_KEY=${GH_ITA_KEY}" >> "${GITHUB_ENV}"
|
||||
bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Snapshotter
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
|
||||
- name: Install `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Deploy CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 50
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp
|
||||
|
||||
- name: Delete Snapshotter
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
- name: Delete CSI driver
|
||||
timeout-minutes: 5
|
||||
@@ -125,21 +211,15 @@ jobs:
|
||||
|
||||
# Generate jobs for testing CoCo on non-TEE environments
|
||||
run-k8s-tests-coco-nontee:
|
||||
name: run-k8s-tests-coco-nontee
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu-coco-dev
|
||||
- qemu-coco-dev-runtime-rs
|
||||
snapshotter:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
include:
|
||||
- pull-type: experimental-force-guest-pull
|
||||
vmm: qemu-coco-dev
|
||||
snapshotter: ""
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
id-token: write # Used for OIDC access to log into Azure
|
||||
@@ -159,12 +239,13 @@ jobs:
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
EXPERIMENTAL_FORCE_GUEST_PULL: ${{ matrix.pull-type == 'experimental-force-guest-pull' && matrix.vmm || '' }}
|
||||
# Caution: the ingress controller currently used to expose the KBS service
# requires many vCPUs, leaving only a few for the tests. Depending on the
# host type chosen, this can result in a cluster with insufficient resources.
|
||||
K8S_TEST_HOST_TYPE: "all"
|
||||
USING_NFD: "false"
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -214,12 +295,13 @@ jobs:
|
||||
- name: Download credentials for the Kubernetes CLI to use them
|
||||
run: bash tests/integration/kubernetes/gha-run.sh get-cluster-credentials
|
||||
|
||||
- name: Deploy Snapshotter
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-aks
|
||||
env:
|
||||
USE_EXPERIMENTAL_SETUP_SNAPSHOTTER: ${{ env.SNAPSHOTTER == 'nydus' }}
|
||||
AUTO_GENERATE_POLICY: ${{ env.PULL_TYPE == 'experimental-force-guest-pull' && 'no' || 'yes' }}
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
@@ -241,106 +323,6 @@ jobs:
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Refresh OIDC token in case access token expired
|
||||
if: always()
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
|
||||
|
||||
# Generate jobs for testing CoCo on non-TEE environments with erofs-snapshotter
|
||||
run-k8s-tests-coco-nontee-with-erofs-snapshotter:
|
||||
name: run-k8s-tests-coco-nontee-with-erofs-snapshotter
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu-coco-dev
|
||||
snapshotter:
|
||||
- erofs
|
||||
pull-type:
|
||||
- default
|
||||
runs-on: ubuntu-24.04
|
||||
environment: ci
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
# Some tests rely on that variable to run (or not)
|
||||
KBS: "false"
|
||||
# Set the KBS ingress handler (empty string disables handling)
|
||||
KBS_INGRESS: ""
|
||||
KUBERNETES: "vanilla"
|
||||
CONTAINER_ENGINE: "containerd"
|
||||
CONTAINER_ENGINE_VERSION: "v2.2"
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USE_EXPERIMENTAL_SETUP_SNAPSHOTTER: "true"
|
||||
K8S_TEST_HOST_TYPE: "all"
|
||||
# We are skipping the auto generated policy tests for now,
|
||||
# but those should be enabled as soon as we work on that.
|
||||
AUTO_GENERATE_POLICY: "no"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Remove unnecessary directories to free up space
|
||||
run: |
|
||||
sudo rm -rf /usr/local/.ghcup
|
||||
sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf /opt/ghc
|
||||
sudo rm -rf /usr/local/share/boost
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
sudo rm -rf /usr/lib/jvm
|
||||
sudo rm -rf /usr/share/swift
|
||||
sudo rm -rf /usr/local/share/powershell
|
||||
sudo rm -rf /usr/local/julia*
|
||||
sudo rm -rf /opt/az
|
||||
sudo rm -rf /usr/local/share/chromium
|
||||
sudo rm -rf /opt/microsoft
|
||||
sudo rm -rf /opt/google
|
||||
sudo rm -rf /usr/lib/firefox
|
||||
|
||||
- name: Deploy kubernetes
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 20
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Deploy CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 80
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
@@ -33,7 +33,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-kata-deploy-tests:
|
||||
name: run-kata-deploy-tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -59,6 +58,7 @@ jobs:
|
||||
KATA_HOST_OS: ${{ matrix.host_os }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -102,18 +102,6 @@ jobs:
|
||||
- name: Run tests
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Refresh OIDC token in case access token expired
|
||||
if: always()
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh delete-cluster
|
||||
|
||||
25
.github/workflows/run-kata-deploy-tests.yaml
vendored
@@ -26,7 +26,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-kata-deploy-tests:
|
||||
name: run-kata-deploy-tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -45,6 +44,7 @@ jobs:
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
@@ -58,25 +58,6 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Remove unnecessary directories to free up space
|
||||
run: |
|
||||
sudo rm -rf /usr/local/.ghcup
|
||||
sudo rm -rf /opt/hostedtoolcache/CodeQL
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo rm -rf /opt/ghc
|
||||
sudo rm -rf /usr/local/share/boost
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
sudo rm -rf /usr/lib/jvm
|
||||
sudo rm -rf /usr/share/swift
|
||||
sudo rm -rf /usr/local/share/powershell
|
||||
sudo rm -rf /usr/local/julia*
|
||||
sudo rm -rf /opt/az
|
||||
sudo rm -rf /usr/local/share/chromium
|
||||
sudo rm -rf /opt/microsoft
|
||||
sudo rm -rf /opt/google
|
||||
sudo rm -rf /usr/lib/firefox
|
||||
|
||||
- name: Deploy ${{ matrix.k8s }}
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh deploy-k8s
|
||||
|
||||
@@ -85,7 +66,3 @@ jobs:
|
||||
|
||||
- name: Run tests
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
@@ -17,7 +17,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-monitor:
|
||||
name: run-monitor
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
||||
2
.github/workflows/run-metrics.yaml
vendored
@@ -26,7 +26,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-metrics:
|
||||
name: run-metrics
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
@@ -44,6 +43,7 @@ jobs:
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
USING_NFD: "false"
|
||||
KUBERNETES: kubeadm
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
1
.github/workflows/run-runk-tests.yaml
vendored
@@ -17,7 +17,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
run-runk:
|
||||
name: run-runk
|
||||
# Skip runk tests as we have no maintainers. TODO: Decide when to remove altogether
|
||||
if: false
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
3
.github/workflows/shellcheck.yaml
vendored
@@ -18,7 +18,6 @@ concurrency:

jobs:
shellcheck:
name: shellcheck
runs-on: ubuntu-24.04
steps:
- name: Checkout the code
@@ -27,6 +26,6 @@ jobs:
fetch-depth: 0
persist-credentials: false
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master (2024-06-20)
with:
ignore_paths: "**/vendor/**"
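To reproduce this check locally, a rough equivalent of the action's behaviour (skipping `vendor` directories, as `ignore_paths` does above) is to run `shellcheck` over the repository's shell scripts; the file selection below is an approximation, not the action's exact logic.

```bash
# Rough local equivalent of the ShellCheck workflow step.
# File selection approximates the action's defaults while honouring ignore_paths.
find . -name '*.sh' -not -path '*/vendor/*' -print0 \
  | xargs -0 shellcheck
```

The `shellcheck_required` variant further down additionally sets `severity: error`, which corresponds to passing `--severity=error` to `shellcheck`.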
3
.github/workflows/shellcheck_required.yaml
vendored
@@ -19,7 +19,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
shellcheck-required:
|
||||
name: shellcheck-required
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
@@ -29,7 +28,7 @@ jobs:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0
|
||||
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master (2024-06-20)
|
||||
with:
|
||||
severity: error
|
||||
ignore_paths: "**/vendor/**"
|
||||
|
||||
1
.github/workflows/stale.yaml
vendored
@@ -8,7 +8,6 @@ permissions: {}
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
name: stale
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
|
||||
|
||||
18
.github/workflows/static-checks-self-hosted.yaml
vendored
@@ -28,9 +28,21 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
instance:
|
||||
- "ubuntu-24.04-arm"
|
||||
- "ubuntu-24.04-s390x"
|
||||
- "ubuntu-24.04-ppc64le"
|
||||
- "ubuntu-22.04-arm"
|
||||
- "s390x"
|
||||
- "ppc64le"
|
||||
uses: ./.github/workflows/build-checks.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
|
||||
build-checks-preview:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
instance:
|
||||
- "riscv-builder"
|
||||
uses: ./.github/workflows/build-checks-preview-riscv64.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
|
||||
22
.github/workflows/static-checks.yaml
vendored
@@ -22,7 +22,6 @@ jobs:
|
||||
target-branch: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
check-kernel-config-version:
|
||||
name: check-kernel-config-version
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
runs-on: ubuntu-22.04
|
||||
@@ -55,7 +54,6 @@ jobs:
|
||||
instance: ubuntu-22.04
|
||||
|
||||
build-checks-depending-on-kvm:
|
||||
name: build-checks-depending-on-kvm
|
||||
runs-on: ubuntu-22.04
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
@@ -90,16 +88,13 @@ jobs:
|
||||
- name: Running `${{ matrix.command }}` for ${{ matrix.component }}
|
||||
run: |
|
||||
export PATH="$PATH:${HOME}/.cargo/bin"
|
||||
cd "${COMPONENT_PATH}"
|
||||
eval "${COMMAND}"
|
||||
cd ${{ matrix.component-path }}
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
COMMAND: ${{ matrix.command }}
|
||||
COMPONENT_PATH: ${{ matrix.component-path }}
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
|
||||
static-checks:
|
||||
name: static-checks
|
||||
runs-on: ubuntu-22.04
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
@@ -122,13 +117,13 @@ jobs:
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Install yq
|
||||
run: |
|
||||
cd "${GOPATH}/src/github.com/${GITHUB_REPOSITORY}"
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}"
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
- name: Install golang
|
||||
run: |
|
||||
cd "${GOPATH}/src/github.com/${GITHUB_REPOSITORY}"
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}"
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
- name: Install system dependencies
|
||||
@@ -136,7 +131,7 @@ jobs:
|
||||
sudo apt-get update && sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
|
||||
- name: Install open-policy-agent
|
||||
run: |
|
||||
cd "${GOPATH}/src/github.com/${GITHUB_REPOSITORY}"
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}"
|
||||
./tests/install_opa.sh
|
||||
- name: Install regorus
|
||||
env:
|
||||
@@ -144,13 +139,11 @@ jobs:
|
||||
ARTEFACT_REGISTRY_USERNAME: "${{ github.actor }}"
|
||||
ARTEFACT_REGISTRY_PASSWORD: "${{ secrets.GITHUB_TOKEN }}"
|
||||
run: |
|
||||
"${GOPATH}/src/github.com/${GITHUB_REPOSITORY}/tests/install_regorus.sh"
|
||||
"${GOPATH}/src/github.com/${{ github.repository }}/tests/install_regorus.sh"
|
||||
- name: Run check
|
||||
env:
|
||||
CMD: ${{ matrix.cmd }}
|
||||
run: |
|
||||
export PATH="${PATH}:${GOPATH}/bin"
|
||||
cd "${GOPATH}/src/github.com/${GITHUB_REPOSITORY}" && ${CMD}
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}" && ${{ matrix.cmd }}
|
||||
|
||||
govulncheck:
|
||||
needs: skipper
|
||||
@@ -158,7 +151,6 @@ jobs:
|
||||
uses: ./.github/workflows/govulncheck.yaml
|
||||
|
||||
codegen:
|
||||
name: codegen
|
||||
runs-on: ubuntu-22.04
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
|
||||
13
.github/workflows/zizmor.yaml
vendored
@@ -1,6 +1,8 @@
name: GHA security analysis

on:
push:
branches: ["main"]
pull_request:

permissions: {}
@@ -11,8 +13,10 @@ concurrency:

jobs:
zizmor:
name: zizmor
runs-on: ubuntu-22.04
permissions:
contents: read
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -21,9 +25,4 @@ jobs:
persist-credentials: false

- name: Run zizmor
uses: zizmorcore/zizmor-action@e673c3917a1aef3c65c972347ed84ccd013ecda4 # v0.2.0
with:
advanced-security: false
annotations: true
persona: auditor
version: v1.13.0
uses: zizmorcore/zizmor-action@f52a838cfabf134edcbaa7c8b3677dde20045018 # v0.1.1
3
.github/zizmor.yml
vendored
@@ -1,3 +0,0 @@
rules:
undocumented-permissions:
disable: true
1
.gitignore
vendored
@@ -18,4 +18,3 @@ src/tools/log-parser/kata-log-parser
tools/packaging/static-build/agent/install_libseccomp.sh
.envrc
.direnv
**/.DS_Store
72
Cargo.toml
@@ -1,72 +0,0 @@
|
||||
[workspace.package]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
rust-version = "1.85.1"
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
# Dragonball
|
||||
"src/dragonball",
|
||||
"src/dragonball/dbs_acpi",
|
||||
"src/dragonball/dbs_address_space",
|
||||
"src/dragonball/dbs_allocator",
|
||||
"src/dragonball/dbs_arch",
|
||||
"src/dragonball/dbs_boot",
|
||||
"src/dragonball/dbs_device",
|
||||
"src/dragonball/dbs_interrupt",
|
||||
"src/dragonball/dbs_legacy_devices",
|
||||
"src/dragonball/dbs_pci",
|
||||
"src/dragonball/dbs_tdx",
|
||||
"src/dragonball/dbs_upcall",
|
||||
"src/dragonball/dbs_utils",
|
||||
"src/dragonball/dbs_virtio_devices",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
# TODO: Add all excluded crates to root workspace
|
||||
exclude = [
|
||||
"src/agent",
|
||||
"src/tools",
|
||||
"src/libs",
|
||||
"src/runtime-rs",
|
||||
|
||||
# We are cloning and building rust packages under
|
||||
# "tools/packaging/kata-deploy/local-build/build" folder, which may mislead
|
||||
# those packages to think they are part of the kata root workspace
|
||||
"tools/packaging/kata-deploy/local-build/build",
|
||||
]
|
||||
|
||||
[workspace.dependencies]
|
||||
# Rust-VMM crates
|
||||
event-manager = "0.2.1"
|
||||
kvm-bindings = "0.6.0"
|
||||
kvm-ioctls = "=0.12.1"
|
||||
linux-loader = "0.8.0"
|
||||
seccompiler = "0.5.0"
|
||||
vfio-bindings = "0.3.0"
|
||||
vfio-ioctls = "0.1.0"
|
||||
virtio-bindings = "0.1.0"
|
||||
virtio-queue = "0.7.0"
|
||||
vm-fdt = "0.2.0"
|
||||
vm-memory = "0.10.0"
|
||||
vm-superio = "0.5.0"
|
||||
vmm-sys-util = "0.11.0"
|
||||
|
||||
# Local dependencies from Dragonball Sandbox crates
|
||||
dbs-acpi = { path = "src/dragonball/dbs_acpi" }
|
||||
dbs-address-space = { path = "src/dragonball/dbs_address_space" }
|
||||
dbs-allocator = { path = "src/dragonball/dbs_allocator" }
|
||||
dbs-arch = { path = "src/dragonball/dbs_arch" }
|
||||
dbs-boot = { path = "src/dragonball/dbs_boot" }
|
||||
dbs-device = { path = "src/dragonball/dbs_device" }
|
||||
dbs-interrupt = { path = "src/dragonball/dbs_interrupt" }
|
||||
dbs-legacy-devices = { path = "src/dragonball/dbs_legacy_devices" }
|
||||
dbs-pci = { path = "src/dragonball/dbs_pci" }
|
||||
dbs-tdx = { path = "src/dragonball/dbs_tdx" }
|
||||
dbs-upcall = { path = "src/dragonball/dbs_upcall" }
|
||||
dbs-utils = { path = "src/dragonball/dbs_utils" }
|
||||
dbs-virtio-devices = { path = "src/dragonball/dbs_virtio_devices" }
|
||||
|
||||
# Local dependencies from `src/lib`
|
||||
test-utils = { path = "src/libs/test-utils" }
|
||||
@@ -8,7 +8,6 @@ set -e

cidir=$(dirname "$0")
runtimedir=${cidir}/../src/runtime
genpolicydir=${cidir}/../src/tools/genpolicy

build_working_packages() {
# working packages:
@@ -41,11 +40,3 @@ build_working_packages() {
}

build_working_packages

build_genpolicy() {
echo "building genpolicy"
pushd "${genpolicydir}" &>/dev/null
make TRIPLE=aarch64-apple-darwin build
}

build_genpolicy
@@ -43,22 +43,19 @@ WORKAROUND_9206_CRIO=${WORKAROUND_9206_CRIO:-no}
# Leverage kata-deploy to install Kata Containers in the cluster.
#
apply_kata_deploy() {
if ! command -v helm &>/dev/null; then
echo "Helm not installed, installing in current location..."
PATH=".:${PATH}"
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | HELM_INSTALL_DIR='.' bash -s -- --no-sudo
fi
local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
pushd "${katacontainers_repo_dir}" || die
sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "${deploy_file}"

info "Applying kata-deploy"
oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
local version chart
version=$(curl -sSL https://api.github.com/repos/kata-containers/kata-containers/releases/latest | jq .tag_name | tr -d '"')
chart="oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy"
oc apply -f "${deploy_file}"
oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod

# Ensure any potential leftover is cleaned up ... and this secret usually is not in case of previous failures
oc delete secret sh.helm.release.v1.kata-deploy.v1 -n kube-system || true

echo "Installing kata using helm ${chart} ${version}"
helm install kata-deploy --wait --namespace kube-system --set "image.reference=${KATA_DEPLOY_IMAGE%%:*},image.tag=${KATA_DEPLOY_IMAGE##*:}" "${chart}" --version "${version}"
info "Adding the kata runtime classes"
oc apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
popd || die
}
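The helm-based path introduced above can also be exercised on its own. The sketch below reuses the chart reference and version lookup from `apply_kata_deploy()`; the default value for `KATA_DEPLOY_IMAGE` is only an assumed placeholder and should be replaced with the image reference you actually want to deploy.

```bash
# Standalone sketch of the helm-based kata-deploy installation used above.
# Assumes KATA_DEPLOY_IMAGE is an image reference of the form repo:tag.
KATA_DEPLOY_IMAGE="${KATA_DEPLOY_IMAGE:-quay.io/kata-containers/kata-deploy:latest}"  # assumed default

chart="oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy"
version=$(curl -sSL https://api.github.com/repos/kata-containers/kata-containers/releases/latest \
  | jq -r .tag_name)

helm install kata-deploy --wait --namespace kube-system \
  --set "image.reference=${KATA_DEPLOY_IMAGE%%:*},image.tag=${KATA_DEPLOY_IMAGE##*:}" \
  "${chart}" --version "${version}"
```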
@@ -177,13 +174,13 @@ wait_for_app_pods_message() {
local namespace="$5"
[[ -z "${pod_count}" ]] && pod_count=1
[[ -z "${timeout}" ]] && timeout=60
[[ -n "${namespace}" ]] && namespace=("-n" "${namespace}")
[[ -n "${namespace}" ]] && namespace=" -n ${namespace} "
local pod
local pods
local i
SECONDS=0
while :; do
mapfile -t pods < <(oc get pods -l app="${app}" --no-headers=true "${namespace[@]}" | awk '{print $1}')
mapfile -t pods < <(oc get pods -l app="${app}" --no-headers=true "${namespace}" | awk '{print $1}')
[[ "${#pods}" -ge "${pod_count}" ]] && break
if [[ "${SECONDS}" -gt "${timeout}" ]]; then
printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
@@ -193,7 +190,7 @@ wait_for_app_pods_message() {
local log
for pod in "${pods[@]}"; do
while :; do
log=$(oc logs "${namespace[@]}" "${pod}")
log=$(oc logs "${namespace}" "${pod}")
echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
if [[ "${SECONDS}" -gt "${timeout}" ]]; then
echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
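The core of `wait_for_app_pods_message()` is a bounded polling loop built on bash's `SECONDS` counter: first wait until the expected pods exist, then wait until each pod's log contains the message. A stripped-down sketch of the same pattern, with placeholder label and message values, looks like this:

```bash
# Stripped-down sketch of the polling pattern used by wait_for_app_pods_message().
# "my-app" and "server started" are placeholder values for illustration.
app="my-app"
message="server started"
timeout=60

SECONDS=0
while :; do
    mapfile -t pods < <(oc get pods -l app="${app}" --no-headers=true | awk '{print $1}')
    [[ "${#pods[@]}" -ge 1 ]] && break
    [[ "${SECONDS}" -gt "${timeout}" ]] && { echo "no pods found in ${SECONDS}s"; exit 1; }
    sleep 1
done

for pod in "${pods[@]}"; do
    until oc logs "${pod}" | grep -q "${message}"; do
        [[ "${SECONDS}" -gt "${timeout}" ]] && { echo "message not found in ${pod}"; exit 1; }
        sleep 1
    done
done
```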
@@ -12,33 +12,6 @@

SCRIPT_DIR=$(dirname "$0")

##################
# Helper functions
##################

# Sparse "git clone" supporting old git version
# $1 - origin
# $2 - revision
# $3- - sparse checkout paths
# Note: uses pushd to change into the cloned directory!
git_sparse_clone() {
local origin="$1"
local revision="$2"
shift 2
local sparse_paths=("$@")

local repo
repo=$(basename -s .git "${origin}")

git init "${repo}"
pushd "${repo}" || exit 1
git remote add origin "${origin}"
git fetch --depth 1 origin "${revision}"
git sparse-checkout init --cone
git sparse-checkout set "${sparse_paths[@]}"
git checkout FETCH_HEAD
}

###############################
# Disable security to allow e2e
###############################
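As used later in this script, the helper takes an origin, a revision and one or more sparse paths, and leaves the shell inside the cloned repository (note the `pushd`), so callers are expected to `popd` afterwards. For example:

```bash
# Example invocation of the helper above, modelled on its use later in this script.
# git_sparse_clone leaves the caller inside the cloned repository.
git_sparse_clone "https://github.com/confidential-containers/operator" "main" "config/"
oc apply -k "config/release"
popd
```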
@@ -143,40 +116,33 @@ az network vnet subnet update \
for NODE_NAME in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}'); do [[ "${NODE_NAME}" =~ 'worker' ]] && kubectl label node "${NODE_NAME}" node.kubernetes.io/worker=; done

# CAA artifacts
if [[ -z "${CAA_TAG}" ]]; then
if [[ -n "${CAA_IMAGE}" ]]; then
echo "CAA_IMAGE (${CAA_IMAGE}) is set but CAA_TAG isn't, which is not supported. Please specify both or none"
exit 1
fi
TAGS="$(curl https://quay.io/api/v1/repository/confidential-containers/cloud-api-adaptor/tag/?onlyActiveTags=true)"
DIGEST=$(echo "${TAGS}" | jq -r '.tags[] | select(.name | contains("latest-amd64")) | .manifest_digest')
CAA_TAG="$(echo "${TAGS}" | jq -r '.tags[] | select(.manifest_digest | contains("'"${DIGEST}"'")) | .name' | grep -v "latest")"
fi
if [[ -z "${CAA_IMAGE}" ]]; then
CAA_IMAGE="quay.io/confidential-containers/cloud-api-adaptor"
fi
CAA_IMAGE="quay.io/confidential-containers/cloud-api-adaptor"
TAGS="$(curl https://quay.io/api/v1/repository/confidential-containers/cloud-api-adaptor/tag/?onlyActiveTags=true)"
DIGEST=$(echo "${TAGS}" | jq -r '.tags[] | select(.name | contains("latest-amd64")) | .manifest_digest')
CAA_TAG="$(echo "${TAGS}" | jq -r '.tags[] | select(.manifest_digest | contains("'"${DIGEST}"'")) | .name' | grep -v "latest")"

# Get latest PP image
if [[ -z "${PP_IMAGE_ID}" ]]; then
SUCCESS_TIME=$(curl -s \
-H "Accept: application/vnd.github+json" \
"https://api.github.com/repos/confidential-containers/cloud-api-adaptor/actions/workflows/azure-nightly-build.yml/runs?status=success" \
| jq -r '.workflow_runs[0].updated_at')
PP_IMAGE_ID="/CommunityGalleries/cocopodvm-d0e4f35f-5530-4b9c-8596-112487cdea85/Images/podvm_image0/Versions/$(date -u -jf "%Y-%m-%dT%H:%M:%SZ" "${SUCCESS_TIME}" "+%Y.%m.%d" 2>/dev/null || date -d "${SUCCESS_TIME}" +%Y.%m.%d)"
fi
SUCCESS_TIME=$(curl -s \
-H "Accept: application/vnd.github+json" \
"https://api.github.com/repos/confidential-containers/cloud-api-adaptor/actions/workflows/azure-nightly-build.yml/runs?status=success" \
| jq -r '.workflow_runs[0].updated_at')
PP_IMAGE_ID="/CommunityGalleries/cocopodvm-d0e4f35f-5530-4b9c-8596-112487cdea85/Images/podvm_image0/Versions/$(date -u -jf "%Y-%m-%dT%H:%M:%SZ" "${SUCCESS_TIME}" "+%Y.%m.%d" 2>/dev/null || date -d "${SUCCESS_TIME}" +%Y.%m.%d)"

echo "AZURE_REGION=\"${AZURE_REGION}\""
echo "PP_REGION=\"${PP_REGION}\""
echo "AZURE_RESOURCE_GROUP=\"${AZURE_RESOURCE_GROUP}\""
echo "PP_RESOURCE_GROUP=\"${PP_RESOURCE_GROUP}\""
echo "PP_SUBNET_ID=\"${PP_SUBNET_ID}\""
echo "CAA_IMAGE=\"${CAA_IMAGE}\""
echo "CAA_TAG=\"${CAA_TAG}\""
echo "PP_IMAGE_ID=\"${PP_IMAGE_ID}\""
echo "AZURE_REGION: \"${AZURE_REGION}\""
echo "PP_REGION: \"${PP_REGION}\""
echo "AZURE_RESOURCE_GROUP: \"${AZURE_RESOURCE_GROUP}\""
echo "PP_RESOURCE_GROUP: \"${PP_RESOURCE_GROUP}\""
echo "PP_SUBNET_ID: \"${PP_SUBNET_ID}\""
echo "CAA_TAG: \"${CAA_TAG}\""
echo "PP_IMAGE_ID: \"${PP_IMAGE_ID}\""

# Clone and configure caa
git_sparse_clone "https://github.com/confidential-containers/cloud-api-adaptor.git" "${CAA_GIT_SHA:-main}" "src/cloud-api-adaptor/install/"
echo "CAA_GIT_SHA=\"$(git rev-parse HEAD)\""
git clone --depth 1 --no-checkout https://github.com/confidential-containers/cloud-api-adaptor.git
pushd cloud-api-adaptor
git sparse-checkout init --cone
git sparse-checkout set src/cloud-api-adaptor/install/
git checkout
echo "CAA_GIT_SHA: \"$(git rev-parse HEAD)\""
pushd src/cloud-api-adaptor
cat <<EOF > install/overlays/azure/workload-identity.yaml
apiVersion: apps/v1
@@ -242,8 +208,12 @@ echo "AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}" >> install/overlays/azure/serv
echo "AZURE_TENANT_ID=${AZURE_TENANT_ID}" >> install/overlays/azure/service-principal.env

# Deploy Operator
git_sparse_clone "https://github.com/confidential-containers/operator" "${OPERATOR_SHA:-main}" "config/"
echo "OPERATOR_SHA=\"$(git rev-parse HEAD)\""
git clone --depth 1 --no-checkout https://github.com/confidential-containers/operator
pushd operator
git sparse-checkout init --cone
git sparse-checkout set "config/"
git checkout
echo "OPERATOR_SHA: \"$(git rev-parse HEAD)\""
oc apply -k "config/release"
oc apply -k "config/samples/ccruntime/peer-pods"
popd
@@ -257,7 +227,7 @@ popd
SECONDS=0
( while [[ "${SECONDS}" -lt 360 ]]; do
kubectl get runtimeclass | grep -q kata-remote && exit 0
done; exit 1 ) || { echo "kata-remote runtimeclass not initialized in 60s"; kubectl -n confidential-containers-system get all; echo; echo "kubectl -n confidential-containers-system describe all"; kubectl -n confidential-containers-system describe all; echo; echo CAA; kubectl -n confidential-containers-system logs daemonset.apps/cloud-api-adaptor-daemonset; echo pre-install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-pre-install-daemon; echo install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-daemon-install; exit 1; }
done; exit 1 ) || { echo "kata-remote runtimeclass not initialized in 60s"; kubectl -n confidential-containers-system get all; echo; echo CAA; kubectl -n confidential-containers-system logs daemonset.apps/cloud-api-adaptor-daemonset; echo pre-install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-pre-install-daemon; echo install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-daemon-install; exit 1; }

################
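The tag-discovery logic above can be tested in isolation: it asks the quay.io API for the active tags of the `cloud-api-adaptor` repository, resolves the digest behind `latest-amd64`, and then picks the concrete (non-`latest`) tag that shares that digest. A minimal sketch:

```bash
# Minimal sketch of the CAA tag discovery used above: resolve the digest behind
# "latest-amd64" and pick the concrete (non-"latest") tag pointing at it.
TAGS="$(curl -s "https://quay.io/api/v1/repository/confidential-containers/cloud-api-adaptor/tag/?onlyActiveTags=true")"
DIGEST=$(echo "${TAGS}" | jq -r '.tags[] | select(.name | contains("latest-amd64")) | .manifest_digest')
CAA_TAG="$(echo "${TAGS}" | jq -r '.tags[] | select(.manifest_digest | contains("'"${DIGEST}"'")) | .name' | grep -v "latest")"
echo "Resolved CAA_TAG=${CAA_TAG}"
```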
@@ -450,7 +450,7 @@ You can build and install the guest kernel image as shown [here](../tools/packag
# Install a hypervisor

When setting up Kata using a [packaged installation method](install/README.md#installing-on-a-linux-system), the
`QEMU` VMM is installed automatically. Cloud-Hypervisor, Firecracker and StratoVirt VMMs are available from the [release tarballs](https://github.com/kata-containers/kata-containers/releases), as well as through [`kata-deploy`](../tools/packaging/kata-deploy/helm-chart/README.md).
`QEMU` VMM is installed automatically. Cloud-Hypervisor, Firecracker and StratoVirt VMMs are available from the [release tarballs](https://github.com/kata-containers/kata-containers/releases), as well as through [`kata-deploy`](../tools/packaging/kata-deploy/README.md).
You may choose to manually build your VMM/hypervisor.

## Build a custom QEMU
@@ -166,65 +166,19 @@ moment.
See [this issue](https://github.com/kata-containers/runtime/issues/2812) for more details.
[Another issue](https://github.com/kata-containers/kata-containers/issues/1728) focuses on the case of `emptyDir`.

### Kubernetes [hostPath][k8s-hostpath] volumes
## Host resource sharing

In Kata, Kubernetes hostPath volumes can mount host directories and
regular files into the guest VM via filesystem sharing, if it is enabled
through the `shared_fs` [configuration][runtime-config] flag.

By default:

- Non-TEE environment: Filesystem sharing is used to mount host files.
- TEE environment: Filesystem sharing is disabled. Instead, host files
  are copied into the guest VM when the container starts, and file
  changes are *not* synchronized between the host and the guest.

In some cases, the behavior of hostPath volumes in Kata is further
different compared to `runc` containers:

**Mounting host block devices**: When a hostPath volume is of type
[`BlockDevice`][k8s-blockdevice], Kata hotplugs the host block device
into the guest and exposes it directly to the container.

**Mounting guest devices**: When the source path of a hostPath volume is
under `/dev`, and the path either corresponds to a host device or is not
accessible by the Kata shim, the Kata agent bind mounts the source path
directly from the *guest* filesystem into the container.

[runtime-config]: /src/runtime/README.md#configuration
[k8s-hostpath]: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
[k8s-blockdevice]: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath-volume-types
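As an illustration of the block-device case described above, a pod spec along the following lines would cause Kata to hotplug the host device into the guest; the device path, image and runtime class name are placeholders and need to match your environment.

```bash
# Illustrative only: a hostPath volume of type BlockDevice with a Kata runtime class.
# /dev/sdb, the image and the runtime class name are placeholders for your setup.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: blockdev-demo
spec:
  runtimeClassName: kata
  containers:
    - name: app
      image: registry.example.com/app:latest
      volumeMounts:
        - name: disk
          mountPath: /disk/sdb
  volumes:
    - name: disk
      hostPath:
        path: /dev/sdb
        type: BlockDevice
EOF
```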
### Mounting `procfs` and `sysfs`

For security reasons, the following mounts are disallowed:

| Type | Source | Destination | Rationale |
|-------------------|-----------|----------------------------------|----------------|
| `bind` | `!= proc` | `/proc` | CVE-2019-16884 |
| `bind` | `*` | `/proc/*` (see exceptions below) | CVE-2019-16884 |
| `proc \|\| sysfs` | `*` | not a directory (e.g. symlink) | CVE-2019-19921 |

For bind mounts under /proc, these destinations are allowed:

* `/proc/cpuinfo`
* `/proc/diskstats`
* `/proc/meminfo`
* `/proc/stat`
* `/proc/swaps`
* `/proc/uptime`
* `/proc/loadavg`
* `/proc/net/dev`

## Privileged containers
### Privileged containers

Privileged support in Kata is essentially different from `runc` containers.
The container runs with elevated capabilities within the guest.
The container runs with elevated capabilities within the guest and is granted
access to guest devices instead of the host devices.
This is also true with using `securityContext privileged=true` with Kubernetes.

Importantly, the default behavior to pass the host devices to a
privileged container is not supported in Kata Containers and needs to be
disabled, see [Privileged Kata Containers](how-to/privileged.md).
The container may also be granted full access to a subset of host devices
(https://github.com/kata-containers/runtime/issues/1568).

See [Privileged Kata Containers](how-to/privileged.md) for how to configure some of this behavior.

# Appendices
@@ -83,7 +83,3 @@ Documents that help to understand and contribute to Kata Containers.
If you have a suggestion for how we can improve the
[website](https://katacontainers.io), please raise an issue (or a PR) on
[the repository that holds the source for the website](https://github.com/OpenStackweb/kata-netlify-refresh).

### Toolchain Guidance

* [Toolchain Guidance](./Toochain-Guidance.md)
@@ -1,39 +0,0 @@
# Toolchains

As a community we want to strike a balance between having up-to-date toolchains, to receive the
latest security fixes and to be able to benefit from new features and packages, whilst not being
too bleeding edge and disrupting downstream and other consumers. As a result we have the following
guidelines (note, not hard rules) for our go and rust toolchains that we are attempting to try out:

## Go toolchain

Go is released [every six months](https://go.dev/wiki/Go-Release-Cycle) with support for the
[last two major release versions](https://go.dev/doc/devel/release#policy). We always want to
ensure that we are on a supported version so we receive security fixes. To try and make
things easier for some of our users, we aim to be using the older of the two supported major
versions, unless there is a compelling reason to adopt the newer version.

In practice this means that we bump our major version of the go toolchain every six months to
version (1.x-1) in response to a new version (1.x) coming out, which makes our current version
(1.x-2) no longer supported. We will bump the minor version whenever required to satisfy
dependency updates, or security fixes.

Our go toolchain version is recorded in [`versions.yaml`](../versions.yaml) under
`.languages.golang.version` and should match with the version in our `go.mod` files.

## Rust toolchain

Rust has a [six week](https://doc.rust-lang.org/book/appendix-05-editions.html#:~:text=The%20Rust%20language%20and%20compiler,these%20tiny%20changes%20add%20up.)
release cycle and they only support the latest stable release, so if we wanted to remain on a
supported release we would only ever build with the latest stable and bump every 6 weeks.
However feedback from our community has indicated that this is a challenge as downstream consumers
often want to get rust from their distro, or downstream fork and these struggle to keep up with
the six week release schedule. As a result the community has agreed to try out a policy of
"stable-2", where we aim to build with a rust version that is two versions behind the latest stable
version.

In practice this should mean that we bump our rust toolchain every six weeks, to version
1.x-2 when 1.x is released as stable and we should be picking up the latest point release
of that version, if there were any.

The rust-toolchain that we are using is recorded in [`rust-toolchain.toml`](../rust-toolchain.toml).
@@ -31,7 +31,6 @@
- [Setting Sysctls with Kata](how-to-use-sysctls-with-kata.md)
- [What Is VMCache and How To Enable It](what-is-vm-cache-and-how-do-I-use-it.md)
- [What Is VM Templating and How To Enable It](what-is-vm-templating-and-how-do-I-use-it.md)
- [How to Use Template in runtime-rs](how-to-use-template-in-runtime-rs.md)
- [Privileged Kata Containers](privileged.md)
- [How to load kernel modules in Kata Containers](how-to-load-kernel-modules-with-kata.md)
- [How to use Kata Containers with `virtio-mem`](how-to-use-virtio-mem-with-kata.md)
@@ -49,4 +48,3 @@
- [How to use the Kata Agent Policy](how-to-use-the-kata-agent-policy.md)
- [How to pull images in the guest](how-to-pull-images-in-guest-with-kata.md)
- [How to use mem-agent to decrease the memory usage of Kata container](how-to-use-memory-agent.md)
- [How to use seccomp with runtime-rs](how-to-use-seccomp-with-runtime-rs.md)
@@ -318,7 +318,7 @@ Finally, an operational kata container with IBM Secure Execution is now running.

It is reasonable to expect that the manual steps mentioned above can be easily executed.
Typically, you can use
[kata-deploy](https://github.com/kata-containers/kata-containers/blob/main/tools/packaging/kata-deploy/helm-chart/README.md)
[kata-deploy](https://github.com/kata-containers/kata-containers/blob/main/tools/packaging/kata-deploy/README.md)
to install Kata Containers on a Kubernetes cluster. However, when leveraging IBM Secure Execution,
you need to employ the confidential container's
[operator](https://github.com/confidential-containers/operator).
@@ -97,8 +97,6 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.use_legacy_serial` | `boolean` | uses legacy serial device for guest's console (QEMU) |
| `io.katacontainers.config.hypervisor.default_gpus` | uint32 | the minimum number of GPUs required for the VM. Only used by remote hypervisor to help with instance selection |
| `io.katacontainers.config.hypervisor.default_gpu_model` | string | the GPU model required for the VM. Only used by remote hypervisor to help with instance selection |
| `io.katacontainers.config.hypervisor.block_device_num_queues` | `usize` | The number of queues to use for block devices (runtime-rs only) |
| `io.katacontainers.config.hypervisor.block_device_queue_size` | uint32 | The size of the queue to use for block devices (runtime-rs only) |

## Container Options
| Key | Value Type | Comments |
@@ -104,20 +104,12 @@ LOW_WATER_MARK=32768
sudo dmsetup create "${POOL_NAME}" \
--table "0 ${LENGTH_IN_SECTORS} thin-pool ${META_DEV} ${DATA_DEV} ${DATA_BLOCK_SIZE} ${LOW_WATER_MARK}"

# Determine plugin name based on containerd config version
CONFIG_VERSION=$(containerd config dump | awk '/^version/ {print $3}')
if [ "$CONFIG_VERSION" -ge 2 ]; then
PLUGIN="io.containerd.snapshotter.v1.devmapper"
else
PLUGIN="devmapper"
fi

cat << EOF
#
# Add this to your config.toml configuration file and restart containerd daemon
#
[plugins]
[plugins."${PLUGIN}"]
[plugins.devmapper]
pool_name = "${POOL_NAME}"
root_path = "${DATA_DIR}"
base_image_size = "10GB"
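After adding the generated snippet to `config.toml` and restarting containerd, you can sanity-check that the devmapper snapshotter is registered; the commands below are a generic verification sketch and are not part of the script above.

```bash
# Generic verification sketch (not part of the script above):
# restart containerd and confirm the devmapper snapshotter is registered.
sudo systemctl restart containerd
sudo ctr plugins ls | grep devmapper
sudo containerd config dump | grep -A 3 devmapper
```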
@@ -1,44 +0,0 @@
|
||||
## Introduction
|
||||
|
||||
To enhance security, Kata Containers supports using seccomp to restrict the hypervisor's system calls. Previously, this was only supported for a subset of hypervisors in runtime-go. Now, the runtime-rs also supports seccomp. This document describes how to enable/disable the seccomp feature for the corresponding hypervisor in runtime-rs.
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
1. Ensure your system's kernel supports **seccomp**.
|
||||
2. Confirm that each of the following virtual machines can run correctly on your system.
|
||||
|
||||
## Configure seccomp
|
||||
|
||||
With the exception of `qemu`, seccomp is enabled by default for all other supported hypervisors. Their corresponding built-in functionalities are also enabled by default.
|
||||
|
||||
### QEMU
|
||||
|
||||
As with runtime-go, you need to modify the following in your **configuration file**. These parameters will be passed directly to the `qemu` startup command line. For more details on the parameters, you can refer to: [https://www.qemu.org/docs/master/system/qemu-manpage.html](https://www.qemu.org/docs/master/system/qemu-manpage.html)
|
||||
|
||||
``` toml
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
# For example, `seccompsandbox= "on,obsolete=deny,spawn=deny,resourcecontrol=deny"`
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
seccompsandbox="on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
```
|
||||
### Cloud Hypervisor, Firecracker and Dragonball
|
||||
|
||||
The **seccomp** functionality is enabled by default for the following three hypervisors: `cloud hypervisor`, `firecracker`, and `dragonball`.
|
||||
|
||||
The seccomp rules for `cloud hypervisor` and `firecracker` are built directly into their executable files. For `dragonball`, the relevant configuration is currently located at `src/runtime-rs/crates/hypervisor/src/dragonball/seccomp.rs`.
|
||||
|
||||
To disable this functionality for these hypervisors, you can modify the following configuration options in your **configuration file**.
|
||||
|
||||
``` toml
|
||||
# Disable the 'seccomp' feature from Cloud Hypervisor, firecracker or dragonball, default false
|
||||
disable_seccomp = true
|
||||
```
|
||||
|
||||
## Implementation details
|
||||
|
||||
For `qemu`, `cloud hypervisor`, and `firecracker`, their **seccomp** functionality is built into the respective executable files you are using. **runtime-rs** simply provides command-line arguments for their launch based on the configuration file.
|
||||
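For example, with the `seccompsandbox` setting shown earlier, the QEMU process should be started with a matching `-sandbox` option. A minimal way to check this on a host with a running Kata VM (assuming QEMU is the configured hypervisor) is:

```bash
# Look for the seccomp sandbox option in the QEMU command line of a running Kata VM.
ps -eo args | grep '[q]emu' | grep -o -- '-sandbox [^ ]*'
```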
|
||||
For `dragonball`, a set of allowed system calls is currently provided for the entire **runtime-rs** process, and the process is prevented from using any system calls outside of this whitelist. As mentioned above, this set is located at `src/runtime-rs/crates/hypervisor/src/dragonball/seccomp.rs`.
|
||||
@@ -1,119 +0,0 @@
|
||||
# How to Use Template in runtime-rs
|
||||
|
||||
## What is VM Templating
|
||||
|
||||
VM templating is a Kata Containers feature that enables new VM creation using a cloning technique. When enabled, new VMs are created by cloning from a pre-created template VM, and they will share the same initramfs, kernel and agent memory in readonly mode. It is very much like a process fork done by the kernel but here we *fork* VMs.
|
||||
|
||||
For more details on VM templating, refer to the [What is VM templating and how do I use it](./what-is-vm-templating-and-how-do-I-use-it.md) article.
|
||||
|
||||
## How to Enable VM Templating
|
||||
|
||||
VM templating can be enabled by changing your Kata Containers config file (`/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml`, overridden by `/etc/kata-containers/configuration.toml` if provided) such that:
|
||||
|
||||
- `qemu` version `v4.1.0` or above is specified in `hypervisor.qemu`->`path` section
|
||||
- `enable_template = true`
|
||||
- `template_path = "/run/vc/vm/template"` (default value, can be customized as needed)
|
||||
- `initrd =` is set
|
||||
- `image =` option is commented out or removed
|
||||
- `shared_fs =` option is commented out or removed
|
||||
- `default_memory =` should be set to more than 256MB
|
||||
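A minimal setup sketch, assuming the default runtime-rs paths quoted above:

```bash
# Copy the default runtime-rs configuration so the edits survive package upgrades.
sudo mkdir -p /etc/kata-containers
sudo cp /opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml \
    /etc/kata-containers/configuration.toml
# Then edit /etc/kata-containers/configuration.toml and apply the settings listed above
# (enable_template, template_path, initrd, default_memory), commenting out image and shared_fs.
```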
|
||||
Then you can create a VM template for later usage by calling:
|
||||
|
||||
### Initialize and create the VM template
|
||||
The `factory init` command creates a VM template by launching a new VM, initializing the Kata Agent, then pausing and saving its state (memory and device snapshots) to the template directory. This saved template is used to rapidly clone new VMs using QEMU's memory sharing capabilities.
|
||||
|
||||
```bash
|
||||
sudo kata-ctl factory init
|
||||
```
|
||||
|
||||
### Check the status of the VM template
|
||||
|
||||
The `factory status` command checks whether a VM template currently exists by verifying the presence of template files (memory snapshot and device state). It will output "VM factory is on" if the template exists, or "VM factory is off" otherwise.
|
||||
|
||||
```bash
|
||||
sudo kata-ctl factory status
|
||||
```
|
||||
|
||||
### Destroy and clean up the VM template
|
||||
|
||||
The `factory destroy` command removes the VM template by removing the `tmpfs` filesystem and deleting the template directory along with all its contents.
|
||||
|
||||
```bash
|
||||
sudo kata-ctl factory destroy
|
||||
```
|
||||
|
||||
## How to Create a New VM from VM Template
|
||||
In the Go version of Kata Containers, the VM templating mechanism is implemented using virtio-9p (9pfs). However, 9pfs is not supported in runtime-rs due to its poor performance, limited cache coherence, and security risks. Instead, runtime-rs adopts `VirtioFS` as the default mechanism to provide rootfs for containers and VMs.
|
||||
|
||||
However, when the VM template mechanism is enabled, `VirtioFS` conflicts with the template's memory sharing because its DAX-based shared memory mapping overlaps with the template's page-sharing design. To resolve these conflicts and ensure strict isolation between cloned VMs, runtime-rs replaces `VirtioFS` with a snapshotter-based approach, specifically the `blockfile` snapshotter.
|
||||
|
||||
The `blockfile` snapshotter is used in runtime-rs because it provides each VM with an independent block-based root filesystem, ensuring strong isolation and full compatibility with the VM templating mechanism.
|
||||
|
||||
### Configure Snapshotter
|
||||
|
||||
#### Check if `Blockfile` Snapshotter is Available
|
||||
```bash
|
||||
ctr plugins ls | grep blockfile
|
||||
```
|
||||
|
||||
If not available, continue with the following steps:
|
||||
|
||||
#### Create Scratch File
|
||||
```bash
|
||||
sudo mkdir -p /opt/containerd
sudo dd if=/dev/zero of=/opt/containerd/blockfile bs=1M count=500
|
||||
sudo mkfs.ext4 /opt/containerd/blockfile
|
||||
```
|
||||
|
||||
#### Configure containerd
|
||||
Edit the containerd configuration file:
|
||||
```bash
|
||||
sudo vim /etc/containerd/config.toml
|
||||
```
|
||||
Add or modify the following configuration for the `blockfile` snapshotter:
|
||||
```toml
|
||||
[plugins."io.containerd.snapshotter.v1.blockfile"]
|
||||
scratch_file = "/opt/containerd/blockfile"
|
||||
root_path = ""
|
||||
fs_type = "ext4"
|
||||
mount_options = []
|
||||
recreate_scratch = true
|
||||
```
|
||||
|
||||
#### Restart containerd
|
||||
After modifying the configuration, restart containerd to apply changes:
|
||||
|
||||
```bash
|
||||
sudo systemctl restart containerd
|
||||
```
|
||||
|
||||
### Run Container with `blockfile` Snapshotter
|
||||
After the VM template is created, you can pull an image and run a container using the `blockfile` snapshotter:
|
||||
|
||||
```bash
|
||||
ctr image pull docker.io/library/busybox:latest
ctr run --rm -t --runtime io.containerd.kata.v2 --snapshotter blockfile docker.io/library/busybox:latest template sh
|
||||
```
|
||||
|
||||
You can verify whether a VM was launched from a template or started normally by checking the hypervisor launch parameters: if they contain `incoming`, the VM was started from a template rather than created directly.
|
||||
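A quick check, assuming QEMU is the hypervisor in use:

```bash
# A cloned VM is launched with an "-incoming" migration argument; a directly created VM is not.
ps -eo args | grep '[q]emu' | grep -o -- '-incoming [^ ]*'
```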
|
||||
## Performance Test
|
||||
|
||||
The comparative experiment between **template-based VM** creation and **direct VM** creation showed that the template-based approach reduced average launch time to ≈ **73.2%** of the direct approach (**0.6s** vs. **0.82s**, a reduction of about 26.8%) and average memory usage to ≈ **79.8%** (**178.2 MiB** vs. **223.2 MiB**, a reduction of about 20.2%), demonstrating clear improvements in VM startup efficiency and resource utilization.
|
||||
|
||||
The test script is as follows:
|
||||
|
||||
```bash
|
||||
# Clear the page cache, dentries, and inodes to free up memory
|
||||
echo 3 | sudo tee /proc/sys/vm/drop_caches
|
||||
|
||||
# Display the current memory usage
|
||||
free -h
|
||||
|
||||
# Create 100 normal VMs and template-based VMs, and track the time
|
||||
time for I in $(seq 100); do
|
||||
echo -n " ${I}th" # Display the iteration number
|
||||
ctr run -d --runtime io.containerd.kata.v2 --snapshotter blockfile docker.io/library/busybox:latest normal/template${I}
|
||||
done
|
||||
|
||||
# Display the memory usage again after running the test
|
||||
free -h
|
||||
@@ -32,24 +32,11 @@ Kubernetes users can encode in `base64` format their Policy documents, and add t
|
||||
|
||||
### Encode a Policy file
|
||||
|
||||
For example, the [`allow-all-except-exec-process.rego`](../../src/kata-opa/allow-all-except-exec-process.rego) sample policy file is different from the [default Policy](../../src/kata-opa/allow-all.rego) because it rejects any `ExecProcess` requests. To encode this policy file, you need to:
|
||||
- Embed the policy inside an init data struct
|
||||
- Compress
|
||||
- Base64 encode
|
||||
For example:
|
||||
For example, the [`allow-all-except-exec-process.rego`](../../src/kata-opa/allow-all-except-exec-process.rego) sample policy file is different from the [default Policy](../../src/kata-opa/allow-all.rego) because it rejects any `ExecProcess` requests. You can encode this policy file:
|
||||
|
||||
```bash
|
||||
$ STRING="$(< allow-all-except-exec-process.rego)"
|
||||
$ cat <<EOF | gzip -c | base64 -w0
|
||||
version = "0.1.0"
|
||||
algorithm = "sha256"
|
||||
|
||||
[data]
|
||||
"policy.rego" = '''
|
||||
$STRING
|
||||
'''
|
||||
EOF
|
||||
H4sIAAAAAAAAA42UTW/TQBCG7/4Vq/QQOCQKQXCo1ENIAkRqiGWnpBJCaGKP7RXrXTM7DnV/PRMiVUh07R582J3H8/XO7AnJa2fVjRrNpm+ms1EEpnSkuarPd76C+bv3oyj6lgPD92jUOKOzbkpYupEA4/E4ulJL13Sky4rVq+y1ms/mb9VWZ+S8K1iM1DgClijRlcBpvLqf3OoMrcfJJkfLutBI12rRQFbhZD6dCRfJ4SeUqOSz/OMSNopyLKA1rBZ5vkjiLyhBj458gr9a9KyubxRTi/9i6W9oQualcR5TzrUNElLZR20waCcExqWzDNoi9WMp2PzoHkLQSi7JdQPUJ+QtMuksWLQQu912fZK+BZHz7QolaRN0c6s9bywjFZBhL5W4lsPEFuvPjhvTlh+6mNwx2MudNdLDZXwnf4SYGFo/3O64NWZTy+SEgAQhT1lECQZKsHan4UgXLGUw+FWTzHjh0woIt661HGxJgh4xT0RoV6/w1IO19XAOKfJFTxmxva6DRQsX/12jIKBLC0Y0Er2DuUutxMM5nak9QaZt2cOwf4En1ww42nN3OK+w14/B4u+a/CWLesHWTYU1Eph+GS/w0470Y/1LcgDNA40/yKOMzw/tE7N+wOx/NwUYj9H5qf4DsX93tO4FAAA=
|
||||
$ base64 -w 0 allow-all-except-exec-process.rego
|
||||
cGFja2FnZSBhZ2VudF9wb2xpY3kKCmRlZmF1bHQgQWRkQVJQTmVpZ2hib3JzUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgQWRkU3dhcFJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IENsb3NlU3RkaW5SZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBDb3B5RmlsZVJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IENyZWF0ZUNvbnRhaW5lclJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IENyZWF0ZVNhbmRib3hSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBEZXN0cm95U2FuZGJveFJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IEdldE1ldHJpY3NSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBHZXRPT01FdmVudFJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IEd1ZXN0RGV0YWlsc1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IExpc3RJbnRlcmZhY2VzUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgTGlzdFJvdXRlc1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IE1lbUhvdHBsdWdCeVByb2JlUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgT25saW5lQ1BVTWVtUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUGF1c2VDb250YWluZXJSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBQdWxsSW1hZ2VSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBSZWFkU3RyZWFtUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVtb3ZlQ29udGFpbmVyUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVtb3ZlU3RhbGVWaXJ0aW9mc1NoYXJlTW91bnRzUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVzZWVkUmFuZG9tRGV2UmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVzdW1lQ29udGFpbmVyUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgU2V0R3Vlc3REYXRlVGltZVJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFNldFBvbGljeVJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFNpZ25hbFByb2Nlc3NSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBTdGFydENvbnRhaW5lclJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFN0YXJ0VHJhY2luZ1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFN0YXRzQ29udGFpbmVyUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgU3RvcFRyYWNpbmdSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBUdHlXaW5SZXNpemVSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVDb250YWluZXJSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVFcGhlbWVyYWxNb3VudHNSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVJbnRlcmZhY2VSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVSb3V0ZXNSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBXYWl0UHJvY2Vzc1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFdyaXRlU3RyZWFtUmVxdWVzdCA6PSB0cnVlCgpkZWZhdWx0IEV4ZWNQcm9jZXNzUmVxdWVzdCA6PSBmYWxzZQo=
|
||||
```
|
||||
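To double-check the encoding, you can reverse it locally. This is a minimal sketch; `ENCODED` is a placeholder for the gzip-plus-base64 output produced above:

```bash
# Decode the annotation value and confirm the init data struct and policy text round-trip.
echo "$ENCODED" | base64 -d | gunzip
```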
|
||||
### Attach the Policy to a pod
|
||||
@@ -62,7 +49,7 @@ kind: Pod
|
||||
metadata:
|
||||
name: policy-exec-rejected
|
||||
annotations:
|
||||
io.katacontainers.config.hypervisor.cc_init_data: H4sIAAAAAAAAA42UTW/TQBCG7/4Vq/QQOCQKQXCo1ENIAkRqiGWnpBJCaGKP7RXrXTM7DnV/PRMiVUh07R582J3H8/XO7AnJa2fVjRrNpm+ms1EEpnSkuarPd76C+bv3oyj6lgPD92jUOKOzbkpYupEA4/E4ulJL13Sky4rVq+y1ms/mb9VWZ+S8K1iM1DgClijRlcBpvLqf3OoMrcfJJkfLutBI12rRQFbhZD6dCRfJ4SeUqOSz/OMSNopyLKA1rBZ5vkjiLyhBj458gr9a9KyubxRTi/9i6W9oQualcR5TzrUNElLZR20waCcExqWzDNoi9WMp2PzoHkLQSi7JdQPUJ+QtMuksWLQQu912fZK+BZHz7QolaRN0c6s9bywjFZBhL5W4lsPEFuvPjhvTlh+6mNwx2MudNdLDZXwnf4SYGFo/3O64NWZTy+SEgAQhT1lECQZKsHan4UgXLGUw+FWTzHjh0woIt661HGxJgh4xT0RoV6/w1IO19XAOKfJFTxmxva6DRQsX/12jIKBLC0Y0Er2DuUutxMM5nak9QaZt2cOwf4En1ww42nN3OK+w14/B4u+a/CWLesHWTYU1Eph+GS/w0470Y/1LcgDNA40/yKOMzw/tE7N+wOx/NwUYj9H5qf4DsX93tO4FAAA=
|
||||
io.katacontainers.config.agent.policy: cGFja2FnZSBhZ2VudF9wb2xpY3kKCmRlZmF1bHQgQWRkQVJQTmVpZ2hib3JzUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgQWRkU3dhcFJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IENsb3NlU3RkaW5SZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBDb3B5RmlsZVJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IENyZWF0ZUNvbnRhaW5lclJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IENyZWF0ZVNhbmRib3hSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBEZXN0cm95U2FuZGJveFJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IEdldE1ldHJpY3NSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBHZXRPT01FdmVudFJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IEd1ZXN0RGV0YWlsc1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IExpc3RJbnRlcmZhY2VzUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgTGlzdFJvdXRlc1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IE1lbUhvdHBsdWdCeVByb2JlUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgT25saW5lQ1BVTWVtUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUGF1c2VDb250YWluZXJSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBQdWxsSW1hZ2VSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBSZWFkU3RyZWFtUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVtb3ZlQ29udGFpbmVyUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVtb3ZlU3RhbGVWaXJ0aW9mc1NoYXJlTW91bnRzUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVzZWVkUmFuZG9tRGV2UmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgUmVzdW1lQ29udGFpbmVyUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgU2V0R3Vlc3REYXRlVGltZVJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFNldFBvbGljeVJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFNpZ25hbFByb2Nlc3NSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBTdGFydENvbnRhaW5lclJlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFN0YXJ0VHJhY2luZ1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFN0YXRzQ29udGFpbmVyUmVxdWVzdCA6PSB0cnVlCmRlZmF1bHQgU3RvcFRyYWNpbmdSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBUdHlXaW5SZXNpemVSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVDb250YWluZXJSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVFcGhlbWVyYWxNb3VudHNSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVJbnRlcmZhY2VSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBVcGRhdGVSb3V0ZXNSZXF1ZXN0IDo9IHRydWUKZGVmYXVsdCBXYWl0UHJvY2Vzc1JlcXVlc3QgOj0gdHJ1ZQpkZWZhdWx0IFdyaXRlU3RyZWFtUmVxdWVzdCA6PSB0cnVlCgpkZWZhdWx0IEV4ZWNQcm9jZXNzUmVxdWVzdCA6PSBmYWxzZQo=
|
||||
spec:
|
||||
runtimeClassName: kata
|
||||
containers:
|
||||
@@ -79,7 +66,7 @@ Create the pod:
|
||||
$ kubectl apply -f pod1.yaml
|
||||
```
|
||||
|
||||
While creating the Pod sandbox, the Kata Shim will notice the `io.katacontainers.config.hypervisor.cc_init_data` annotation and will create the init data device on the host and mount it on the guest as a block device. The agent then reads the init data struct from this device and sets the policy if present.
|
||||
While creating the Pod sandbox, the Kata Shim will notice the `io.katacontainers.config.agent.policy` annotation and will send the Policy document to the Kata Agent - by sending a `SetPolicy` request. Note that this request will fail if the default Policy, included in the Guest image, doesn't allow this `SetPolicy` request. If the `SetPolicy` request is rejected by the Guest, the Kata Shim will fail to start the Pod sandbox.
|
||||
|
||||
# How is the Policy being enforced?
|
||||
|
||||
|
||||
@@ -6,4 +6,4 @@ Container deployments utilize explicit or implicit file sharing between host fil
|
||||
|
||||
As of the 2.0 release of Kata Containers, [virtio-fs](https://virtio-fs.gitlab.io/) is the default filesystem sharing mechanism.
|
||||
|
||||
virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](../../tools/packaging/kata-deploy/helm-chart/README.md).
|
||||
virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](../../tools/packaging/kata-deploy/README.md#kubernetes-quick-start).
|
||||
|
||||
@@ -1,25 +1,22 @@
|
||||
# Privileged Kata Containers
|
||||
|
||||
> [!WARNING]
|
||||
> Whilst this functionality is supported, it can decrease the security of Kata Containers if not configured correctly.
|
||||
|
||||
Kata Containers supports creation of containers that are "privileged" (i.e. have additional capabilities and access
|
||||
that is not normally granted).
|
||||
|
||||
## Enabling privileged containers without host devices
|
||||
## Warnings
|
||||
|
||||
> [!TIP]
|
||||
> When Kata Containers is installed through
|
||||
> [kata-deploy](/tools/packaging/kata-deploy/helm-chart/README.md#kata-deploy-helm-chart), this mitigation is configured
|
||||
> out of the box, hence there is no action required in that case.
|
||||
**Warning:** Whilst this functionality is supported, it can decrease the security of Kata Containers if not configured
|
||||
correctly.
|
||||
|
||||
By default, a privileged container attempts to expose all devices from the host. This is generally not supported in Kata
|
||||
Containers as the container is running a different kernel than the host.
|
||||
### Host Devices
|
||||
|
||||
Instead, the following sections document how to disable this behavior in different container runtimes. Note that this
|
||||
mitigation does not affect a container's ability to mount *guest* devices.
|
||||
By default, when privileged is enabled for a container, all the `/dev/*` block devices from the host are mounted
|
||||
into the guest. This will allow the privileged container inside the Kata guest to gain access to mount any block device
|
||||
from the host, a potentially undesirable side-effect that decreases the security of Kata.
|
||||
|
||||
## Containerd
|
||||
The following sections document how to configure this behavior in different container runtimes.
|
||||
|
||||
#### Containerd
|
||||
|
||||
Containerd allows configuring the privileged host devices behavior for each runtime in the containerd config. This is
|
||||
done with the `privileged_without_host_devices` option. Setting this to `true` will disable hot plugging of the host
|
||||
@@ -46,7 +43,7 @@ See below example config:
|
||||
- [How to use Kata Containers and containerd with Kubernetes](how-to-use-k8s-with-containerd-and-kata.md)
|
||||
- [Containerd CRI config documentation](https://github.com/containerd/containerd/blob/main/docs/cri/config.md)
|
||||
|
||||
## CRI-O
|
||||
#### CRI-O
|
||||
|
||||
Similar to containerd, CRI-O allows configuring the privileged host devices
|
||||
behavior for each runtime in the CRI config. This is done with the
|
||||
|
||||
@@ -8,11 +8,50 @@ Kata Containers requires nested virtualization or bare metal. Check
|
||||
[hardware requirements](./../../README.md#hardware-requirements) to see if your system is capable of running Kata
|
||||
Containers.
|
||||
|
||||
The Kata Deploy Helm chart is the preferred way to install all of the binaries and
|
||||
## Packaged installation methods
|
||||
|
||||
The packaged installation method uses your distribution's native package format (such as RPM or DEB).
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> We encourage you to select an installation method that provides
|
||||
> automatic updates, to ensure you get the latest security updates and
|
||||
> bug fixes.
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case |
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------|
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. |
|
||||
| [Using kata-deploy Helm chart](#kata-deploy-helm-chart) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | The best way to try Kata Containers on an already up and running Kubernetes cluster. |
|
||||
|
||||
### Kata Deploy Helm Chart
|
||||
|
||||
The Kata Deploy Helm chart is a convenient way to install all of the binaries and
|
||||
artifacts required to run Kata Containers on Kubernetes.
|
||||
|
||||
[Use Kata Deploy Helm Chart](/tools/packaging/kata-deploy/helm-chart/README.md) to install Kata Containers on a Kubernetes Cluster.
|
||||
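One possible invocation from a source checkout is sketched below. The chart location within the repository is an assumption here, so follow the linked Helm chart README for the supported procedure and values.

```bash
# Sketch: install the kata-deploy chart from a local checkout of the repository.
git clone https://github.com/kata-containers/kata-containers.git
helm install kata-deploy \
    kata-containers/tools/packaging/kata-deploy/helm-chart/kata-deploy
```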
|
||||
### Official packages
|
||||
|
||||
Kata packages are provided by official distribution repositories for:
|
||||
|
||||
| Distribution (link to installation guide) | Minimum versions |
|
||||
|----------------------------------------------------------|--------------------------------------------------------------------------------|
|
||||
| [CentOS](centos-installation-guide.md) | 8 |
|
||||
| [Fedora](fedora-installation-guide.md) | 34 |
|
||||
|
||||
### Automatic Installation
|
||||
|
||||
[Use `kata-manager`](/utils/README.md) to automatically install a working Kata Containers system.
|
||||
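A rough sketch of what this looks like is below; the script URL is an assumption based on the repository layout, so treat `utils/README.md` as the authoritative invocation.

```bash
# Sketch only: fetch and run kata-manager from the main branch of the repository.
curl -fsSL https://raw.githubusercontent.com/kata-containers/kata-containers/main/utils/kata-manager.sh \
    -o kata-manager.sh
bash kata-manager.sh
```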
|
||||
## Installing on a Cloud Service Platform
|
||||
|
||||
* [Amazon Web Services (AWS)](aws-installation-guide.md)
|
||||
* [Google Compute Engine (GCE)](gce-installation-guide.md)
|
||||
* [Microsoft Azure](azure-installation-guide.md)
|
||||
* [Minikube](minikube-installation-guide.md)
|
||||
* [VEXXHOST OpenStack Cloud](vexxhost-installation-guide.md)
|
||||
|
||||
## Further information
|
||||
|
||||
* [upgrading document](../Upgrading.md)
|
||||
|
||||
135
docs/install/aws-installation-guide.md
Normal file
135
docs/install/aws-installation-guide.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# Install Kata Containers on Amazon Web Services
|
||||
|
||||
Kata Containers on Amazon Web Services (AWS) makes use of [i3.metal](https://aws.amazon.com/ec2/instance-types/i3/) instances. Most of the installation procedure is identical to that for Kata on your preferred distribution, except that you have to run it on bare metal instances since AWS doesn't support nested virtualization yet. This guide walks you through creating an i3.metal instance.
|
||||
|
||||
## Install and Configure AWS CLI
|
||||
|
||||
### Requirements
|
||||
|
||||
* Python:
|
||||
* Python 2 version 2.6.5+
|
||||
* Python 3 version 3.3+
|
||||
|
||||
### Install
|
||||
|
||||
Install with this command:
|
||||
|
||||
```bash
|
||||
$ pip install awscli --upgrade --user
|
||||
```
|
||||
|
||||
### Configure
|
||||
|
||||
First, verify it:
|
||||
|
||||
```bash
|
||||
$ aws --version
|
||||
```
|
||||
|
||||
Then configure it:
|
||||
|
||||
```bash
|
||||
$ aws configure
|
||||
```
|
||||
|
||||
Specify the required parameters:
|
||||
|
||||
```
|
||||
AWS Access Key ID []: <your-key-id-from-iam>
|
||||
AWS Secret Access Key []: <your-secret-access-key-from-iam>
|
||||
Default region name []: <your-aws-region-for-your-i3-metal-instance>
|
||||
Default output format [None]: <yaml-or-json-or-empty>
|
||||
```
|
||||
|
||||
Alternatively, you can create the files: `~/.aws/credentials` and `~/.aws/config`:
|
||||
|
||||
```bash
|
||||
$ cat <<EOF > ~/.aws/credentials
|
||||
[default]
|
||||
aws_access_key_id = <your-key-id-from-iam>
|
||||
aws_secret_access_key = <your-secret-access-key-from-iam>
|
||||
EOF
|
||||
$ cat <<EOF > ~/.aws/config
|
||||
[default]
|
||||
region = <your-aws-region-for-your-i3-metal-instance>
|
||||
EOF
|
||||
```
|
||||
|
||||
For more information on how to get AWS credentials please refer to [this guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html). Alternatively, you can ask the administrator of your AWS account to issue one with the AWS CLI:
|
||||
|
||||
```sh
|
||||
$ aws_username="myusername"
|
||||
$ aws iam create-access-key --user-name="$aws_username"
|
||||
```
|
||||
|
||||
More general AWS CLI guidelines can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/installing.html).
|
||||
|
||||
## Create or Import an EC2 SSH key pair
|
||||
|
||||
You will need this to access your instance.
|
||||
|
||||
To create:
|
||||
|
||||
```bash
|
||||
$ aws ec2 create-key-pair --key-name MyKeyPair | grep KeyMaterial | cut -d: -f2- | tr -d ' \n\"\,' > MyKeyPair.pem
|
||||
$ chmod 400 MyKeyPair.pem
|
||||
```
|
||||
|
||||
Alternatively to import using your public SSH key:
|
||||
|
||||
```bash
|
||||
$ aws ec2 import-key-pair --key-name "MyKeyPair" --public-key-material file://MyKeyPair.pub
|
||||
```
|
||||
|
||||
## Launch i3.metal instance
|
||||
|
||||
Get the latest Bionic Ubuntu AMI (Amazon Machine Image) or the latest AMI for the Linux distribution you would like to use. For example:
|
||||
|
||||
```bash
|
||||
$ aws ec2 describe-images --owners 099720109477 --filters "Name=name,Values=ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server*" --query 'sort_by(Images, &CreationDate)[].ImageId '
|
||||
```
|
||||
|
||||
This command will produce output similar to the following:
|
||||
|
||||
```
|
||||
[
|
||||
...
|
||||
"ami-063aa838bd7631e0b",
|
||||
"ami-03d5270fcb641f79b"
|
||||
]
|
||||
```
|
||||
|
||||
Launch the EC2 instance and pick up the `INSTANCEID`:
|
||||
|
||||
```bash
|
||||
$ aws ec2 run-instances --image-id ami-03d5270fcb641f79b --count 1 --instance-type i3.metal --key-name MyKeyPair --associate-public-ip-address > /tmp/aws.json
|
||||
$ export INSTANCEID=$(grep InstanceId /tmp/aws.json | cut -d: -f2- | tr -d ' \n\"\,')
|
||||
```
|
||||
|
||||
Wait for the instance to come up; the output of the following command should be `running`:
|
||||
|
||||
```bash
|
||||
$ aws ec2 describe-instances --instance-id=${INSTANCEID} | grep running | cut -d: -f2- | tr -d ' \"\,'
|
||||
```
|
||||
|
||||
Get the public IP address for the instances:
|
||||
|
||||
```bash
|
||||
$ export IP=$(aws ec2 describe-instances --instance-id=${INSTANCEID} | grep PublicIpAddress | cut -d: -f2- | tr -d ' \n\"\,')
|
||||
```
|
||||
|
||||
Refer to [this guide](https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-launch.html) for more details on how to launch instances with the AWS CLI.
|
||||
|
||||
SSH into the machine
|
||||
|
||||
```bash
|
||||
$ ssh -i MyKeyPair.pem ubuntu@${IP}
|
||||
```
|
||||
|
||||
Go onto the next step.
|
||||
|
||||
## Install Kata
|
||||
|
||||
The process for installing Kata itself on bare metal is identical to that of a virtualization-enabled VM.
|
||||
|
||||
For detailed information to install Kata on your distribution of choice, see the [Kata Containers installation user guides](../install/README.md).
|
||||
18
docs/install/azure-installation-guide.md
Normal file
18
docs/install/azure-installation-guide.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# Install Kata Containers on Microsoft Azure
|
||||
|
||||
Kata Containers on Azure uses nested virtualization to provide an identical installation
|
||||
experience to Kata on your preferred Linux distribution.
|
||||
|
||||
This guide assumes you have an Azure account set up and tools to remotely log in to your virtual
|
||||
machine (SSH). Instructions will use the Azure Portal to avoid
|
||||
local dependencies and setup.
|
||||
|
||||
## Create a new virtual machine with nesting support
|
||||
|
||||
Create a new virtual machine with:
|
||||
* Nesting support (v3 series)
|
||||
* your distro of choice
|
||||
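If you prefer the Azure CLI to the Portal, a creation sketch looks like the following. The resource group name, image alias and VM size are examples; any nesting-capable v3 series size should work.

```bash
# Create a resource group and a v3-series VM with SSH access (values are illustrative).
az group create --name kata-rg --location westus2
az vm create \
    --resource-group kata-rg \
    --name kata-testing \
    --image Ubuntu2204 \
    --size Standard_D4s_v3 \
    --generate-ssh-keys
```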
|
||||
## Set up with distribution specific quick start
|
||||
|
||||
Follow distribution specific [install guides](../install/README.md#packaged-installation-methods).
|
||||
21
docs/install/centos-installation-guide.md
Normal file
21
docs/install/centos-installation-guide.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# Install Kata Containers on CentOS
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo -E dnf install -y centos-release-advanced-virtualization
|
||||
$ sudo -E dnf module disable -y virt:rhel
|
||||
$ source /etc/os-release
|
||||
$ cat <<EOF | sudo -E tee /etc/yum.repos.d/kata-containers.repo
|
||||
[kata-containers]
|
||||
name=Kata Containers
|
||||
baseurl=http://mirror.centos.org/\$contentdir/\$releasever/virt/\$basearch/kata-containers
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
skip_if_unavailable=1
|
||||
EOF
|
||||
$ sudo -E dnf install -y kata-containers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
10
docs/install/fedora-installation-guide.md
Normal file
10
docs/install/fedora-installation-guide.md
Normal file
@@ -0,0 +1,10 @@
|
||||
# Install Kata Containers on Fedora
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo -E dnf -y install kata-containers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
127
docs/install/gce-installation-guide.md
Normal file
127
docs/install/gce-installation-guide.md
Normal file
@@ -0,0 +1,127 @@
|
||||
# Install Kata Containers on Google Compute Engine
|
||||
|
||||
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime check` checks for nested virtualization, but does not fail if support is not found.
|
||||
|
||||
As a prerequisite, this guide assumes an installed and configured instance of the [Google Cloud SDK](https://cloud.google.com/sdk/downloads). For a zero-configuration option, all of the commands below were tested under [Google Cloud Shell](https://cloud.google.com/shell/) (as of Jun 2018). Verify your `gcloud` installation and configuration:
|
||||
|
||||
```bash
|
||||
$ gcloud info || { echo "ERROR: no Google Cloud SDK"; exit 1; }
|
||||
```
|
||||
|
||||
## Create an Image with Nested Virtualization Enabled
|
||||
|
||||
VM images on GCE are grouped into families under projects. Officially supported images are automatically discoverable with `gcloud compute images list`. That command produces a list similar to the following (likely with different image names):
|
||||
|
||||
```bash
|
||||
$ gcloud compute images list
|
||||
NAME PROJECT FAMILY DEPRECATED STATUS
|
||||
centos-7-v20180523 centos-cloud centos-7 READY
|
||||
coreos-stable-1745-5-0-v20180531 coreos-cloud coreos-stable READY
|
||||
cos-beta-67-10575-45-0 cos-cloud cos-beta READY
|
||||
cos-stable-66-10452-89-0 cos-cloud cos-stable READY
|
||||
debian-9-stretch-v20180510 debian-cloud debian-9 READY
|
||||
rhel-7-v20180522 rhel-cloud rhel-7 READY
|
||||
sles-11-sp4-v20180523 suse-cloud sles-11 READY
|
||||
ubuntu-1604-xenial-v20180522 ubuntu-os-cloud ubuntu-1604-lts READY
|
||||
ubuntu-1804-bionic-v20180522 ubuntu-os-cloud ubuntu-1804-lts READY
|
||||
```
|
||||
|
||||
Each distribution has its own project, and each project can host images for multiple versions of the distribution, typically grouped into families. We recommend you select images by project and family, rather than by name. This ensures any scripts or other automation always works with a non-deprecated image, including security updates, updates to GCE-specific scripts, etc.
|
||||
|
||||
### Create the Image
|
||||
|
||||
The following example (substitute your preferred distribution project and image family) produces an image with nested virtualization enabled in your currently active GCE project:
|
||||
|
||||
```bash
|
||||
$ SOURCE_IMAGE_PROJECT=ubuntu-os-cloud
|
||||
$ SOURCE_IMAGE_FAMILY=ubuntu-1804-lts
|
||||
$ IMAGE_NAME=${SOURCE_IMAGE_FAMILY}-nested
|
||||
|
||||
$ gcloud compute images create \
|
||||
--source-image-project $SOURCE_IMAGE_PROJECT \
|
||||
--source-image-family $SOURCE_IMAGE_FAMILY \
|
||||
--licenses=https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx \
|
||||
$IMAGE_NAME
|
||||
```
|
||||
|
||||
If successful, `gcloud` reports that the image was created. Verify that the image has the nested virtualization license with `gcloud compute images describe $IMAGE_NAME`. This produces output like the following (some fields have been removed for clarity and to redact personal info):
|
||||
|
||||
```yaml
|
||||
diskSizeGb: '10'
|
||||
kind: compute#image
|
||||
licenseCodes:
|
||||
- '1002001'
|
||||
- '5926592092274602096'
|
||||
licenses:
|
||||
- https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx
|
||||
- https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/licenses/ubuntu-1804-lts
|
||||
name: ubuntu-1804-lts-nested
|
||||
sourceImage: https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-v20180522
|
||||
sourceImageId: '3280575157699667619'
|
||||
sourceType: RAW
|
||||
status: READY
|
||||
```
|
||||
|
||||
The primary criterion of interest here is the presence of the `enable-vmx` license. Without that license, Kata does not work. The presence of that license instructs the Google Compute Engine hypervisor to enable Intel's VT-x instructions in virtual machines created from the image. Note that nested virtualization is only available in VMs running on Intel Haswell or later CPU micro-architectures.
|
||||
|
||||
### Verify VMX is Available
|
||||
|
||||
Assuming you created a nested-enabled image using the previous instructions, verify that VMs created from this image are VMX-enabled with the following:
|
||||
|
||||
1. Create a VM from the image created previously:
|
||||
|
||||
```bash
|
||||
$ gcloud compute instances create \
|
||||
--image $IMAGE_NAME \
|
||||
--machine-type n1-standard-2 \
|
||||
--min-cpu-platform "Intel Broadwell" \
|
||||
kata-testing
|
||||
```
|
||||
|
||||
> **NOTE**: In most zones the `--min-cpu-platform` argument can be omitted. It is only necessary in GCE Zones that include hosts based on Intel's Ivybridge platform.
|
||||
|
||||
2. Verify that the VMX CPUID flag is set:
|
||||
|
||||
```bash
|
||||
$ gcloud compute ssh kata-testing
|
||||
|
||||
# While ssh'd into the VM:
|
||||
$ [ -z "$(lscpu|grep GenuineIntel)" ] && { echo "ERROR: Need an Intel CPU"; exit 1; }
|
||||
```
|
||||
|
||||
If this fails, ensure you created your instance from the correct image and that the previously listed `enable-vmx` license is included.
|
||||
|
||||
## Install Kata
|
||||
|
||||
The process for installing Kata itself on a virtualization-enabled VM is identical to that for bare metal.
|
||||
|
||||
For detailed information to install Kata on your distribution of choice, see the [Kata Containers installation user guides](../install/README.md).
|
||||
|
||||
## Create a Kata-enabled Image
|
||||
|
||||
Optionally, after installing Kata, create an image to preserve the fruits of your labor:
|
||||
|
||||
```bash
|
||||
$ gcloud compute instances stop kata-testing
|
||||
$ gcloud compute images create \
|
||||
--source-disk kata-testing \
|
||||
kata-base
|
||||
```
|
||||
|
||||
The result is an image that includes any changes made to the `kata-testing` instance as well as the `enable-vmx` flag. Verify this with `gcloud compute images describe kata-base`. The result, which omits some fields for clarity, should be similar to the following:
|
||||
|
||||
```yaml
|
||||
diskSizeGb: '10'
|
||||
kind: compute#image
|
||||
licenseCodes:
|
||||
- '1002001'
|
||||
- '5926592092274602096'
|
||||
licenses:
|
||||
- https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx
|
||||
- https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/licenses/ubuntu-1804-lts
|
||||
name: kata-base
|
||||
selfLink: https://www.googleapis.com/compute/v1/projects/my-kata-project/global/images/kata-base
|
||||
sourceDisk: https://www.googleapis.com/compute/v1/projects/my-kata-project/zones/us-west1-a/disks/kata-testing
|
||||
sourceType: RAW
|
||||
status: READY
|
||||
```
|
||||
@@ -32,7 +32,7 @@ architectures:
|
||||
|
||||
### Kata Deploy Installation
|
||||
|
||||
Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/helm-chart/README.md).
|
||||
Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md).
|
||||
### Official packages
|
||||
`ToDo`
|
||||
### Automatic Installation
|
||||
|
||||
16
docs/install/vexxhost-installation-guide.md
Normal file
16
docs/install/vexxhost-installation-guide.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# Install Kata Containers on VEXXHOST
|
||||
|
||||
Kata Containers on VEXXHOST uses nested virtualization to provide an identical
|
||||
installation experience to Kata on your preferred Linux distribution.
|
||||
|
||||
This guide assumes you have an OpenStack public cloud account set up and tools
|
||||
to remotely connect to your virtual machine (SSH).
|
||||
|
||||
## Create a new virtual machine with nesting support
|
||||
|
||||
All regions support nested virtualization using the V2 flavors (those prefixed
|
||||
with v2). The recommended machine type for container workloads is the `v2-highcpu` range.
|
||||
|
||||
## Set up with distribution specific quick start
|
||||
|
||||
Follow distribution specific [install guides](../install/README.md#packaged-installation-methods).
|
||||
@@ -419,7 +419,7 @@ You might need to disable Docker before initializing Kubernetes. Be aware
|
||||
that the OpenSSL container image built above will need to be exported from
|
||||
Docker and imported into containerd.
|
||||
|
||||
If Kata is installed through [`kata-deploy`](../../tools/packaging/kata-deploy/helm-chart/README.md)
|
||||
If Kata is installed through [`kata-deploy`](../../tools/packaging/kata-deploy/README.md)
|
||||
there will be multiple `configuration.toml` files associated with different
|
||||
hypervisors. Rather than add in the custom Kata kernel, Kata rootfs, and
|
||||
kernel modules to each `configuration.toml` as the default, instead use
|
||||
|
||||
90
src/agent/Cargo.lock
generated
90
src/agent/Cargo.lock
generated
@@ -459,9 +459,15 @@ version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
|
||||
dependencies = [
|
||||
"bit-vec",
|
||||
"bit-vec 0.8.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bit-vec"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
|
||||
|
||||
[[package]]
|
||||
name = "bit-vec"
|
||||
version = "0.8.0"
|
||||
@@ -659,6 +665,30 @@ dependencies = [
|
||||
"shlex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cdi"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/cncf-tags/container-device-interface-rs?rev=3b1e83dda5efcc83c7a4f134466ec006b37109c9#3b1e83dda5efcc83c7a4f134466ec006b37109c9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
"const_format",
|
||||
"jsonschema",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"nix 0.24.3",
|
||||
"notify",
|
||||
"oci-spec",
|
||||
"once_cell",
|
||||
"path-clean",
|
||||
"regex",
|
||||
"semver",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"serde_yaml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
@@ -778,31 +808,6 @@ dependencies = [
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "container-device-interface"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "653849f0c250f73d9afab4b2a9a6b07adaee1f34c44ffa6f2d2c3f9392002c1a"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
"const_format",
|
||||
"jsonschema",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"nix 0.24.3",
|
||||
"notify",
|
||||
"oci-spec",
|
||||
"once_cell",
|
||||
"path-clean",
|
||||
"regex",
|
||||
"semver",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"serde_yaml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation-sys"
|
||||
version = "0.8.7"
|
||||
@@ -1244,6 +1249,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece"
|
||||
dependencies = [
|
||||
"crc32fast",
|
||||
"libz-sys",
|
||||
"miniz_oxide",
|
||||
]
|
||||
|
||||
@@ -2043,11 +2049,11 @@ dependencies = [
|
||||
"async-trait",
|
||||
"base64 0.22.1",
|
||||
"capctl",
|
||||
"cdi",
|
||||
"cfg-if",
|
||||
"cgroups-rs",
|
||||
"clap",
|
||||
"const_format",
|
||||
"container-device-interface",
|
||||
"derivative",
|
||||
"futures",
|
||||
"ipnetwork",
|
||||
@@ -2058,7 +2064,7 @@ dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
"logging",
|
||||
"mem-agent",
|
||||
"mem-agent-lib",
|
||||
"netlink-packet-core",
|
||||
"netlink-packet-route",
|
||||
"netlink-sys 0.7.0",
|
||||
@@ -2259,6 +2265,17 @@ dependencies = [
|
||||
"uuid 0.8.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libz-sys"
|
||||
version = "1.1.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"pkg-config",
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.3.8"
|
||||
@@ -2333,7 +2350,7 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mem-agent"
|
||||
name = "mem-agent-lib"
|
||||
version = "0.2.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
@@ -3701,7 +3718,7 @@ dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"awaitgroup",
|
||||
"bit-vec",
|
||||
"bit-vec 0.6.3",
|
||||
"capctl",
|
||||
"caps",
|
||||
"cfg-if",
|
||||
@@ -4021,9 +4038,12 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
version = "0.4.11"
|
||||
version = "0.4.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
|
||||
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slash-formatter"
|
||||
@@ -4803,6 +4823,12 @@ version = "1.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5"
|
||||
|
||||
[[package]]
|
||||
name = "vcpkg"
|
||||
version = "0.2.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.5"
|
||||
|
||||
@@ -13,12 +13,8 @@ lazy_static = "1.3.0"
|
||||
ttrpc = { version = "0.8.4", features = ["async"], default-features = false }
|
||||
protobuf = "3.7.2"
|
||||
libc = "0.2.94"
|
||||
|
||||
# Notes:
|
||||
# - Needs to stay in sync with libs
|
||||
# - Upgrading to 0.27+ will require code changes (see #11842)
|
||||
# Notes: nix needs to stay in sync with libs
|
||||
nix = "0.26.4"
|
||||
|
||||
capctl = "0.2.0"
|
||||
scan_fmt = "0.2.6"
|
||||
scopeguard = "1.0.0"
|
||||
@@ -85,10 +81,10 @@ kata-agent-policy = { path = "policy" }
|
||||
rustjail = { path = "rustjail" }
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
mem-agent = { path = "../libs/mem-agent" }
|
||||
mem-agent = { path = "../mem-agent", package = "mem-agent-lib" }
|
||||
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types", features = ["safe-path"] }
|
||||
kata-types = { path = "../libs/kata-types" }
|
||||
# Note: this crate sets the slog 'max_*' features which allows the log level
|
||||
# to be modified at runtime.
|
||||
logging = { path = "../libs/logging" }
|
||||
@@ -167,6 +163,9 @@ clap.workspace = true
|
||||
strum.workspace = true
|
||||
strum_macros.workspace = true
|
||||
|
||||
# Agent Policy
|
||||
cdi = { git = "https://github.com/cncf-tags/container-device-interface-rs", rev = "3b1e83dda5efcc83c7a4f134466ec006b37109c9" }
|
||||
|
||||
# Local dependencies
|
||||
kata-agent-policy = { workspace = true, optional = true }
|
||||
mem-agent.workspace = true
|
||||
@@ -186,8 +185,6 @@ base64 = "0.22"
|
||||
sha2 = "0.10.8"
|
||||
async-compression = { version = "0.4.22", features = ["tokio", "gzip"] }
|
||||
|
||||
container-device-interface = "0.1.1"
|
||||
|
||||
[target.'cfg(target_arch = "s390x")'.dependencies]
|
||||
pv_core = { git = "https://github.com/ibm-s390-linux/s390-tools", rev = "4942504a9a2977d49989a5e5b7c1c8e07dc0fa41", package = "s390_pv_core" }
|
||||
|
||||
@@ -206,7 +203,6 @@ lto = true
|
||||
seccomp = ["rustjail/seccomp"]
|
||||
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
|
||||
agent-policy = ["kata-agent-policy"]
|
||||
init-data = []
|
||||
|
||||
[[bin]]
|
||||
name = "kata-agent"
|
||||
|
||||
@@ -41,14 +41,6 @@ ifeq ($(AGENT_POLICY),yes)
|
||||
override EXTRA_RUSTFEATURES += agent-policy
|
||||
endif
|
||||
|
||||
##VAR INIT_DATA=yes|no define if agent enables the init data feature
|
||||
INIT_DATA ?= yes
|
||||
|
||||
# Enable the init data fature of rust build
|
||||
ifeq ($(INIT_DATA),yes)
|
||||
override EXTRA_RUSTFEATURES += init-data
|
||||
endif
|
||||
|
||||
include ../../utils.mk
|
||||
|
||||
##VAR STANDARD_OCI_RUNTIME=yes|no define if agent enables standard oci runtime feature
|
||||
@@ -130,7 +122,7 @@ $(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
$(TARGET_PATH): show-summary
|
||||
@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) $(if $(findstring release,$(BUILD_TYPE)),--release) $(EXTRA_RUSTFEATURES)
|
||||
|
||||
$(GENERATED_FILES): %: %.in $(VERSION_FILE)
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
|
||||
|
||||
##TARGET optimize: optimized build
|
||||
|
||||
@@ -10,7 +10,7 @@ use anyhow::{bail, Result};
|
||||
use slog::{debug, error, info, warn};
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
static POLICY_LOG_FILE: &str = "/tmp/policy.jsonl";
|
||||
static POLICY_LOG_FILE: &str = "/tmp/policy.txt";
|
||||
static POLICY_DEFAULT_FILE: &str = "/etc/kata-opa/default-policy.rego";
|
||||
|
||||
/// Convenience macro to obtain the scope logger
|
||||
@@ -26,7 +26,7 @@ pub struct AgentPolicy {
|
||||
/// When true policy errors are ignored, for debug purposes.
|
||||
allow_failures: bool,
|
||||
|
||||
/// "/tmp/policy.jsonl" log file for policy activity.
|
||||
/// "/tmp/policy.txt" log file for policy activity.
|
||||
log_file: Option<tokio::fs::File>,
|
||||
|
||||
/// Regorus engine
|
||||
@@ -213,7 +213,7 @@ impl AgentPolicy {
|
||||
// The Policy text can be obtained directly from the pod YAML.
|
||||
}
|
||||
_ => {
|
||||
let log_entry = format!("{{\"kind\":\"{ep}\",\"request\":{input}}}\n");
|
||||
let log_entry = format!("[\"ep\":\"{ep}\",{input}],\n\n");
|
||||
|
||||
if let Err(e) = log_file.write_all(log_entry.as_bytes()).await {
|
||||
warn!(sl!(), "policy: log_eval_input: write_all failed: {}", e);
|
||||
|
||||
@@ -44,7 +44,7 @@ async-trait.workspace = true
|
||||
inotify = "0.9.2"
|
||||
libseccomp = { version = "0.3.0", optional = true }
|
||||
zbus = "3.12.0"
|
||||
bit-vec = "0.8.0"
|
||||
bit-vec = "0.6.3"
|
||||
xattr = "0.2.3"
|
||||
|
||||
# Local dependencies
|
||||
|
||||
@@ -1037,12 +1037,6 @@ impl BaseContainer for LinuxContainer {
|
||||
let child_stderr: std::process::Stdio;
|
||||
|
||||
if tty {
|
||||
// NOTE(#11842): This code will require changes if we upgrade to nix 0.27+:
|
||||
// - `pseudo` will contain OwnedFds instead of RawFds.
|
||||
// - We'll have to use `OwnedFd::into_raw_fd()` which will
|
||||
// transfer the ownership to the caller.
|
||||
// - The duplication strategy will not change.
|
||||
|
||||
let pseudo = pty::openpty(None, None)?;
|
||||
p.term_master = Some(pseudo.master);
|
||||
let _ = fcntl::fcntl(pseudo.master, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
|
||||
@@ -1051,8 +1045,8 @@ impl BaseContainer for LinuxContainer {
|
||||
.map_err(|e| warn!(logger, "fcntl pseudo.slave {:?}", e));
|
||||
|
||||
child_stdin = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
|
||||
child_stdout = unsafe { std::process::Stdio::from_raw_fd(unistd::dup(pseudo.slave)?) };
|
||||
child_stderr = unsafe { std::process::Stdio::from_raw_fd(unistd::dup(pseudo.slave)?) };
|
||||
child_stdout = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
|
||||
child_stderr = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
|
||||
|
||||
if let Some(proc_io) = &mut p.proc_io {
|
||||
// A reference count used to clean up the term master fd.
|
||||
@@ -1920,7 +1914,7 @@ mod tests {
|
||||
let cgroups_path = format!(
|
||||
"/{}/dummycontainer{}",
|
||||
CGROUP_PARENT,
|
||||
since_the_epoch.as_micros()
|
||||
since_the_epoch.as_millis()
|
||||
);
|
||||
|
||||
let mut spec = SpecBuilder::default()
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use libc::uid_t;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
#[cfg(not(test))]
|
||||
use nix::mount;
|
||||
@@ -337,19 +336,25 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
|
||||
|
||||
if mount_dest == PROC_PATH {
|
||||
// only allow a mount on top of proc if its source is "proc"
|
||||
let mount_source = m.source().as_ref().unwrap().display().to_string();
|
||||
unsafe {
|
||||
let mut stats = MaybeUninit::<libc::statfs>::uninit();
|
||||
let mount_source = m.source().as_ref().unwrap().display().to_string();
|
||||
if mount_source
|
||||
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
|
||||
.is_ok()
|
||||
{
|
||||
if stats.assume_init().f_type == PROC_SUPER_MAGIC {
|
||||
return Ok(());
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut stats = MaybeUninit::<libc::statfs>::uninit();
|
||||
let statfs_ret = mount_source
|
||||
.with_nix_path(|path| unsafe { libc::statfs(path.as_ptr(), stats.as_mut_ptr()) })?;
|
||||
|
||||
return match Errno::result(statfs_ret) {
|
||||
Ok(_) if unsafe { stats.assume_init().f_type } == PROC_SUPER_MAGIC => Ok(()),
|
||||
Ok(_) | Err(_) => Err(anyhow!(format!(
|
||||
return Err(anyhow!(format!(
|
||||
"{} cannot be mounted to {} because it is not of type proc",
|
||||
&mount_source, &mount_dest
|
||||
))),
|
||||
};
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
if mount_dest.starts_with(PROC_PATH) {
|
||||
|
||||
@@ -202,7 +202,7 @@ macro_rules! config_override {
|
||||
}
|
||||
};
|
||||
|
||||
($builder:ident, $config:ident, $field:ident, $func:ident) => {
|
||||
($builder:ident, $config:ident, $field:ident, $func: ident) => {
|
||||
if let Some(v) = $builder.$field {
|
||||
$config.$field = $func(&v)?;
|
||||
}
|
||||
@@ -661,8 +661,8 @@ impl AgentConfig {
|
||||
self.server_addr = addr;
|
||||
}
|
||||
|
||||
if let Ok(level) = env::var(LOG_LEVEL_ENV_VAR) {
|
||||
if let Ok(level) = logrus_to_slog_level(&level) {
|
||||
if let Ok(addr) = env::var(LOG_LEVEL_ENV_VAR) {
|
||||
if let Ok(level) = logrus_to_slog_level(&addr) {
|
||||
self.log_level = level;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ use anyhow::{anyhow, Context, Result};
|
||||
use cdi::annotations::parse_annotations;
|
||||
use cdi::cache::{new_cache, with_auto_refresh, CdiOption};
|
||||
use cdi::spec_dirs::with_spec_dirs;
|
||||
use container_device_interface as cdi;
|
||||
use kata_types::device::DeviceHandlerManager;
|
||||
use nix::sys::stat;
|
||||
use oci::{LinuxDeviceCgroup, Spec};
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#[cfg(feature = "init-data")]
|
||||
use std::{os::unix::fs::FileTypeExt, path::Path};
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
@@ -38,24 +37,8 @@ pub const AA_CONFIG_PATH: &str = concatcp!(INITDATA_PATH, "/aa.toml");
|
||||
pub const CDH_CONFIG_PATH: &str = concatcp!(INITDATA_PATH, "/cdh.toml");
|
||||
|
||||
/// Magic number of initdata device
|
||||
#[cfg(feature = "init-data")]
|
||||
pub const INITDATA_MAGIC_NUMBER: &[u8] = b"initdata";
|
||||
|
||||
/// initdata device with disk type 'vd*'
|
||||
#[cfg(feature = "init-data")]
|
||||
const INITDATA_PREFIX_DISK_VDX: &str = "vd";
|
||||
|
||||
/// initdata device with disk type 'sd*'
|
||||
#[cfg(feature = "init-data")]
|
||||
const INITDATA_PREFIX_DISK_SDX: &str = "sd";
|
||||
|
||||
#[cfg(not(feature = "init-data"))]
|
||||
async fn detect_initdata_device(logger: &Logger) -> Result<Option<String>> {
|
||||
debug!(logger, "Initdata is disabled");
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
#[cfg(feature = "init-data")]
|
||||
async fn detect_initdata_device(logger: &Logger) -> Result<Option<String>> {
|
||||
let dev_dir = Path::new("/dev");
|
||||
let mut read_dir = tokio::fs::read_dir(dev_dir).await?;
|
||||
@@ -63,15 +46,9 @@ async fn detect_initdata_device(logger: &Logger) -> Result<Option<String>> {
|
||||
let filename = entry.file_name();
|
||||
let filename = filename.to_string_lossy();
|
||||
debug!(logger, "Initdata check device `{filename}`");
|
||||
|
||||
// Currently there're two disk types supported:
|
||||
// virtio-blk (vd*) and virtio-scsi (sd*)
|
||||
if !filename.starts_with(INITDATA_PREFIX_DISK_VDX)
|
||||
&& !filename.starts_with(INITDATA_PREFIX_DISK_SDX)
|
||||
{
|
||||
if !filename.starts_with("vd") {
|
||||
continue;
|
||||
}
|
||||
|
||||
let path = entry.path();
|
||||
|
||||
debug!(logger, "Initdata find potential device: `{path:?}`");
|
||||
|
||||
@@ -30,7 +30,6 @@ use nix::unistd::{self, dup, sync, Pid};
|
||||
use std::env;
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::{self, File};
|
||||
use std::io::ErrorKind;
|
||||
use std::os::unix::fs::{self as unixfs, FileTypeExt};
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
@@ -466,17 +465,8 @@ fn attestation_binaries_available(logger: &Logger, procs: &GuestComponentsProcs)
|
||||
_ => vec![],
|
||||
};
|
||||
for binary in binaries.iter() {
|
||||
let exists = Path::new(binary)
|
||||
.try_exists()
|
||||
.unwrap_or_else(|error| match error.kind() {
|
||||
ErrorKind::NotFound => {
|
||||
warn!(logger, "{} not found", binary);
|
||||
false
|
||||
}
|
||||
_ => panic!("Path existence check failed for '{}': {}", binary, error),
|
||||
});
|
||||
|
||||
if !exists {
|
||||
if !Path::new(binary).exists() {
|
||||
warn!(logger, "{} not found", binary);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,17 +336,11 @@ mod tests {
        let plain = slog_term::PlainSyncDecorator::new(std::io::stdout());
        let logger = Logger::root(slog_term::FullFormat::new(plain).build().fuse(), o!());

        // Detect actual filesystem types mounted in this environment
        // Z runners mount /dev as tmpfs, while normal systems use devtmpfs
        let dev_fs_type = get_mount_fs_type("/dev").unwrap_or_else(|_| String::from("devtmpfs"));
        let proc_fs_type = get_mount_fs_type("/proc").unwrap_or_else(|_| String::from("proc"));
        let sys_fs_type = get_mount_fs_type("/sys").unwrap_or_else(|_| String::from("sysfs"));

        let test_cases = [
            ("dev", "/dev", dev_fs_type.as_str()),
            ("udev", "/dev", dev_fs_type.as_str()),
            ("proc", "/proc", proc_fs_type.as_str()),
            ("sysfs", "/sys", sys_fs_type.as_str()),
            ("dev", "/dev", "devtmpfs"),
            ("udev", "/dev", "devtmpfs"),
            ("proc", "/proc", "proc"),
            ("sysfs", "/sys", "sysfs"),
        ];

        for &(source, destination, fs_type) in &test_cases {
@@ -387,22 +381,6 @@ mod tests {
        let drain = slog::Discard;
        let logger = slog::Logger::root(drain, o!());

        // Detect filesystem type of root directory
        let tmp_fs_type = get_mount_fs_type("/").unwrap_or_else(|_| String::from("unknown"));

        // Error messages that vary based on filesystem type
        const DEFAULT_ERROR_EPERM: &str = "Operation not permitted";
        const BTRFS_ERROR_ENODEV: &str = "No such device";

        // Helper to select error message based on filesystem type (e.g. btrfs for s390x runners)
        let get_error_msg = |default: &'static str, btrfs_specific: &'static str| -> &'static str {
            if tmp_fs_type == "btrfs" && !btrfs_specific.is_empty() {
                btrfs_specific
            } else {
                default
            }
        };

        let tests = &[
            TestData {
                test_user: TestUserType::Any,
@@ -438,7 +416,7 @@ mod tests {
                fs_type: "bind",
                flags: MsFlags::empty(),
                options: "bind",
                error_contains: get_error_msg(DEFAULT_ERROR_EPERM, BTRFS_ERROR_ENODEV),
                error_contains: "Operation not permitted",
            },
            TestData {
                test_user: TestUserType::NonRootOnly,
@@ -518,14 +496,7 @@ mod tests {

            let err = result.unwrap_err();
            let error_msg = format!("{}", err);

            assert!(
                error_msg.contains(d.error_contains),
                "{}: expected error containing '{}', got '{}'",
                msg,
                d.error_contains,
                error_msg
            );
            assert!(error_msg.contains(d.error_contains), "{}", msg);
        }
    }
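
The tests above derive the expected filesystem types at runtime through get_mount_fs_type() instead of hard-coding devtmpfs/proc/sysfs, so they also pass on runners where /dev is tmpfs or / is btrfs. One common way to obtain that information, shown here as a hedged sketch that is not necessarily how get_mount_fs_type() is implemented, is to scan /proc/self/mounts:

use std::fs;

/// Best-effort lookup of the filesystem type for a mount point by scanning
/// /proc/self/mounts (whitespace-separated fields: device, mount point,
/// filesystem type, options, ...).
fn mount_fs_type(mount_point: &str) -> Option<String> {
    let mounts = fs::read_to_string("/proc/self/mounts").ok()?;
    mounts.lines().find_map(|line| {
        let mut fields = line.split_whitespace();
        let _device = fields.next()?;
        let target = fields.next()?;
        let fstype = fields.next()?;
        (target == mount_point).then(|| fstype.to_string())
    })
}

fn main() {
    for mp in ["/dev", "/proc", "/sys"] {
        println!("{mp}: {}", mount_fs_type(mp).unwrap_or_else(|| "unknown".into()));
    }
}
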
@@ -401,10 +401,7 @@ impl Handle {
            }

            if let RouteAttribute::Oif(index) = attribute {
                route.device = match self.find_link(LinkFilter::Index(*index)).await {
                    Ok(link) => link.name(),
                    Err(_) => String::new(),
                };
                route.device = self.find_link(LinkFilter::Index(*index)).await?.name();
            }
        }
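
One side of the hunk above replaces a hard `?` with a match that falls back to an empty device name, so a single unresolvable link index no longer aborts the whole route listing. A trivial synchronous sketch of that fallback choice, with hypothetical names standing in for the async netlink lookup:

/// Falls back to an empty device name when a link index cannot be resolved,
/// so that one unresolvable route does not abort listing all routes.
fn device_name(lookup: Result<String, String>) -> String {
    lookup.unwrap_or_default()
}

fn main() {
    assert_eq!(device_name(Ok("eth0".to_string())), "eth0");
    assert_eq!(device_name(Err("no such link".to_string())), "");
    println!("fallback to an empty name works for failed lookups");
}
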
@@ -912,27 +909,10 @@ mod tests {
    use super::*;
    use netlink_packet_route::address::AddressHeader;
    use netlink_packet_route::link::LinkHeader;
    use serial_test::serial;
    use std::iter;
    use std::process::Command;
    use test_utils::skip_if_not_root;

    // Constants for ARP neighbor tests
    const TEST_DUMMY_INTERFACE: &str = "dummy_for_arp";
    const TEST_ARP_IP: &str = "192.0.2.127";

    /// Helper function to check if the result is a netlink EACCES error
    fn is_netlink_permission_error<T>(result: &Result<T>) -> bool {
        if let Err(e) = result {
            let error_string = format!("{:?}", e);
            if error_string.contains("code: Some(-13)") {
                println!("INFO: skipping test - netlink operations are restricted in this environment (EACCES)");
                return true;
            }
        }
        false
    }

    #[tokio::test]
    async fn find_link_by_name() {
        let message = Handle::new()
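
The helper above looks for the netlink EACCES marker (errno -13) in an error's Debug output so that privileged tests can be skipped instead of failing on restricted runners. A condensed restatement of the same check, assuming anyhow::Result as in the agent code; the sample errors built in main are fabricated:

use anyhow::{anyhow, Result};

/// Returns true when an error's Debug output carries the netlink EACCES
/// marker "code: Some(-13)", meaning the request was rejected by policy
/// rather than failing for a functional reason.
fn is_netlink_permission_error<T>(result: &Result<T>) -> bool {
    matches!(result, Err(e) if format!("{e:?}").contains("code: Some(-13)"))
}

fn main() {
    let denied: Result<()> = Err(anyhow!("netlink request failed, code: Some(-13)"));
    let other: Result<()> = Err(anyhow!("link not found"));
    assert!(is_netlink_permission_error(&denied));
    assert!(!is_netlink_permission_error(&other));
    println!("EACCES detection distinguishes the two sample errors");
}
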
@@ -992,18 +972,18 @@ mod tests {
    }

    #[tokio::test]
    #[serial(arp_neighbor_tests)]
    async fn list_routes() {
        clean_env_for_test_add_one_arp_neighbor(TEST_DUMMY_INTERFACE, TEST_ARP_IP);
        let devices: Vec<Interface> = Handle::new().unwrap().list_interfaces().await.unwrap();
        let all = Handle::new()
            .unwrap()
            .list_routes()
            .await
            .context(format!("available devices: {:?}", devices))
            .expect("Failed to list routes");

        assert_ne!(all.len(), 0);

        for r in &all {
            assert_ne!(r.device.len(), 0);
        }
    }

    #[tokio::test]
@@ -1052,14 +1032,10 @@ mod tests {
        let lo = handle.find_link(LinkFilter::Name("lo")).await.unwrap();

        for network in list {
            let result = handle.add_addresses(lo.index(), iter::once(network)).await;

            // Skip test if netlink operations are restricted (EACCES = -13)
            if is_netlink_permission_error(&result) {
                return;
            }

            result.expect("Failed to add IP");
            handle
                .add_addresses(lo.index(), iter::once(network))
                .await
                .expect("Failed to add IP");

            // Make sure the address is there
            let result = handle
@@ -1074,14 +1050,10 @@ mod tests {
            assert!(result.is_some());

            // Update it
            let result = handle.add_addresses(lo.index(), iter::once(network)).await;

            // Skip test if netlink operations are restricted (EACCES = -13)
            if is_netlink_permission_error(&result) {
                return;
            }

            result.expect("Failed to delete address");
            handle
                .add_addresses(lo.index(), iter::once(network))
                .await
                .expect("Failed to delete address");
        }
    }

@@ -1116,7 +1088,7 @@ mod tests {
            .expect("prepare: failed to delete neigh");
    }

    async fn prepare_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
    fn prepare_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
        clean_env_for_test_add_one_arp_neighbor(dummy_name, ip);
        // modprobe dummy
        Command::new("modprobe")
@@ -1130,9 +1102,9 @@ mod tests {
            .output()
            .expect("failed to add dummy interface");

        // ip addr add 192.0.2.2/24 dev dummy
        // ip addr add 192.168.0.2/16 dev dummy
        Command::new("ip")
            .args(["addr", "add", "192.0.2.2/24", "dev", dummy_name])
            .args(["addr", "add", "192.168.0.2/16", "dev", dummy_name])
            .output()
            .expect("failed to add ip for dummy");

@@ -1141,26 +1113,24 @@ mod tests {
            .args(["link", "set", dummy_name, "up"])
            .output()
            .expect("failed to up dummy");

        // Wait briefly to ensure the IP address addition is fully complete
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
    }

    #[tokio::test]
    #[serial(arp_neighbor_tests)]
    async fn test_add_one_arp_neighbor() {
        skip_if_not_root!();

        let mac = "6a:92:3a:59:70:aa";
        let to_ip = "169.254.1.1";
        let dummy_name = "dummy_for_arp";

        prepare_env_for_test_add_one_arp_neighbor(TEST_DUMMY_INTERFACE, TEST_ARP_IP).await;
        prepare_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);

        let mut ip_address = IPAddress::new();
        ip_address.set_address(TEST_ARP_IP.to_string());
        ip_address.set_address(to_ip.to_string());

        let mut neigh = ARPNeighbor::new();
        neigh.set_toIPAddress(ip_address);
        neigh.set_device(TEST_DUMMY_INTERFACE.to_string());
        neigh.set_device(dummy_name.to_string());
        neigh.set_lladdr(mac.to_string());
        neigh.set_state(0x80);

@@ -1171,24 +1141,15 @@ mod tests {
            .expect("Failed to add ARP neighbor");

        // ip neigh show dev dummy ip
        let output = Command::new("ip")
            .args(["neigh", "show", "dev", TEST_DUMMY_INTERFACE, TEST_ARP_IP])
        let stdout = Command::new("ip")
            .args(["neigh", "show", "dev", dummy_name, to_ip])
            .output()
            .expect("failed to show neigh");
            .expect("failed to show neigh")
            .stdout;

        let stdout = std::str::from_utf8(&output.stdout).expect("failed to convert stdout");
        let stderr = std::str::from_utf8(&output.stderr).expect("failed to convert stderr");
        assert!(
            output.status.success(),
            "`ip neigh show` returned exit code {:?}. stderr: {:?}",
            output.status.code(),
            stderr
        );
        assert_eq!(
            stdout.trim(),
            format!("{} lladdr {} PERMANENT", TEST_ARP_IP, mac)
        );
        let stdout = std::str::from_utf8(&stdout).expect("failed to convert stdout");
        assert_eq!(stdout.trim(), format!("{} lladdr {} PERMANENT", to_ip, mac));

        clean_env_for_test_add_one_arp_neighbor(TEST_DUMMY_INTERFACE, TEST_ARP_IP);
        clean_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);
    }
}
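
One side of the test above keeps the full std::process::Output so it can assert on the exit status and surface stderr, rather than reading stdout alone. A standalone sketch of that pattern, assuming the `ip` utility is installed and using the loopback device purely for illustration:

use std::process::Command;

fn main() {
    // Query neighbour entries for the loopback device; any device name works
    // for demonstrating the status/stderr handling pattern.
    let output = Command::new("ip")
        .args(["neigh", "show", "dev", "lo"])
        .output()
        .expect("failed to run `ip neigh show`");

    let stdout = String::from_utf8_lossy(&output.stdout);
    let stderr = String::from_utf8_lossy(&output.stderr);

    if !output.status.success() {
        eprintln!(
            "`ip neigh show` exited with {:?}, stderr: {}",
            output.status.code(),
            stderr.trim()
        );
        return;
    }
    println!("neighbour entries on lo: {:?}", stdout.trim());
}

Checking the status before parsing stdout makes a failed command fail loudly with its own diagnostics instead of producing a confusing empty-string comparison.
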
@@ -59,26 +59,10 @@ pub fn reseed_rng(data: &[u8]) -> Result<()> {
#[cfg(test)]
mod tests {
    use super::*;
    use nix::errno::Errno;
    use std::fs::File;
    use std::io::prelude::*;
    use test_utils::skip_if_not_root;

    /// Helper function to check if the result is an EPERM error
    fn is_permission_error(result: &Result<()>) -> bool {
        if let Err(e) = result {
            if let Some(errno) = e.downcast_ref::<Errno>() {
                if *errno == Errno::EPERM {
                    println!(
                        "EPERM: skipping test - reseeding RNG is not permitted in this environment"
                    );
                    return true;
                }
            }
        }
        false
    }

    #[test]
    fn test_reseed_rng() {
        skip_if_not_root!();
@@ -89,9 +73,6 @@ mod tests {
        // Ensure the buffer was filled.
        assert!(n == POOL_SIZE);
        let ret = reseed_rng(&seed);
        if is_permission_error(&ret) {
            return;
        }
        assert!(ret.is_ok());
    }

@@ -104,9 +85,6 @@ mod tests {
        // Ensure the buffer was filled.
        assert!(n == POOL_SIZE);
        let ret = reseed_rng(&seed);
        if is_permission_error(&ret) {
            return;
        }
        if nix::unistd::Uid::effective().is_root() {
            assert!(ret.is_ok());
        } else {
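
The helper above downcasts an anyhow error chain to nix::errno::Errno to detect EPERM and skip the test in environments where reseeding the RNG is not permitted. A self-contained restatement of that check, assuming the anyhow and nix crates; the errors built in main are synthetic:

use anyhow::{anyhow, Result};
use nix::errno::Errno;

/// Returns true when the error chain bottoms out in EPERM, that is, the
/// caller lacks the privilege for the operation rather than having passed
/// bad input.
fn is_permission_error(result: &Result<()>) -> bool {
    matches!(
        result,
        Err(e) if e.downcast_ref::<Errno>() == Some(&Errno::EPERM)
    )
}

fn main() {
    let denied: Result<()> = Err(anyhow!(Errno::EPERM));
    let other: Result<()> = Err(anyhow!(Errno::EINVAL));
    assert!(is_permission_error(&denied));
    assert!(!is_permission_error(&other));
    println!("EPERM detection behaves as expected on the synthetic errors");
}
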
@@ -2417,7 +2417,7 @@ mod tests {
        let cgroups_path = format!(
            "/{}/dummycontainer{}",
            CGROUP_PARENT,
            since_the_epoch.as_micros()
            since_the_epoch.as_millis()
        );

        let spec = SpecBuilder::default()
@@ -2481,26 +2481,6 @@ mod tests {
        // normally this module should eixsts...
        m.name = "bridge".to_string();
        let result = load_kernel_module(&m);

        // Skip test if loading kernel modules is not permitted
        // or kernel module is not found
        if let Err(e) = &result {
            let error_string = format!("{:?}", e);
            // Let's print out the error message first
            println!("DEBUG: error: {}", error_string);
            if error_string.contains("Operation not permitted")
                || error_string.contains("EPERM")
                || error_string.contains("Permission denied")
            {
                println!("INFO: skipping test - loading kernel modules is not permitted in this environment");
                return;
            }
            if error_string.contains("not found") {
                println!("INFO: skipping test - kernel module is not found in this environment");
                return;
            }
        }

        assert!(result.is_ok(), "load module should success");
    }

@@ -858,7 +858,7 @@ mod tests {
        let cgroups_path = format!(
            "/{}/dummycontainer{}",
            CGROUP_PARENT,
            since_the_epoch.as_micros()
            since_the_epoch.as_millis()
        );

        let spec = SpecBuilder::default()
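
The two cgroup hunks above move the dummy container path from millisecond to microsecond timestamps so that back-to-back test runs are less likely to collide on the same cgroup name. A small sketch of the idea; CGROUP_PARENT here is a stand-in constant, not the one used by the test module:

use std::time::{SystemTime, UNIX_EPOCH};

// Stand-in parent; the real tests take this from their own CGROUP_PARENT constant.
const CGROUP_PARENT: &str = "kata-tests";

/// Builds a per-invocation cgroup path. Microsecond resolution makes
/// collisions between quickly repeated test runs far less likely than
/// millisecond resolution.
fn unique_cgroup_path() -> String {
    let since_the_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set before the UNIX epoch");
    format!(
        "/{}/dummycontainer{}",
        CGROUP_PARENT,
        since_the_epoch.as_micros()
    )
}

fn main() {
    println!("{}", unique_cgroup_path());
}
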
Cargo.lock → src/dragonball/Cargo.lock (generated, 72 changed lines)
@@ -4,18 +4,18 @@ version = 4

[[package]]
name = "addr2line"
version = "0.25.1"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
 "gimli",
]

[[package]]
name = "adler2"
version = "2.0.1"
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

[[package]]
name = "android-tzdata"
@@ -64,17 +64,17 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

[[package]]
name = "backtrace"
version = "0.3.76"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
dependencies = [
 "addr2line",
 "cc",
 "cfg-if",
 "libc",
 "miniz_oxide",
 "object",
 "rustc-demangle",
 "windows-link",
]

[[package]]
@@ -344,26 +344,20 @@ name = "dbs-pci"
version = "0.1.0"
dependencies = [
 "byteorder",
 "dbs-address-space",
 "dbs-allocator",
 "dbs-arch",
 "dbs-boot",
 "dbs-device",
 "dbs-interrupt",
 "dbs-utils",
 "dbs-virtio-devices",
 "downcast-rs",
 "kvm-bindings",
 "kvm-ioctls",
 "libc",
 "log",
 "serde",
 "thiserror 1.0.48",
 "vfio-bindings",
 "vfio-ioctls",
 "virtio-queue",
 "vm-memory",
 "vmm-sys-util",
]

[[package]]
@@ -638,9 +632,9 @@ dependencies = [

[[package]]
name = "flate2"
version = "1.1.5"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb"
checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010"
dependencies = [
 "crc32fast",
 "libz-sys",
@@ -780,9 +774,9 @@ dependencies = [

[[package]]
name = "gimli"
version = "0.32.3"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"

[[package]]
name = "h2"
@@ -1121,9 +1115,9 @@ dependencies = [

[[package]]
name = "kvm-ioctls"
version = "0.12.1"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d592c9b0da14bacab1fe89c78e7ed873b20cf7f502d0fc26f628d733215b1e5"
checksum = "c3a321cabd827642499c77e27314f388dd83a717a5ca716b86476fb947f73ae4"
dependencies = [
 "kvm-bindings",
 "libc",
@@ -1250,12 +1244,11 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"

[[package]]
name = "miniz_oxide"
version = "0.8.9"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
 "adler2",
 "simd-adler32",
 "adler",
]

[[package]]
@@ -1453,9 +1446,9 @@ dependencies = [

[[package]]
name = "object"
version = "0.37.3"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe"
checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"
dependencies = [
 "memchr",
]
@@ -1757,9 +1750,9 @@ dependencies = [

[[package]]
name = "rustc-demangle"
version = "0.1.26"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"

[[package]]
name = "rustix"
@@ -1817,9 +1810,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "seccompiler"
version = "0.5.0"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4ae55de56877481d112a559bbc12667635fdaf5e005712fd4e2b2fa50ffc884"
checksum = "e01d1292a1131b22ccea49f30bd106f1238b5ddeec1a98d39268dcc31d540e68"
dependencies = [
 "libc",
]
@@ -1927,17 +1920,14 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "simd-adler32"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"

[[package]]
name = "slab"
version = "0.4.11"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
 "autocfg",
]

[[package]]
name = "slog"
@@ -2560,12 +2550,6 @@ dependencies = [
 "windows-targets 0.48.5",
]

[[package]]
name = "windows-link"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"

[[package]]
name = "windows-sys"
version = "0.48.0"

@@ -9,6 +9,58 @@ repository = "https://github.com/kata-containers/kata-containers.git"
license = "Apache-2.0"
edition = "2018"

[workspace]
members = [
    "dbs_acpi",
    "dbs_address_space",
    "dbs_allocator",
    "dbs_arch",
    "dbs_boot",
    "dbs_device",
    "dbs_interrupt",
    "dbs_legacy_devices",
    "dbs_pci",
    "dbs_tdx",
    "dbs_upcall",
    "dbs_utils",
    "dbs_virtio_devices",
]
resolver = "2"

[workspace.dependencies]
# Rust-VMM crates
event-manager = "0.2.1"
kvm-bindings = "0.6.0"
kvm-ioctls = "0.12.0"
linux-loader = "0.8.0"
seccompiler = "0.2.0"
vfio-bindings = "0.3.0"
vfio-ioctls = "0.1.0"
virtio-bindings = "0.1.0"
virtio-queue = "0.7.0"
vm-fdt = "0.2.0"
vm-memory = "0.10.0"
vm-superio = "0.5.0"
vmm-sys-util = "0.11.0"

# Local dependencies from Dragonball Sandbox crates
dbs-acpi = { path = "dbs_acpi" }
dbs-address-space = { path = "dbs_address_space" }
dbs-allocator = { path = "dbs_allocator" }
dbs-arch = { path = "dbs_arch" }
dbs-boot = { path = "dbs_boot" }
dbs-device = { path = "dbs_device" }
dbs-interrupt = { path = "dbs_interrupt" }
dbs-legacy-devices = { path = "dbs_legacy_devices" }
dbs-pci = { path = "dbs_pci" }
dbs-tdx = { path = "dbs_tdx" }
dbs-upcall = { path = "dbs_upcall" }
dbs-utils = { path = "dbs_utils" }
dbs-virtio-devices = { path = "dbs_virtio_devices" }

# Local dependencies from `src/lib`
test-utils = { path = "../libs/test-utils" }

[dependencies]
anyhow = "1.0.32"
arc-swap = "1.5.0"
@@ -31,12 +83,12 @@ kvm-bindings = { workspace = true }
kvm-ioctls = { workspace = true }
lazy_static = "1.2"
libc = "0.2.39"
linux-loader = { workspace = true }
linux-loader = {workspace = true}
log = "0.4.14"
nix = "0.24.2"
procfs = "0.12.0"
prometheus = { version = "0.14.0", features = ["process"] }
seccompiler = { workspace = true }
seccompiler = {workspace = true}
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.9"
@@ -44,7 +96,7 @@ slog = "2.5.2"
slog-scope = "4.4.0"
thiserror = "1"
tracing = "0.1.41"
vmm-sys-util = { workspace = true }
vmm-sys-util = {workspace = true}
virtio-queue = { workspace = true, optional = true }
vm-memory = { workspace = true, features = ["backend-mmap"] }
crossbeam-channel = "0.5.6"
@@ -66,14 +118,14 @@ virtio-blk = ["dbs-virtio-devices/virtio-blk", "virtio-queue"]
virtio-net = ["dbs-virtio-devices/virtio-net", "virtio-queue"]
# virtio-fs only work on atomic-guest-memory
virtio-fs = [
    "dbs-virtio-devices/virtio-fs-pro",
    "virtio-queue",
    "atomic-guest-memory",
    "dbs-virtio-devices/virtio-fs-pro",
    "virtio-queue",
    "atomic-guest-memory",
]
virtio-mem = [
    "dbs-virtio-devices/virtio-mem",
    "virtio-queue",
    "atomic-guest-memory",
    "dbs-virtio-devices/virtio-mem",
    "virtio-queue",
    "atomic-guest-memory",
]
virtio-balloon = ["dbs-virtio-devices/virtio-balloon", "virtio-queue"]
vhost-net = ["dbs-virtio-devices/vhost-net"]
@@ -84,5 +136,5 @@ host-device = ["dep:vfio-bindings", "dep:vfio-ioctls", "dep:dbs-pci"]

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = [
    'cfg(feature, values("test-mock"))',
    'cfg(feature, values("test-mock"))',
] }

Some files were not shown because too many files have changed in this diff.