Compare commits

..

11 Commits

Author SHA1 Message Date
stevenhorsman
97214445e9 workflows: Bump zizmor to latest
Bump zizmor to the latest version to pick up new rule updates.

Signed-off-by: stevenhorsman <steven@uk.ibm.com>
2026-04-10 14:37:01 +01:00
stevenhorsman
9274a1e63d workflows: Add timeouts
Recently I've seen a couple of occasions where
jobs have seemed to run infinitely. Add timeouts
for these jobs to stop this from happening if things
get into a bad state.

Signed-off-by: stevenhorsman <steven@uk.ibm.com>
2026-04-10 14:37:01 +01:00
stevenhorsman
85dad7c7a2 workflows: Add concurrency limits
It is good practice to add concurrency limits to automatically
cancel jobs that have been superseded and potentially stop
race conditions if we try to get artifacts by workflows and job id
rather than run id.

See https://docs.zizmor.sh/audits/#concurrency-limits

Assisted-by: IBM Bob

Signed-off-by: stevenhorsman <steven@uk.ibm.com>
2026-04-10 14:37:01 +01:00
Fabiano Fidêncio
fd6375d8d5 Merge pull request #12806 from kata-containers/topic/ci-run-runtime-rs-on-SNP
ci: Run qemu-snp-runtime-rs tests in the CI
2026-04-10 11:01:20 +02:00
Fabiano Fidêncio
218077506b Merge pull request #12769 from RuoqingHe/runtime-rs-allow-install-on-riscv
runtime-rs: Allow installation on RISC-V platforms
2026-04-10 10:24:40 +02:00
Fabiano Fidêncio
dca89485f0 Merge pull request #12802 from stevenhorsman/bump-golang-1.25.9
versions: bump golang to 1.25.9
2026-04-10 06:50:35 +02:00
Fabiano Fidêncio
5e1ab0aa7d tests: Support runtime-rs QEMU cmdline format in attestation test
The k8s-confidential-attestation test extracts the QEMU command line
from journal logs to compute the SNP launch measurement. It only
matched the Go runtime's log format ("launching <path> with: [<args>]"),
but runtime-rs logs differently ("qemu args: <args>").

Handle both formats so the test works with qemu-snp-runtime-rs.

Made-with: Cursor
Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
2026-04-09 16:35:08 +02:00
Fabiano Fidêncio
3b155ab0b1 ci: Run runtime-rs tests for SNP
As we're in the process of stabilising runtime-rs for the coming 4.0.0
release, we better start running as many tests as possible with that.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
2026-04-09 16:35:08 +02:00
stevenhorsman
31f9a5461b versions: bump golang to 1.25.9
Bump the go version to resolve CVEs:
- GO-2026-4947
- GO-2026-4946
- GO-2026-4870
- GO-2026-4869
- GO-2026-4865
- GO-2026-4864

Signed-off-by: stevenhorsman <steven@uk.ibm.com>
2026-04-09 08:59:40 +01:00
Ruoqing He
98ee385220 runtime-rs: Consolidate unsupported arch
Consolidate arch we don't support at the moment, and avoid hard coding
error messages per arch.

Signed-off-by: Ruoqing He <ruoqing.he@lingcage.com>
2026-04-09 04:18:50 +00:00
Ruoqing He
26ffe1223b runtime-rs: Allow install on riscv64 platform
runtime-rs works with QEMU on RISC-V platforms, let's enable
installation on RISC-V.

Signed-off-by: Ruoqing He <ruoqing.he@lingcage.com>
2026-04-09 04:18:50 +00:00
64 changed files with 519 additions and 565 deletions

View File

@@ -15,6 +15,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-basic-amd64
cancel-in-progress: true
jobs:
run-containerd-sandboxapi:
name: run-containerd-sandboxapi
@@ -26,6 +30,9 @@ jobs:
matrix:
containerd_version: ['active']
vmm: ['dragonball', 'cloud-hypervisor', 'qemu-runtime-rs']
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-sandboxapi-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
# TODO: enable me when https://github.com/containerd/containerd/issues/11640 is fixed
if: false
runs-on: ubuntu-22.04
@@ -89,6 +96,9 @@ jobs:
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'cloud-hypervisor', 'dragonball', 'qemu', 'qemu-runtime-rs']
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-run-containerd-stability-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
@@ -135,6 +145,9 @@ jobs:
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu', 'dragonball', 'qemu-runtime-rs']
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-run-nydus-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
@@ -188,6 +201,9 @@ jobs:
vmm:
- clh # cloud-hypervisor
- qemu
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-tracing-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
# TODO: enable me when https://github.com/kata-containers/kata-containers/issues/9763 is fixed
# TODO: Transition to free runner (see #9940).
if: false
@@ -233,6 +249,9 @@ jobs:
vmm:
- clh
- qemu
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-vfio-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
# TODO: enable with clh when https://github.com/kata-containers/kata-containers/issues/9764 is fixed
# TODO: enable with qemu when https://github.com/kata-containers/kata-containers/issues/9851 is fixed
# TODO: Transition to free runner (see #9940).
@@ -327,6 +346,9 @@ jobs:
- qemu
- cloud-hypervisor
- qemu-runtime-rs
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-nerdctl-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
env:
KATA_HYPERVISOR: ${{ matrix.vmm }}
@@ -377,6 +399,10 @@ jobs:
run-kata-agent-apis:
name: run-kata-agent-apis
runs-on: ubuntu-22.04
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-agent-api-amd64
cancel-in-progress: true
timeout-minutes: 30
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:

View File

@@ -13,6 +13,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-s390x
cancel-in-progress: true
permissions: {}
jobs:
@@ -26,6 +30,9 @@ jobs:
matrix:
containerd_version: ['active']
vmm: ['qemu-runtime-rs']
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-s390x-${{ toJSON(matrix) }}
cancel-in-progress: true
# TODO: enable me when https://github.com/containerd/containerd/issues/11640 is fixed
if: false
runs-on: s390x-large
@@ -89,6 +96,9 @@ jobs:
matrix:
containerd_version: ['lts', 'active']
vmm: ['qemu']
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-s390x-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: s390x-large
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}

View File

@@ -12,6 +12,10 @@ on:
required: true
type: string
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-riscv64
cancel-in-progress: true
permissions: {}
name: Build checks preview riscv64
@@ -63,7 +67,9 @@ jobs:
path: src/runtime-rs
needs:
- rust
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ inputs.instance }}-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Adjust a permission for repo
run: |

View File

@@ -5,13 +5,17 @@ on:
required: true
type: string
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-build-checks
cancel-in-progress: true
permissions: {}
name: Build checks
jobs:
check:
name: check
timeout-minutes: 60
runs-on: >-
${{
( contains(inputs.instance, 's390x') && matrix.component.name == 'runtime' ) && 's390x' ||
@@ -75,7 +79,9 @@ jobs:
- protobuf-compiler
instance:
- ${{ inputs.instance }}
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ inputs.instance }}-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Adjust a permission for repo
run: |

View File

@@ -28,6 +28,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-tarball-amd64
cancel-in-progress: true
jobs:
build-asset:
name: build-asset
@@ -64,6 +68,9 @@ jobs:
exclude:
- asset: cloud-hypervisor-glibc
stage: release
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
env:
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
steps:
@@ -169,6 +176,9 @@ jobs:
- rootfs-image-nvidia-gpu-confidential
- rootfs-initrd
- rootfs-initrd-confidential
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
@@ -236,6 +246,9 @@ jobs:
- coco-guest-components
- kernel-nvidia-gpu-modules
- pause-image
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
with:
@@ -250,6 +263,9 @@ jobs:
matrix:
asset:
- agent
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
@@ -321,6 +337,7 @@ jobs:
create-kata-tarball:
name: create-kata-tarball
runs-on: ubuntu-22.04
timeout-minutes: 10
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
permissions:
contents: read
@@ -382,6 +399,9 @@ jobs:
- trace-forwarder
stage:
- ${{ inputs.stage }}
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}

View File

@@ -28,6 +28,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-arm64
cancel-in-progress: true
jobs:
build-asset:
name: build-asset
@@ -53,6 +57,9 @@ jobs:
- ovmf
- qemu
- virtiofsd
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-arm64-${{ toJSON(matrix) }}
cancel-in-progress: true
env:
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
steps:
@@ -153,6 +160,9 @@ jobs:
- rootfs-image
- rootfs-image-nvidia-gpu
- rootfs-initrd
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-arm-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
@@ -217,6 +227,9 @@ jobs:
asset:
- busybox
- kernel-nvidia-gpu-modules
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-arm-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
with:
@@ -300,6 +313,7 @@ jobs:
create-kata-tarball:
name: create-kata-tarball
runs-on: ubuntu-24.04-arm
timeout-minutes: 10
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
permissions:
contents: read

View File

@@ -26,6 +26,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-ppc64le
cancel-in-progress: true
jobs:
build-asset:
name: build-asset
@@ -42,6 +46,9 @@ jobs:
- virtiofsd
stage:
- ${{ inputs.stage }}
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-ppc64le-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
@@ -100,6 +107,9 @@ jobs:
- rootfs-initrd
stage:
- ${{ inputs.stage }}
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-ppc64le-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
@@ -162,6 +172,9 @@ jobs:
matrix:
asset:
- agent
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-ppc64le-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
@@ -231,6 +244,7 @@ jobs:
create-kata-tarball:
name: create-kata-tarball
runs-on: ubuntu-24.04-ppc64le
timeout-minutes: 10
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
permissions:
contents: read

View File

@@ -21,6 +21,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-tarball-riscv64
cancel-in-progress: true
permissions: {}
jobs:
@@ -37,6 +41,9 @@ jobs:
asset:
- kernel
- virtiofsd
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-riscv-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:

View File

@@ -29,6 +29,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-tarball-s390x
cancel-in-progress: true
jobs:
build-asset:
name: build-asset
@@ -47,6 +51,9 @@ jobs:
- pause-image
- qemu
- virtiofsd
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-s390x-${{ toJSON(matrix) }}
cancel-in-progress: true
env:
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
steps:
@@ -134,6 +141,9 @@ jobs:
- rootfs-image-confidential
- rootfs-initrd
- rootfs-initrd-confidential
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-s390x-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
@@ -248,6 +258,9 @@ jobs:
- agent
- coco-guest-components
- pause-image
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-s390x-${{ toJSON(matrix) }}
cancel-in-progress: true
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
@@ -319,6 +332,7 @@ jobs:
create-kata-tarball:
name: create-kata-tarball
runs-on: ubuntu-24.04-s390x
timeout-minutes: 10
needs:
- build-asset
- build-asset-rootfs

View File

@@ -15,6 +15,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-kubectl-image
cancel-in-progress: true
env:
REGISTRY: quay.io
IMAGE_NAME: kata-containers/kubectl

View File

@@ -2,6 +2,10 @@ name: Kata Containers CI (manually triggered)
on:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-devel
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -6,6 +6,10 @@ name: Nightly CI for s390x
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-nightly-s390x
cancel-in-progress: true
jobs:
check-internal-test-result:
name: check-internal-test-result

View File

@@ -22,7 +22,7 @@ on:
AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
required: true
AZ_SUBSCRIPTION_ID:
required: true
QUAY_DEPLOYER_PASSWORD:
@@ -32,6 +32,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-weekly
cancel-in-progress: true
jobs:
build-kata-static-tarball-amd64:
permissions:

View File

@@ -30,7 +30,7 @@ on:
AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
required: true
AZ_SUBSCRIPTION_ID:
required: true
CI_HKD_PATH:
@@ -46,6 +46,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-ci
cancel-in-progress: true
jobs:
build-kata-static-tarball-amd64:
permissions:
@@ -370,7 +374,7 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
run-cri-containerd-amd64:
run-cri-containerd-tests-amd64:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
strategy:
@@ -387,7 +391,10 @@ jobs:
{ containerd_version: active, vmm: qemu },
{ containerd_version: active, vmm: cloud-hypervisor },
{ containerd_version: active, vmm: qemu-runtime-rs },
]
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-cri-amd64-${{ toJSON(matrix) }}
cancel-in-progress: true
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
@@ -398,16 +405,19 @@ jobs:
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}
run-cri-containerd-s390x:
run-cri-containerd-tests-s390x:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-s390x
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
{ containerd_version: active, vmm: qemu-runtime-rs },
]
{containerd_version: active, vmm: qemu},
{containerd_version: active, vmm: qemu-runtime-rs},
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
@@ -425,8 +435,11 @@ jobs:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
]
{containerd_version: active, vmm: qemu},
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-cri-ppc64le-${{ toJSON(matrix) }}
cancel-in-progress: true
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
@@ -444,8 +457,11 @@ jobs:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
]
{containerd_version: active, vmm: qemu},
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-cri-arm64-${{ toJSON(matrix) }}
cancel-in-progress: true
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}

View File

@@ -4,6 +4,10 @@ on:
- cron: "0 0 * * *"
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -19,6 +19,10 @@ on:
schedule:
- cron: '45 0 * * 1'
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}

View File

@@ -3,6 +3,10 @@ on:
- cron: '0 23 * * 0'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
name: Docs URL Alive Check

View File

@@ -3,7 +3,9 @@ on:
push:
branches:
- main
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

View File

@@ -31,6 +31,10 @@ on:
skip_static:
value: ${{ jobs.skipper.outputs.skip_static }}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-gatekeeper-skipper
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -3,6 +3,10 @@ on:
name: Govulncheck
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
jobs:
@@ -22,7 +26,7 @@ jobs:
steps:
- name: Checkout the code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
persist-credentials: false

View File

@@ -15,6 +15,10 @@ on:
push:
branches: [ "main" ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-osv-scanner
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -34,6 +34,10 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ inputs.arch }}-publish-deploy
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -12,6 +12,10 @@ on:
permissions: {}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
push-oras-cache:
name: push-oras-cache

View File

@@ -11,6 +11,10 @@ on:
KBUILD_SIGN_PIN:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: false # Note - don't cancel the in progress build as we could end up with inconsistent results
permissions: {}
jobs:

View File

@@ -11,6 +11,10 @@ on:
KBUILD_SIGN_PIN:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: false # Note - don't cancel the in progress build as we could end up with inconsistent results
permissions: {}
jobs:

View File

@@ -9,6 +9,10 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: false # Note - don't cancel the in progress build as we could end up with inconsistent results
permissions: {}
jobs:

View File

@@ -11,6 +11,10 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: false # Note - don't cancel the in progress build as we could end up with inconsistent results
permissions: {}
jobs:

View File

@@ -2,6 +2,10 @@ name: Release Kata Containers
on:
workflow_dispatch
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: false # Note - don't cancel the in progress build as we could end up with inconsistent results
permissions: {}
jobs:

View File

@@ -1,7 +1,5 @@
name: CI | Run cri-containerd tests
permissions: {}
on:
workflow_call:
inputs:
@@ -32,6 +30,12 @@ on:
required: true
type: string
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-cri-tests-${{ toJSON(inputs) }}
cancel-in-progress: true
permissions: {}
jobs:
run-cri-containerd:
name: run-cri-containerd-${{ inputs.arch }} (${{ inputs.containerd_version }}, ${{ inputs.vmm }})

View File

@@ -29,10 +29,13 @@ on:
AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
required: true
AZ_SUBSCRIPTION_ID:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-aks
cancel-in-progress: true
permissions: {}
@@ -54,6 +57,9 @@ jobs:
- host_os: cbl-mariner
vmm: clh
instance-type: normal
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-run-k8s-tests-aks-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
permissions:
contents: read

View File

@@ -22,6 +22,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-k8s-arm64
cancel-in-progress: true
permissions: {}
jobs:
@@ -35,6 +39,9 @@ jobs:
- qemu-runtime-rs
k8s:
- kubeadm
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-arm64-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: arm64-k8s
env:
DOCKER_REGISTRY: ${{ inputs.registry }}

View File

@@ -27,6 +27,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-free-runner
cancel-in-progress: true
permissions: {}
jobs:
@@ -47,6 +51,9 @@ jobs:
{ vmm: cloud-hypervisor, containerd_version: lts },
{ vmm: cloud-hypervisor, containerd_version: active },
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-free-runner-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-24.04
permissions:
contents: read

View File

@@ -28,6 +28,10 @@ on:
NGC_API_KEY:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-nvidia-gpu
cancel-in-progress: true
permissions: {}
jobs:
@@ -40,6 +44,9 @@ jobs:
{ name: nvidia-gpu, vmm: qemu-nvidia-gpu, runner: amd64-nvidia-a100 },
{ name: nvidia-gpu-snp, vmm: qemu-nvidia-gpu-snp, runner: amd64-nvidia-h100-snp },
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ${{ matrix.environment.runner }}
env:
DOCKER_REGISTRY: ${{ inputs.registry }}

View File

@@ -22,6 +22,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-ppc64le
cancel-in-progress: true
permissions: {}
jobs:
@@ -34,6 +38,9 @@ jobs:
- qemu
k8s:
- kubeadm
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-ppc64le-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ppc64le-k8s
env:
DOCKER_REGISTRY: ${{ inputs.registry }}

View File

@@ -25,6 +25,10 @@ on:
AUTHENTICATED_IMAGE_PASSWORD:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-zvsi
cancel-in-progress: true
permissions: {}
jobs:
@@ -63,6 +67,9 @@ jobs:
vmm: qemu
- snapshotter: nydus
vmm: qemu-runtime-rs
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-zvsi-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: s390x-large
env:
DOCKER_REGISTRY: ${{ inputs.registry }}

View File

@@ -29,12 +29,16 @@ on:
AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
required: true
AZ_SUBSCRIPTION_ID:
required: true
AUTHENTICATED_IMAGE_PASSWORD:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-coco-stability
cancel-in-progress: true
permissions: {}
jobs:
@@ -51,6 +55,9 @@ jobs:
- nydus
pull-type:
- guest-pull
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
permissions:

View File

@@ -34,12 +34,16 @@ on:
AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
required: true
AZ_SUBSCRIPTION_ID:
required: true
ITA_KEY:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-coco
cancel-in-progress: true
permissions: {}
jobs:
@@ -53,6 +57,11 @@ jobs:
vmm: qemu-tdx
- runner: sev-snp
vmm: qemu-snp
- runner: sev-snp
vmm: qemu-snp-runtime-rs
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ${{ matrix.runner }}
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
@@ -141,6 +150,9 @@ jobs:
{ vmm: qemu-coco-dev-runtime-rs, snapshotter: nydus, pull_type: guest-pull },
{ vmm: qemu-coco-dev, snapshotter: "", pull_type: experimental-force-guest-pull },
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-24.04
permissions:
contents: read
@@ -268,6 +280,9 @@ jobs:
{ k8s: microk8s, vmm: qemu-coco-dev, snapshotter: "", pull_type: experimental-force-guest-pull },
{ k8s: microk8s, vmm: qemu-coco-dev-runtime-rs, snapshotter: nydus, pull_type: guest-pull },
]
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-24.04
permissions:
contents: read
@@ -382,6 +397,9 @@ jobs:
- erofs
pull-type:
- default
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
runs-on: ubuntu-24.04
environment:
name: ci

View File

@@ -25,10 +25,14 @@ on:
AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
required: true
AZ_SUBSCRIPTION_ID:
required: true
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-kata-deploy-aks
cancel-in-progress: true
permissions: {}
jobs:
@@ -47,6 +51,9 @@ jobs:
include:
- host_os: cbl-mariner
vmm: clh
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
environment:
name: ci

View File

@@ -22,6 +22,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-kata-deploy
cancel-in-progress: true
permissions: {}
jobs:
@@ -37,6 +41,9 @@ jobs:
- k3s
- rke2
- microk8s
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
env:
DOCKER_REGISTRY: ${{ inputs.registry }}

View File

@@ -13,6 +13,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-kata-monitor
cancel-in-progress: true
permissions: {}
jobs:
@@ -34,6 +38,9 @@ jobs:
# TODO: enable with containerd when https://github.com/kata-containers/kata-containers/issues/9761 is fixed
- container_engine: containerd
vmm: qemu
concurrency:
group: ${{ github.workflow }}-${{ github.job }}-${{ github.event.pull_request.number || github.ref }}-${{ toJSON(matrix) }}
cancel-in-progress: true
runs-on: ubuntu-22.04
env:
CONTAINER_ENGINE: ${{ matrix.container_engine }}

View File

@@ -22,6 +22,10 @@ on:
type: string
default: ""
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-metrics
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -11,6 +11,10 @@ on:
branches: [ "main" ]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
jobs:

View File

@@ -26,4 +26,4 @@ jobs:
advanced-security: false
annotations: true
persona: auditor
version: v1.13.0
version: v1.22.0

View File

@@ -159,7 +159,6 @@ netns-rs = "0.1.0"
# Note: nix needs to stay sync'd with libs versions
nix = "0.26.4"
oci-spec = { version = "0.8.1", features = ["runtime"] }
pathrs = "0.2.4"
opentelemetry = { version = "0.17.0", features = ["rt-tokio"] }
procfs = "0.12.0"
prometheus = { version = "0.14.0", features = ["process"] }

View File

@@ -81,7 +81,6 @@ safe-path.workspace = true
# to be modified at runtime.
logging.workspace = true
vsock-exporter.workspace = true
pathrs.workspace = true
# Initdata
base64 = "0.22"

View File

@@ -46,7 +46,6 @@ libseccomp = { version = "0.3.0", optional = true }
zbus = "3.12.0"
bit-vec = "0.8.0"
xattr = "0.2.3"
pathrs.workspace = true
# Local dependencies
protocols.workspace = true

View File

@@ -16,9 +16,8 @@ use nix::NixPath;
use oci::{LinuxDevice, Mount, Process, Spec};
use oci_spec::runtime as oci;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::fs::{self, OpenOptions};
use std::mem::MaybeUninit;
use std::os::fd::AsRawFd;
use std::os::unix;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::io::RawFd;
@@ -48,7 +47,6 @@ pub struct Info {
const MOUNTINFO_FORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
const MOUNTINFO_PATH: &str = "/proc/self/mountinfo";
const PROC_PATH: &str = "/proc";
const SHARED_DIRECTORY: &str = "/run/kata-containers/shared/containers";
const ERR_FAILED_PARSE_MOUNTINFO: &str = "failed to parse mountinfo file";
const ERR_FAILED_PARSE_MOUNTINFO_FINAL_FIELDS: &str =
@@ -235,11 +233,10 @@ pub fn init_rootfs(
// From https://github.com/opencontainers/runtime-spec/blob/main/config.md#mounts
// type (string, OPTIONAL) The type of the filesystem to be mounted.
// For bind mounts, this can be empty or any string whatsoever. For consistency, we set it
// to 'bind'.
// bind may be only specified in the oci spec options -> flags update r#type
let m = &{
let mut mbind = m.clone();
if flags.contains(MsFlags::MS_BIND) {
if is_none_mount_type(mbind.typ()) && flags & MsFlags::MS_BIND == MsFlags::MS_BIND {
mbind.set_typ(Some("bind".to_string()));
}
mbind
@@ -400,6 +397,13 @@ fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) ->
Ok(())
}
fn is_none_mount_type(typ: &Option<String>) -> bool {
match typ {
Some(t) => t == "none",
None => true,
}
}
fn mount_cgroups(
cfd_log: RawFd,
m: &Mount,
@@ -759,20 +763,48 @@ fn mount_from(
let mut d = String::from(data);
let mount_dest = m.destination().display().to_string();
let mount_typ = m.typ().as_ref().unwrap();
if mount_typ == "bind" {
// Bind mounts need special treatment for security, handle them separately.
return bind_mount_from(m, rootfs, flags)
.inspect_err(|e| log_child!(cfd_log, "bind_mount_from failed: {:?}", e));
}
let dest = scoped_join(rootfs, mount_dest)?
.to_str()
.ok_or_else(|| anyhow::anyhow!("Failed to convert path to string"))?
.to_string();
let mount_source = m.source().as_ref().unwrap().display().to_string();
let src = {
let src = if mount_typ == "bind" {
let src = fs::canonicalize(&mount_source)?;
let dir = if src.is_dir() {
Path::new(&dest)
} else {
Path::new(&dest).parent().unwrap()
};
fs::create_dir_all(dir).inspect_err(|e| {
log_child!(
cfd_log,
"create dir {}: {}",
dir.to_str().unwrap(),
e.to_string()
)
})?;
// make sure file exists so we can bind over it
if !src.is_dir() {
let _ = OpenOptions::new()
.create(true)
.truncate(true)
.write(true)
.open(&dest)
.map_err(|e| {
log_child!(
cfd_log,
"open/create dest error. {}: {:?}",
dest.as_str(),
e
);
e
})?;
}
src.to_str().unwrap().to_string()
} else {
let _ = fs::create_dir_all(&dest);
if mount_typ == "cgroup2" {
"cgroup2".to_string()
@@ -832,126 +864,25 @@ fn mount_from(
if !label.is_empty() && selinux::is_enabled()? && use_xattr {
xattr::set(dest.as_str(), "security.selinux", label.as_bytes())?;
}
Ok(())
}
fn bind_mount_from(m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
let mount_source_fd = {
let shared_dir = PathBuf::from(SHARED_DIRECTORY);
let unsafe_mount_source = m
.source()
.as_ref()
.ok_or_else(|| anyhow::anyhow!("Missing source for bind mount"))?;
// Policy checks ensured earlier that shared mount sources start with the `sfprefix`.
// Therefore, it's safe to derive the root for scoping the mount source from that prefix.
let (root_path, inner_path) = match unsafe_mount_source.strip_prefix(&shared_dir) {
Ok(inner) => (shared_dir, inner), // needs scoping
Err(_) => (PathBuf::from("/"), unsafe_mount_source.as_path()), // does not need scoping, i.e. scoped to root
};
let root = pathrs::Root::open(&root_path)
.context(format!("opening mount_source_root {:?} failed", root_path))?;
root.open_subpath(inner_path, pathrs::flags::OpenFlags::O_PATH)
.context(format!(
"opening {:?} in {:?} failed",
inner_path, root_path
))?
};
let container_root = pathrs::Root::open(rootfs)
.context(format!("opening mount_source_root {:?} failed", rootfs))?;
// .metadata queries attrs with statx + AT_EMPTY_PATH, i.e. directly from the opened fd.
let meta = mount_source_fd.metadata().context("statx failed")?;
let mount_destination_fd = if meta.is_dir() {
container_root
.mkdir_all(m.destination(), &std::fs::Permissions::from_mode(0o755))
.context(format!(
"mkdir_all for {:?} in {} failed",
m.destination(),
rootfs
))?
.reopen(pathrs::flags::OpenFlags::O_DIRECTORY)?
} else if meta.is_symlink() {
anyhow::bail!("won't bind mount from symlink {:?}: {:?}", m.destination(), meta)
} else {
// All other bind mounts (files, devices, sockets) should target a file.
if let Some(parent) = m.destination().parent() {
let _ = container_root
.mkdir_all(parent, &std::fs::Permissions::from_mode(0o755))
.context(format!("mkdir_all for {:?} in {} failed", parent, rootfs))?;
}
container_root
.create_file(
m.destination(),
pathrs::flags::OpenFlags::O_TRUNC,
&std::fs::Permissions::from_mode(0o755),
)
.context(format!(
"create_file for {:?} in {} failed",
m.destination(),
rootfs
))?
};
let open_tree_flags = if flags.intersects(MsFlags::MS_REC) {
libc::AT_RECURSIVE
} else {
0
};
let empty_path = std::ffi::CString::new("")?;
let tree_dfd = unsafe {
libc::syscall(
libc::SYS_open_tree,
mount_source_fd.as_raw_fd(),
empty_path.as_ptr(),
libc::OPEN_TREE_CLONE
| libc::OPEN_TREE_CLOEXEC
| libc::AT_EMPTY_PATH as u32
| open_tree_flags as u32,
if flags.contains(MsFlags::MS_BIND)
&& flags.intersects(
!(MsFlags::MS_REC
| MsFlags::MS_REMOUNT
| MsFlags::MS_BIND
| MsFlags::MS_PRIVATE
| MsFlags::MS_SHARED
| MsFlags::MS_SLAVE),
)
};
if tree_dfd < 0 {
return Err(std::io::Error::last_os_error().into());
}
let ret = unsafe {
libc::syscall(
libc::SYS_move_mount,
tree_dfd,
empty_path.as_ptr(),
mount_destination_fd.as_raw_fd(),
empty_path.as_ptr(),
0x01 /* MOVE_MOUNT_F_EMPTY_PATH */ | 0x02, /* MOVE_MOUNT_T_EMPTY_PATH */
)
};
if ret < 0 {
return Err(std::io::Error::last_os_error().into());
}
if flags.intersects(
!(MsFlags::MS_REC
| MsFlags::MS_REMOUNT
| MsFlags::MS_BIND
| MsFlags::MS_PRIVATE
| MsFlags::MS_SHARED
| MsFlags::MS_SLAVE),
) {
// TODO(burgerdev): we really should be using mount_setattr here, but the necessary API is not in nix.
// We successfully resolved the destination within the rootfs above. If nothing else messed
// with the filesystem in between, using the path unchecked should be safe.
let dest = scoped_join(rootfs, m.destination())?;
{
mount(
Some(&dest),
&dest,
Some(dest.as_str()),
dest.as_str(),
None::<&str>,
flags | MsFlags::MS_REMOUNT,
None::<&str>,
)
.context("remount failed")?;
.inspect_err(|e| log_child!(cfd_log, "remout {}: {:?}", dest.as_str(), e))?;
}
Ok(())
}

View File

@@ -4,7 +4,6 @@
//
use async_trait::async_trait;
use pathrs::flags::OpenFlags;
use rustjail::{pipestream::PipeStream, process::StreamType};
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf};
use tokio::sync::Mutex;
@@ -14,7 +13,6 @@ use std::convert::TryFrom;
use std::ffi::{CString, OsStr};
use std::fmt::Debug;
use std::io;
use std::os::fd::AsRawFd;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
#[cfg(target_arch = "s390x")]
@@ -101,7 +99,7 @@ use std::os::unix::prelude::PermissionsExt;
use std::process::{Command, Stdio};
use nix::unistd::{Gid, Uid};
use std::fs::File;
use std::fs::{File, OpenOptions};
use std::io::{BufRead, BufReader, Write};
use std::os::unix::fs::FileExt;
use std::path::PathBuf;
@@ -134,18 +132,6 @@ const ERR_NO_SANDBOX_PIDNS: &str = "Sandbox does not have sandbox_pidns";
// not available.
const IPTABLES_RESTORE_WAIT_SEC: u64 = 5;
/// This mask is applied to parent directories implicitly created for CopyFile requests.
const IMPLICIT_DIRECTORY_PERMISSION_MASK: u32 = 0o777;
/// This mask is applied to directories explicitly created for CopyFile requests.
/// setgid and sticky bit are valid for such directories, whereas setuid is not.
const EXPLICIT_DIRECTORY_PERMISSION_MASK: u32 = 0o3777;
/// This mask is applied to files created for CopyFile requests.
/// This constant is used for consistency with *_DIRECTORY_PERMISSION_MASK.
const FILE_PERMISSION_MASK: u32 = 0o7777;
// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
slog_scope::logger()
@@ -1535,9 +1521,7 @@ impl agent_ttrpc::AgentService for AgentService {
trace_rpc_call!(ctx, "copy_file", req);
is_allowed(&req).await?;
// Potentially untrustworthy data from the host needs to go into the shared dir.
let root_path = PathBuf::from(KATA_GUEST_SHARE_DIR);
do_copy_file(&req, &root_path).map_ttrpc_err(same)?;
do_copy_file(&req).map_ttrpc_err(same)?;
Ok(Empty::new())
}
@@ -2051,116 +2035,125 @@ fn do_set_guest_date_time(sec: i64, usec: i64) -> Result<()> {
Ok(())
}
/// do_copy_file creates a file, directory or symlink beneath the provided directory.
///
/// The function guarantees that no content is written outside of the directory. However, a symlink
/// created by this function might point outside the shared directory. Other users of that
/// directory need to consider whether they trust the host, or handle the directory with the same
/// care as do_copy_file.
///
/// Parent directories are created, if they don't exist already. For these implicit operations, the
/// permissions are set with req.dir_mode. The actual target is created with permissions from
/// req.file_mode, even if it's a directory. For symlinks, req.file_mode is
///
/// If this function returns an error, the filesystem may be in an unexpected state. This is not
/// significant for the caller, since errors are almost certainly not retriable. The runtime should
/// abandon this VM instead.
fn do_copy_file(req: &CopyFileRequest, shared_dir: &PathBuf) -> Result<()> {
let insecure_full_path = PathBuf::from(req.path.as_str());
let path = insecure_full_path
.strip_prefix(&shared_dir)
.context(format!(
"removing {:?} prefix from {}",
shared_dir, req.path
))?;
fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
let path = PathBuf::from(req.path.as_str());
// The shared directory might not exist yet, but we need to create it in order to open the root.
std::fs::create_dir_all(shared_dir)?;
let root = pathrs::Root::open(shared_dir)?;
// Remove anything that might already exist at the target location.
// This is safe even for a symlink leaf, remove_all removes the named inode in its parent dir.
root.remove_all(path).or_else(|e| match e.kind() {
pathrs::error::ErrorKind::OsError(Some(errno)) if errno == libc::ENOENT => Ok(()),
_ => Err(e),
})?;
if !path.starts_with(CONTAINER_BASE) {
return Err(anyhow!(
"Path {:?} does not start with {}",
path,
CONTAINER_BASE
));
}
// Create parent directories if missing
if let Some(parent) = path.parent() {
let dir = root
.mkdir_all(
parent,
&std::fs::Permissions::from_mode(req.dir_mode & IMPLICIT_DIRECTORY_PERMISSION_MASK),
)
.context("mkdir_all parent")?
.reopen(OpenFlags::O_DIRECTORY)
.context("reopen parent")?;
if !parent.exists() {
let dir = parent.to_path_buf();
// Attempt to create directory, ignore AlreadyExists errors
if let Err(e) = fs::create_dir_all(&dir) {
if e.kind() != std::io::ErrorKind::AlreadyExists {
return Err(e.into());
}
}
// TODO(burgerdev): why are we only applying this to the immediate parent?
unistd::fchown(
dir.as_raw_fd(),
Some(Uid::from_raw(req.uid as u32)),
Some(Gid::from_raw(req.gid as u32)),
)
.context("fchown parent")?
// Set directory permissions and ownership
std::fs::set_permissions(&dir, std::fs::Permissions::from_mode(req.dir_mode))?;
unistd::chown(
&dir,
Some(Uid::from_raw(req.uid as u32)),
Some(Gid::from_raw(req.gid as u32)),
)?;
}
}
let sflag = stat::SFlag::from_bits_truncate(req.file_mode);
if sflag.contains(stat::SFlag::S_IFDIR) {
// mkdir_all does not support the setuid/setgid/sticky bits, so we first create the
// directory with the stricter mask and then change permissions with the correct mask.
let dir = root
.mkdir_all(
&path,
&std::fs::Permissions::from_mode(req.file_mode & IMPLICIT_DIRECTORY_PERMISSION_MASK),
)
.context("mkdir_all dir")?
.reopen(OpenFlags::O_DIRECTORY)
.context("reopen dir")?;
dir.set_permissions(std::fs::Permissions::from_mode(req.file_mode & EXPLICIT_DIRECTORY_PERMISSION_MASK))?;
// Remove existing non-directory file if present
if path.exists() && !path.is_dir() {
fs::remove_file(&path)?;
}
unistd::fchown(
dir.as_raw_fd(),
fs::create_dir(&path).or_else(|e| {
if e.kind() != std::io::ErrorKind::AlreadyExists {
return Err(e);
}
Ok(())
})?;
std::fs::set_permissions(&path, std::fs::Permissions::from_mode(req.file_mode))?;
unistd::chown(
&path,
Some(Uid::from_raw(req.uid as u32)),
Some(Gid::from_raw(req.gid as u32)),
)
.context("fchown dir")?;
)?;
return Ok(());
}
// Handle symlink creation
if sflag.contains(stat::SFlag::S_IFLNK) {
// Clean up existing path (whether symlink, dir, or file)
if path.exists() || path.is_symlink() {
// Use appropriate removal method based on path type
if path.is_symlink() {
unistd::unlink(&path)?;
} else if path.is_dir() {
fs::remove_dir_all(&path)?;
} else {
fs::remove_file(&path)?;
}
}
// Create new symbolic link
let src = PathBuf::from(OsStr::from_bytes(&req.data));
root.create(path, &pathrs::InodeType::Symlink(src))
.context("create symlink")?;
// Symlinks don't have permissions on Linux!
unistd::symlinkat(&src, None, &path)?;
// Set symlink ownership (permissions not supported for symlinks)
let path_str = CString::new(path.as_os_str().as_bytes())?;
let ret = unsafe { libc::lchown(path_str.as_ptr(), req.uid as u32, req.gid as u32) };
Errno::result(ret).map(drop)?;
return Ok(());
}
// Write file content.
let flags = if req.offset == 0 {
OpenFlags::O_RDWR | OpenFlags::O_CREAT | OpenFlags::O_TRUNC
} else {
OpenFlags::O_RDWR | OpenFlags::O_CREAT
};
let file = root
.create_file(path, flags, &std::fs::Permissions::from_mode(req.file_mode & FILE_PERMISSION_MASK))
.context("create_file")?;
file.write_all_at(req.data.as_slice(), req.offset as u64)
.context("write_all_at")?;
// Things like umask can change the permissions after create, make sure that they stay
file.set_permissions(std::fs::Permissions::from_mode(req.file_mode & FILE_PERMISSION_MASK))
.context("set_permissions")?;
let mut tmpfile = path.clone();
tmpfile.set_extension("tmp");
unistd::fchown(
file.as_raw_fd(),
let file = OpenOptions::new()
.write(true)
.create(true)
.truncate(req.offset == 0) // Only truncate when offset is 0
.open(&tmpfile)?;
file.write_all_at(req.data.as_slice(), req.offset as u64)?;
let st = stat::stat(&tmpfile)?;
if st.st_size != req.file_size {
return Ok(());
}
file.set_permissions(std::fs::Permissions::from_mode(req.file_mode))?;
unistd::chown(
&tmpfile,
Some(Uid::from_raw(req.uid as u32)),
Some(Gid::from_raw(req.gid as u32)),
)
.context("fchown")?;
)?;
// Remove existing target path before rename
if path.exists() || path.is_symlink() {
if path.is_dir() {
fs::remove_dir_all(&path)?;
} else {
fs::remove_file(&path)?;
}
}
fs::rename(tmpfile, path)?;
Ok(())
}
@@ -2451,7 +2444,6 @@ mod tests {
use super::*;
use crate::{namespace::Namespace, protocols::agent_ttrpc_async::AgentService as _};
use anyhow::ensure;
use nix::mount;
use nix::sched::{unshare, CloneFlags};
use oci::{
@@ -3473,274 +3465,4 @@ COMMIT
assert_eq!(d.result, result, "{msg}");
}
}
#[tokio::test]
async fn test_do_copy_file() {
let temp_dir = tempdir().expect("creating temp dir failed");
// We start one directory deeper such that we catch problems when the shared directory does
// not exist yet.
let base = temp_dir.path().join("shared");
struct TestCase {
name: String,
request: CopyFileRequest,
assertions: Box<dyn Fn(&Path) -> Result<()>>,
should_fail: bool,
}
let tests = [
TestCase {
name: "Create a top-level file".into(),
request: CopyFileRequest {
path: base.join("f").to_string_lossy().into(),
file_mode: 0o644 | libc::S_IFREG,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let f = base.join("f");
let f_stat = fs::metadata(&f).context("stat ./f failed")?;
ensure!(f_stat.is_file());
ensure!(0o644 == f_stat.permissions().mode() & 0o777);
let content = std::fs::read_to_string(&f).context("read ./f failed")?;
ensure!(content.is_empty());
Ok(())
}),
},
TestCase {
name: "Replace a top-level file".into(),
request: CopyFileRequest {
path: base.join("f").to_string_lossy().into(),
file_mode: 0o600 | libc::S_IFREG,
data: b"Hello!".to_vec(),
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let f = base.join("f");
let f_stat = fs::metadata(&f).context("stat ./f failed")?;
ensure!(f_stat.is_file());
ensure!(0o600 == f_stat.permissions().mode() & 0o777);
let content = std::fs::read_to_string(&f).context("read ./f failed")?;
ensure!("Hello!" == content);
Ok(())
}),
},
TestCase {
name: "Create a file and its parent directory".into(),
request: CopyFileRequest {
path: base.join("a/b").to_string_lossy().into(),
dir_mode: 0o755 | libc::S_IFDIR,
file_mode: 0o644 | libc::S_IFREG,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let a_stat = fs::metadata(base.join("a")).context("stat ./a failed")?;
ensure!(a_stat.is_dir());
ensure!(0o755 == a_stat.permissions().mode() & 0o777);
let b_stat = fs::metadata(base.join("a/b")).context("stat ./a/b failed")?;
ensure!(b_stat.is_file());
ensure!(0o644 == b_stat.permissions().mode() & 0o777);
Ok(())
}),
},
TestCase {
name: "Create a file within an existing directory".into(),
request: CopyFileRequest {
path: base.join("a/c").to_string_lossy().into(),
dir_mode: 0o700 | libc::S_IFDIR, // Test that existing directories are not touched - we expect this to stay 0o755.
file_mode: 0o621 | libc::S_IFREG,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let a_stat = fs::metadata(base.join("a")).context("stat ./a failed")?;
ensure!(a_stat.is_dir());
ensure!(0o755 == a_stat.permissions().mode() & 0o777);
let c_stat = fs::metadata(base.join("a/c")).context("stat ./a/c failed")?;
ensure!(c_stat.is_file());
ensure!(0o621 == c_stat.permissions().mode() & 0o777);
Ok(())
}),
},
TestCase {
name: "Create a directory".into(),
request: CopyFileRequest {
path: base.join("a/d").to_string_lossy().into(),
dir_mode: 0o700 | libc::S_IFDIR, // Test that the permissions are taken from file_mode.
file_mode: 0o755 | libc::S_IFDIR,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let a_stat = fs::metadata(base.join("a")).context("stat ./a failed")?;
ensure!(a_stat.is_dir());
ensure!(0o755 == a_stat.permissions().mode() & 0o777);
let d_stat = fs::metadata(base.join("a/d")).context("stat ./a/d failed")?;
ensure!(d_stat.is_dir());
ensure!(0o755 == d_stat.permissions().mode() & 0o777);
Ok(())
}),
},
TestCase {
name: "Create a dir onto an existing file".into(),
request: CopyFileRequest {
path: base.join("a/b").to_string_lossy().into(),
dir_mode: 0o700 | libc::S_IFDIR, // Test that the permissions are taken from file_mode.
file_mode: 0o755 | libc::S_IFDIR,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let b_stat = fs::metadata(base.join("a/b")).context("stat ./a/b failed")?;
ensure!(b_stat.is_dir());
ensure!(0o755 == b_stat.permissions().mode() & 0o777);
Ok(())
}),
},
TestCase {
name: "Create a file onto an existing dir".into(),
request: CopyFileRequest {
path: base.join("a/b").to_string_lossy().into(),
dir_mode: 0o755 | libc::S_IFDIR,
file_mode: 0o644 | libc::S_IFREG,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let b_stat = fs::metadata(base.join("a/b")).context("stat ./a/b failed")?;
ensure!(b_stat.is_file());
ensure!(0o644 == b_stat.permissions().mode() & 0o777);
Ok(())
}),
},
TestCase {
name: "Create a symlink".into(),
request: CopyFileRequest {
path: base.join("a/link").to_string_lossy().into(),
dir_mode: 0o700 | libc::S_IFDIR, // Test that the permissions are taken from file_mode.
file_mode: 0o755 | libc::S_IFLNK,
data: b"/etc/passwd".to_vec(),
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
let a_stat = fs::metadata(base.join("a")).context("stat ./a failed")?;
ensure!(a_stat.is_dir());
ensure!(0o755 == a_stat.permissions().mode() & 0o777);
let link = base.join("a/link");
let link_stat = nix::sys::stat::lstat(&link).context("stat ./a/link failed")?;
// Linux symlinks have no permissions!
ensure!(0o777 | libc::S_IFLNK == link_stat.st_mode);
let target = fs::read_link(&link).context("read_link ./a/link failed")?;
ensure!(target.to_string_lossy() == "/etc/passwd");
Ok(())
}),
},
TestCase {
name: "Create a directory with setgid and sticky bit".into(),
request: CopyFileRequest {
path: base.join("x/y").to_string_lossy().into(),
dir_mode: 0o3755 | libc::S_IFDIR,
file_mode: 0o3770 | libc::S_IFDIR,
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
// Implicitly created directories should not get a sticky bit.
let x_stat = fs::metadata(base.join("x")).context("stat ./x failed")?;
ensure!(x_stat.is_dir());
ensure!(0o755 == x_stat.permissions().mode() & 0o7777);
// Explicitly created directories should.
let y_stat = fs::metadata(base.join("x/y")).context("stat ./x/y failed")?;
ensure!(y_stat.is_dir());
ensure!(0o3770 == y_stat.permissions().mode() & 0o7777);
Ok(())
}),
},
// =================================
// Below are some adversarial tests.
// =================================
TestCase {
name: "Malicious intermediate directory is a symlink".into(),
request: CopyFileRequest {
path: base
.join("a/link/this-could-just-be-shadow-but-I-am-not-risking-it")
.to_string_lossy()
.into(),
dir_mode: 0o700 | libc::S_IFDIR, // Test that the permissions are taken from file_mode.
file_mode: 0o755 | libc::S_IFLNK,
data: b"root:password:19000:0:99999:7:::\n".to_vec(),
..Default::default()
},
should_fail: true,
assertions: Box::new(|base| -> Result<()> {
let link_stat = nix::sys::stat::lstat(&base.join("a/link"))
.context("stat ./a/link failed")?;
ensure!(0o777 | libc::S_IFLNK == link_stat.st_mode);
Ok(())
}),
},
TestCase {
name: "Create a symlink onto an existing symlink".into(),
request: CopyFileRequest {
path: base.join("a/link").to_string_lossy().into(),
dir_mode: 0o700 | libc::S_IFDIR, // Test that the permissions are taken from file_mode.
file_mode: 0o755 | libc::S_IFLNK,
data: b"/etc".to_vec(),
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
// The symlink should be created at the same place (not followed), with the new content.
let a_stat = fs::metadata(base.join("a")).context("stat ./a failed")?;
ensure!(a_stat.is_dir());
ensure!(0o755 == a_stat.permissions().mode() & 0o777);
let link = base.join("a/link");
let link_stat = nix::sys::stat::lstat(&link).context("stat ./a/link failed")?;
// Linux symlinks have no permissions!
ensure!(0o777 | libc::S_IFLNK == link_stat.st_mode);
let target = fs::read_link(&link).context("read_link ./a/link failed")?;
ensure!(target.to_string_lossy() == "/etc");
Ok(())
}),
},
TestCase {
name: "Create a file onto an existing symlink".into(),
request: CopyFileRequest {
path: base.join("a/link").to_string_lossy().into(),
file_mode: 0o600 | libc::S_IFREG,
data: b"Hello!".to_vec(),
..Default::default()
},
should_fail: false,
assertions: Box::new(|base| -> Result<()> {
// The symlink itself should be replaced with the file, not followed.
let link = base.join("a/link");
let link_stat = nix::sys::stat::lstat(&link).context("stat ./a/link failed")?;
ensure!(0o600 | libc::S_IFREG == link_stat.st_mode);
let content = std::fs::read_to_string(&link).context("read ./a/link failed")?;
ensure!("Hello!" == content);
Ok(())
}),
},
];
let uid = unistd::getuid().as_raw() as i32;
let gid = unistd::getgid().as_raw() as i32;
for mut tc in tests {
println!("Running test case: {}", tc.name);
// Since we're in a unit test, using root ownership causes issues with cleaning the temp dir.
tc.request.uid = uid;
tc.request.gid = gid;
let res = do_copy_file(&tc.request, (&base).into());
if tc.should_fail != res.is_err() {
panic!("{}: unexpected do_copy_file result: {:?}", tc.name, res)
}
(tc.assertions)(&base).context(tc.name).unwrap()
}
}
}

View File

@@ -26,23 +26,14 @@ ARCH_DIR = arch
ARCH_FILE_SUFFIX = -options.mk
ARCH_FILE = $(ARCH_DIR)/$(ARCH)$(ARCH_FILE_SUFFIX)
ifeq ($(ARCH), s390x)
UNSUPPORTED_ARCHS := s390x powerpc64le riscv64gc
ifeq ($(filter $(ARCH), $(UNSUPPORTED_ARCHS)),$(ARCH))
default: runtime show-header
test:
@echo "s390x is not currently supported"
@echo "$(ARCH) is not currently supported"
exit 0
install: install-runtime install-configs
else ifeq ($(ARCH), powerpc64le)
default: runtime show-header
test:
@echo "powerpc64le is not currently supported"
exit 0
install: install-runtime install-configs
else ifeq ($(ARCH), riscv64gc)
default: runtime show-header
test:
@echo "RISC-V 64 is not currently supported"
exit 0
else
##TARGET default: build code
default: runtime show-header

View File

@@ -1,7 +1,7 @@
module github.com/kata-containers/kata-containers/src/runtime
// Keep in sync with version in versions.yaml
go 1.25.8
go 1.25.9
// WARNING: Do NOT use `replace` directives as those break dependabot:
// https://github.com/kata-containers/kata-containers/issues/11020

View File

@@ -1,7 +1,7 @@
module kata-containers/csi-kata-directvolume
// Keep in sync with version in versions.yaml
go 1.25.8
go 1.25.9
// WARNING: Do NOT use `replace` directives as those break dependabot:
// https://github.com/kata-containers/kata-containers/issues/11020

View File

@@ -1,7 +1,7 @@
module github.com/kata-containers/kata-containers/src/tools/log-parser
// Keep in sync with version in versions.yaml
go 1.25.8
go 1.25.9
require (
github.com/BurntSushi/toml v1.1.0

View File

@@ -635,7 +635,7 @@ function helm_helper() {
base_values_file="${helm_chart_dir}/try-kata-nvidia-gpu.values.yaml"
fi
;;
qemu-snp|qemu-tdx|qemu-se|qemu-se-runtime-rs|qemu-cca|qemu-coco-dev|qemu-coco-dev-runtime-rs)
qemu-snp|qemu-snp-runtime-rs|qemu-tdx|qemu-se|qemu-se-runtime-rs|qemu-cca|qemu-coco-dev|qemu-coco-dev-runtime-rs)
# Use TEE example file
if [[ -f "${helm_chart_dir}/try-kata-tee.values.yaml" ]]; then
base_values_file="${helm_chart_dir}/try-kata-tee.values.yaml"

View File

@@ -1,7 +1,7 @@
module github.com/kata-containers/tests
// Keep in sync with version in versions.yaml
go 1.25.8
go 1.25.9
// WARNING: Do NOT use `replace` directives as those break dependabot:
// https://github.com/kata-containers/kata-containers/issues/11020

View File

@@ -11,7 +11,7 @@ source "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/confidential_kbs.sh"
SUPPORTED_GPU_TEE_HYPERVISORS=("qemu-nvidia-gpu-snp" "qemu-nvidia-gpu-tdx")
SUPPORTED_TEE_HYPERVISORS=("qemu-snp" "qemu-tdx" "qemu-se" "qemu-se-runtime-rs" "${SUPPORTED_GPU_TEE_HYPERVISORS[@]}")
SUPPORTED_TEE_HYPERVISORS=("qemu-snp" "qemu-snp-runtime-rs" "qemu-tdx" "qemu-se" "qemu-se-runtime-rs" "${SUPPORTED_GPU_TEE_HYPERVISORS[@]}")
SUPPORTED_NON_TEE_HYPERVISORS=("qemu-coco-dev" "qemu-coco-dev-runtime-rs")
function setup_unencrypted_confidential_pod() {
@@ -36,7 +36,7 @@ function get_remote_command_per_hypervisor() {
qemu-se*)
echo "cd /sys/firmware/uv; cat prot_virt_guest | grep 1"
;;
qemu-snp)
qemu-snp|qemu-snp-runtime-rs)
echo "dmesg | grep \"Memory Encryption Features active:.*SEV-SNP\""
;;
qemu-tdx)

View File

@@ -187,7 +187,7 @@ function deploy_kata() {
# Workaround to avoid modifying the workflow yaml files
case "${KATA_HYPERVISOR}" in
qemu-tdx|qemu-snp|qemu-nvidia-gpu-*)
qemu-tdx|qemu-snp|qemu-snp-runtime-rs|qemu-nvidia-gpu-*)
USE_EXPERIMENTAL_SETUP_SNAPSHOTTER=true
SNAPSHOTTER="nydus"
EXPERIMENTAL_FORCE_GUEST_PULL=false
@@ -447,7 +447,7 @@ function cleanup() {
}
function deploy_snapshotter() {
if [[ "${KATA_HYPERVISOR}" == "qemu-tdx" || "${KATA_HYPERVISOR}" == "qemu-snp" ]]; then
if [[ "${KATA_HYPERVISOR}" == "qemu-tdx" || "${KATA_HYPERVISOR}" == "qemu-snp" || "${KATA_HYPERVISOR}" == "qemu-snp-runtime-rs" ]]; then
echo "[Skip] ${SNAPSHOTTER} is pre-installed in the TEE machine"
return
fi
@@ -461,7 +461,7 @@ function deploy_snapshotter() {
}
function cleanup_snapshotter() {
if [[ "${KATA_HYPERVISOR}" == "qemu-tdx" || "${KATA_HYPERVISOR}" == "qemu-snp" ]]; then
if [[ "${KATA_HYPERVISOR}" == "qemu-tdx" || "${KATA_HYPERVISOR}" == "qemu-snp" || "${KATA_HYPERVISOR}" == "qemu-snp-runtime-rs" ]]; then
echo "[Skip] ${SNAPSHOTTER} is pre-installed in the TEE machine"
return
fi

View File

@@ -146,15 +146,22 @@ setup() {
kbs_set_cpu0_resource_policy
# get measured artifacts from qemu command line of previous test
# Go runtime logs: "launching <path> with: [<args>]"
# runtime-rs logs: "qemu args: <args>"
log_line=$(sudo journalctl -r -x -t kata | grep -m 1 'launching.*qemu.*with:' || true)
qemu_cmd=$(echo "$log_line" | sed 's/.*with: \[\(.*\)\]".*/\1/')
if [[ -n "$log_line" ]]; then
qemu_cmd=$(echo "$log_line" | sed 's/.*with: \[\(.*\)\]".*/\1/')
else
log_line=$(sudo journalctl -r -x -t kata | grep -m 1 'qemu args:' || true)
qemu_cmd=$(echo "$log_line" | sed 's/.*qemu args: //')
fi
[[ -n "$qemu_cmd" ]] || { echo "Could not find QEMU command line"; return 1; }
kernel_path=$(echo "$qemu_cmd" | grep -oP -- '-kernel \K[^ ]+')
initrd_path=$(echo "$qemu_cmd" | grep -oP -- '-initrd \K[^ ]+' || true)
firmware_path=$(echo "$qemu_cmd" | grep -oP -- '-bios \K[^ ]+')
vcpu_count=$(echo "$qemu_cmd" | grep -oP -- '-smp \K\d+')
append=$(echo "$qemu_cmd" | sed -n 's/.*-append \(.*\) -bios.*/\1/p')
append=$(echo "$qemu_cmd" | grep -oP -- '-append \K.*?(?= -(smp|bios) )')
# Remove escape backslashes for quotes from output for dm-mod.create parameters
append="${append//\\\"/\"}"

View File

@@ -15,7 +15,7 @@ setup() {
[ "${KATA_HYPERVISOR}" == "qemu-se-runtime-rs" ] && skip "Requires CPU hotplug which isn't supported on ${KATA_HYPERVISOR} yet"
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev* ]] && skip "Requires CPU hotplug which disabled by static_sandbox_resource_mgmt"
( [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] || [ "${KATA_HYPERVISOR}" == "qemu-snp" ] || \
[ "${KATA_HYPERVISOR}" == "qemu-se" ] ) \
[ "${KATA_HYPERVISOR}" == "qemu-snp-runtime-rs" ] || [ "${KATA_HYPERVISOR}" == "qemu-se" ] ) \
&& skip "TEEs do not support memory / CPU hotplug"
pod_name="constraints-cpu-test"
@@ -121,7 +121,7 @@ teardown() {
[ "${KATA_HYPERVISOR}" == "qemu-se-runtime-rs" ] && skip "Requires CPU hotplug which isn't supported on ${KATA_HYPERVISOR} yet"
[[ "${KATA_HYPERVISOR}" == qemu-coco-dev* ]] && skip "Requires CPU hotplug which disabled by static_sandbox_resource_mgmt"
( [ "${KATA_HYPERVISOR}" == "qemu-tdx" ] || [ "${KATA_HYPERVISOR}" == "qemu-snp" ] || \
[ "${KATA_HYPERVISOR}" == "qemu-se" ] ) \
[ "${KATA_HYPERVISOR}" == "qemu-snp-runtime-rs" ] || [ "${KATA_HYPERVISOR}" == "qemu-se" ] ) \
&& skip "TEEs do not support memory / CPU hotplug"
# Debugging information

View File

@@ -9,14 +9,18 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/lib.sh"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
# Currently only the Go runtime provides the config path used here.
# If a Rust hypervisor runs this test, mirror the enabling_hypervisor
# pattern in tests/common.bash to select the correct runtime-rs config.
shim_config_file="/opt/kata/share/defaults/kata-containers/configuration-${KATA_HYPERVISOR}.toml"
case "${KATA_HYPERVISOR}" in
*-runtime-rs)
shim_config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/runtimes/${KATA_HYPERVISOR}/configuration-${KATA_HYPERVISOR}.toml"
;;
*)
shim_config_file="/opt/kata/share/defaults/kata-containers/runtimes/${KATA_HYPERVISOR}/configuration-${KATA_HYPERVISOR}.toml"
;;
esac
check_and_skip() {
case "${KATA_HYPERVISOR}" in
qemu-tdx|qemu-coco-dev|qemu-snp)
qemu-tdx|qemu-coco-dev|qemu-snp|qemu-snp-runtime-rs)
if [ "$(uname -m)" == "s390x" ]; then
skip "measured rootfs tests not implemented for s390x"
fi

View File

@@ -138,7 +138,7 @@ add_runtime_handler_annotations() {
fi
case "${KATA_HYPERVISOR}" in
qemu-coco-dev | qemu-snp | qemu-tdx | qemu-coco-dev-runtime-rs)
qemu-coco-dev | qemu-snp | qemu-snp-runtime-rs | qemu-tdx | qemu-coco-dev-runtime-rs)
info "Add runtime handler annotations for ${KATA_HYPERVISOR}"
local handler_value="kata-${KATA_HYPERVISOR}"
for K8S_TEST_YAML in runtimeclass_workloads_work/*.yaml

View File

@@ -82,7 +82,7 @@ auto_generate_policy_enabled() {
is_coco_platform() {
case "${KATA_HYPERVISOR}" in
"qemu-tdx"|"qemu-snp"|"qemu-coco-dev"|"qemu-coco-dev-runtime-rs"|"qemu-nvidia-gpu-tdx"|"qemu-nvidia-gpu-snp")
"qemu-tdx"|"qemu-snp"|"qemu-snp-runtime-rs"|"qemu-coco-dev"|"qemu-coco-dev-runtime-rs"|"qemu-nvidia-gpu-tdx"|"qemu-nvidia-gpu-snp")
return 0
;;
*)
@@ -148,7 +148,7 @@ install_genpolicy_drop_ins() {
# 20-* OCI version overlay
if [[ "${KATA_HOST_OS:-}" == "cbl-mariner" ]]; then
cp "${examples_dir}/20-oci-1.2.0-drop-in.json" "${settings_d}/"
elif is_k3s_or_rke2 || is_nvidia_gpu_platform || [[ "${KATA_HYPERVISOR}" == "qemu-snp" ]] || [[ "${KATA_HYPERVISOR}" == "qemu-tdx" ]] || [[ -n "${CONTAINER_ENGINE_VERSION:-}" ]]; then
elif is_k3s_or_rke2 || is_nvidia_gpu_platform || [[ "${KATA_HYPERVISOR}" == "qemu-snp" ]] || [[ "${KATA_HYPERVISOR}" == "qemu-snp-runtime-rs" ]] || [[ "${KATA_HYPERVISOR}" == "qemu-tdx" ]] || [[ -n "${CONTAINER_ENGINE_VERSION:-}" ]]; then
cp "${examples_dir}/20-oci-1.3.0-drop-in.json" "${settings_d}/"
fi
@@ -340,7 +340,7 @@ hard_coded_policy_tests_enabled() {
# CI is testing hard-coded policies just on the platforms listed here. Outside of CI,
# users can enable testing of the same policies (plus the auto-generated policies) by
# specifying AUTO_GENERATE_POLICY=yes.
local -r enabled_hypervisors=("qemu-coco-dev" "qemu-snp" "qemu-tdx" "qemu-coco-dev-runtime-rs")
local -r enabled_hypervisors=("qemu-coco-dev" "qemu-snp" "qemu-snp-runtime-rs" "qemu-tdx" "qemu-coco-dev-runtime-rs")
for enabled_hypervisor in "${enabled_hypervisors[@]}"
do
if [[ "${enabled_hypervisor}" == "${KATA_HYPERVISOR}" ]]; then

View File

@@ -1,7 +1,7 @@
module example.com/m
// Keep in sync with version in versions.yaml
go 1.25.8
go 1.25.9
require (
github.com/BurntSushi/toml v1.3.2

View File

@@ -1,7 +1,7 @@
module module-path
// Keep in sync with version in versions.yaml
go 1.25.8
go 1.25.9
require (
github.com/sirupsen/logrus v1.9.3

View File

@@ -470,12 +470,12 @@ languages:
description: "Google's 'go' language"
notes: "'version' is the default minimum version used by this project."
# When updating this, also update in go.mod files.
version: "1.25.8"
version: "1.25.9"
meta:
description: |
'newest-version' is the latest version known to work when
building Kata
newest-version: "1.25.8"
newest-version: "1.25.9"
rust:
description: "Rust language"