Compare commits

..

4 Commits

Author SHA1 Message Date
Markus Rudy
884b217acf Revert "runtime: generate proto files"
This reverts commit f0c61dc217.
2025-07-31 14:52:40 +02:00
Markus Rudy
aec350f40a ci: add codegen to static-checks
Signed-off-by: Markus Rudy <mr@edgeless.systems>

Fixes: #11631

Co-authored-by: Steve Horsman <steven@uk.ibm.com>
2025-07-31 14:49:26 +02:00
Markus Rudy
418e4d4926 tools: add image for Go proto bindings
In order to have a reproducible code generation process, we need to pin
the versions of the tools used. This is accomplished easiest by
generating inside a container.

This commit adds a container image definition with fixed dependencies
for Golang proto/ttrpc code generation, and changes the agent Makefile
to invoke the update-generated-proto.sh script from within that
container.

Signed-off-by: Markus Rudy <mr@edgeless.systems>
2025-07-31 14:35:07 +02:00
Markus Rudy
f0c61dc217 runtime: generate proto files
The generated Go bindings for the agent are out of date. This commit
was produced by running
src/agent/src/libs/protocols/hack/update-generated-proto.sh with
protobuf compiler versions matching those of the last run, according to
the generated code comments.

Since there are new RPC methods, those needed to be added to the
HybridVSockTTRPCMockImp.

Signed-off-by: Markus Rudy <mr@edgeless.systems>
2025-07-30 10:06:38 +02:00
213 changed files with 1839 additions and 5269 deletions

View File

@@ -23,4 +23,3 @@ self-hosted-runner:
- s390x
- s390x-large
- tdx
- amd64-nvidia-a100

View File

@@ -9,7 +9,8 @@ on:
- labeled
- unlabeled
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -11,8 +11,8 @@ on:
paths:
- '.github/workflows/**'
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -13,7 +13,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-containerd-sandboxapi:

View File

@@ -13,7 +13,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-containerd-sandboxapi:

View File

@@ -12,7 +12,8 @@ on:
required: true
type: string
permissions: {}
permissions:
contents: read
name: Build checks preview riscv64
jobs:

View File

@@ -5,8 +5,8 @@ on:
required: true
type: string
permissions: {}
permissions:
contents: read
name: Build checks
jobs:
@@ -42,10 +42,6 @@ jobs:
path: src/runtime-rs
needs:
- rust
- name: libs
path: src/libs
needs:
- rust
- name: agent-ctl
path: src/tools/agent-ctl
needs:

View File

@@ -23,10 +23,9 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: false
KBUILD_SIGN_PIN:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-asset:
@@ -96,7 +95,6 @@ jobs:
- name: Build ${{ matrix.asset }}
id: build
run: |
[[ "${KATA_ASSET}" == *"nvidia"* ]] && echo "KBUILD_SIGN_PIN=${{ secrets.KBUILD_SIGN_PIN }}" >> "${GITHUB_ENV}"
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
@@ -143,7 +141,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-amd64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -152,7 +150,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-amd64-${{ matrix.asset }}-headers${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.xz
retention-days: 15
if-no-files-found: error
@@ -203,7 +201,6 @@ jobs:
- name: Build ${{ matrix.asset }}
id: build
run: |
[[ "${KATA_ASSET}" == *"nvidia"* ]] && echo "KBUILD_SIGN_PIN=${{ secrets.KBUILD_SIGN_PIN }}" >> "${GITHUB_ENV}"
./tests/gha-adjust-to-use-prebuilt-components.sh kata-artifacts "${KATA_ASSET}"
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
@@ -223,7 +220,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-amd64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -315,7 +312,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-amd64-shim-v2${{ inputs.tarball-suffix }}
path: kata-build/kata-static-shim-v2.tar.zst
path: kata-build/kata-static-shim-v2.tar.xz
retention-days: 15
if-no-files-found: error
@@ -352,6 +349,6 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
path: kata-static.tar.zst
path: kata-static.tar.xz
retention-days: 15
if-no-files-found: error

View File

@@ -24,7 +24,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: false
permissions: {}
permissions:
contents: read
jobs:
build-asset:
@@ -120,7 +121,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-arm64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -129,7 +130,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-arm64-${{ matrix.asset }}-headers${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.xz
retention-days: 15
if-no-files-found: error
@@ -194,7 +195,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-arm64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -281,7 +282,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-arm64-shim-v2${{ inputs.tarball-suffix }}
path: kata-build/kata-static-shim-v2.tar.zst
path: kata-build/kata-static-shim-v2.tar.xz
retention-days: 15
if-no-files-found: error
@@ -318,6 +319,6 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-static-tarball-arm64${{ inputs.tarball-suffix }}
path: kata-static.tar.zst
path: kata-static.tar.xz
retention-days: 15
if-no-files-found: error

View File

@@ -24,7 +24,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-asset:
@@ -82,7 +83,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-ppc64le-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 1
if-no-files-found: error
@@ -147,7 +148,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-ppc64le-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 1
if-no-files-found: error
@@ -220,7 +221,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-ppc64le-shim-v2${{ inputs.tarball-suffix }}
path: kata-build/kata-static-shim-v2.tar.zst
path: kata-build/kata-static-shim-v2.tar.xz
retention-days: 1
if-no-files-found: error
@@ -261,6 +262,6 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-static-tarball-ppc64le${{ inputs.tarball-suffix }}
path: kata-static.tar.zst
path: kata-static.tar.xz
retention-days: 1
if-no-files-found: error

View File

@@ -24,7 +24,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-asset:
@@ -80,6 +81,6 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-riscv64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error

View File

@@ -27,7 +27,8 @@ on:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-asset:
@@ -114,7 +115,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-s390x-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -181,7 +182,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-s390x-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.zst
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -229,7 +230,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-s390x${{ inputs.tarball-suffix }}
path: kata-build/kata-static-boot-image-se.tar.zst
path: kata-build/kata-static-boot-image-se.tar.xz
retention-days: 1
if-no-files-found: error
@@ -306,7 +307,7 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-s390x-shim-v2${{ inputs.tarball-suffix }}
path: kata-build/kata-static-shim-v2.tar.zst
path: kata-build/kata-static-shim-v2.tar.xz
retention-days: 15
if-no-files-found: error
@@ -347,6 +348,6 @@ jobs:
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
path: kata-static.tar.zst
path: kata-static.tar.xz
retention-days: 15
if-no-files-found: error

View File

@@ -11,7 +11,8 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
permissions:
contents: read
jobs:
cargo-deny-runner:

View File

@@ -9,7 +9,8 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
permissions:
contents: read
jobs:
kata-containers-ci-on-push:
@@ -30,4 +31,3 @@ jobs:
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -2,7 +2,8 @@ name: Kata Containers CI (manually triggered)
on:
workflow_dispatch:
permissions: {}
permissions:
contents: read
jobs:
kata-containers-ci-on-push:
@@ -26,8 +27,6 @@ jobs:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-checks:
uses: ./.github/workflows/build-checks.yaml

View File

@@ -4,7 +4,8 @@ on:
name: Nightly CI for s390x
permissions: {}
permissions:
contents: read
jobs:
check-internal-test-result:

View File

@@ -7,7 +7,8 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions: {}
permissions:
contents: read
jobs:
kata-containers-ci-on-push:
@@ -30,5 +31,3 @@ jobs:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -3,6 +3,7 @@ on:
pull_request_target:
branches:
- 'main'
- 'stable-*'
types:
# Adding 'labeled' to the list of activity types that trigger this event
# (default: opened, synchronize, reopened) so that we can run this
@@ -13,7 +14,8 @@ on:
- reopened
- labeled
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -50,5 +52,3 @@ jobs:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -27,10 +27,9 @@ on:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-kata-static-tarball-amd64:
@@ -44,8 +43,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64

View File

@@ -35,12 +35,10 @@ on:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
NGC_API_KEY:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
permissions:
contents: read
id-token: write
jobs:
build-kata-static-tarball-amd64:
@@ -54,8 +52,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
@@ -290,10 +286,6 @@ jobs:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
permissions:
contents: read
id-token: write # Used for OIDC access to log into Azure
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -331,21 +323,6 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
run-k8s-tests-on-nvidia-gpu:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-nvidia-gpu.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
run-kata-coco-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs:
@@ -353,9 +330,6 @@ jobs:
- build-and-publish-tee-confidential-unencrypted-image
- publish-csi-driver-amd64
uses: ./.github/workflows/run-kata-coco-tests.yaml
permissions:
contents: read
id-token: write # Used for OIDC access to log into Azure
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -409,6 +383,20 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
run-metrics-tests:
# Skip metrics tests whilst runner is broken
if: false
# if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-metrics.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
run-basic-amd64-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64

View File

@@ -4,13 +4,13 @@ on:
- cron: "0 0 * * *"
workflow_dispatch:
permissions: {}
permissions:
contents: read
id-token: write
jobs:
cleanup-resources:
runs-on: ubuntu-22.04
permissions:
id-token: write # Used for OIDC access to log into Azure
environment: ci
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

View File

@@ -19,8 +19,8 @@ on:
schedule:
- cron: '45 0 * * 1'
permissions: {}
permissions:
contents: read
jobs:
analyze:

View File

@@ -6,7 +6,8 @@ on:
- reopened
- synchronize
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -6,7 +6,8 @@ on:
- reopened
- synchronize
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -17,15 +18,13 @@ jobs:
test:
runs-on: macos-latest
steps:
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.23.10
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: Install golang
run: |
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
- name: Build utils
run: ./ci/darwin-test.sh

View File

@@ -2,7 +2,8 @@ on:
schedule:
- cron: '0 23 * * 0'
permissions: {}
permissions:
contents: read
name: Docs URL Alive Check
jobs:
@@ -13,21 +14,23 @@ jobs:
env:
target_branch: ${{ github.base_ref }}
steps:
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.23.10
env:
GOPATH: ${{ github.workspace }}/kata-containers
- name: Set env
run: |
echo "GOPATH=${{ github.workspace }}" >> "$GITHUB_ENV"
echo "${{ github.workspace }}/bin" >> "$GITHUB_PATH"
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
persist-credentials: false
path: ./src/github.com/${{ github.repository }}
- name: Install golang
run: |
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"
# docs url alive check
- name: Docs URL Alive Check
run: |
cd "${GOPATH}/src/github.com/${{ github.repository }}" && make docs-url-alive-check

View File

@@ -31,7 +31,8 @@ on:
skip_static:
value: ${{ jobs.skipper.outputs.skip_static }}
permissions: {}
permissions:
contents: read
jobs:
skipper:

View File

@@ -12,7 +12,8 @@ on:
- reopened
- labeled
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -3,7 +3,8 @@ on:
name: Govulncheck
permissions: {}
permissions:
contents: read
jobs:
govulncheck:
@@ -13,12 +14,12 @@ jobs:
include:
- binary: "kata-runtime"
make_target: "runtime"
- binary: "containerd-shim-kata-v2"
- binary: "containerd-shim-kata-v2"
make_target: "containerd-shim-v2"
- binary: "kata-monitor"
make_target: "monitor"
fail-fast: false
steps:
- name: Checkout the code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

View File

@@ -6,7 +6,8 @@ on:
- reopened
- synchronize
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -15,8 +15,6 @@ on:
push:
branches: [ "main" ]
permissions: {}
jobs:
scan-scheduled:
permissions:

View File

@@ -5,7 +5,8 @@ on:
- main
workflow_dispatch:
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -24,7 +25,6 @@ jobs:
target-branch: ${{ github.ref_name }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-assets-arm64:
permissions:

View File

@@ -34,7 +34,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
kata-payload:
@@ -84,6 +85,6 @@ jobs:
TAG: ${{ inputs.tag }}
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)/kata-static.tar.zst" \
"$(pwd)/kata-static.tar.xz" \
"${REGISTRY}/${REPO}" \
"${TAG}"

View File

@@ -8,10 +8,9 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-kata-static-tarball-amd64:
@@ -21,7 +20,6 @@ jobs:
stage: release
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
permissions:
contents: read
packages: write
@@ -73,9 +71,9 @@ jobs:
fi
for tag in "${tags[@]}"; do
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "ghcr.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "quay.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
done

View File

@@ -9,7 +9,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-kata-static-tarball-arm64:
@@ -70,9 +71,9 @@ jobs:
fi
for tag in "${tags[@]}"; do
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "ghcr.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "quay.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
done

View File

@@ -9,7 +9,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-kata-static-tarball-ppc64le:
@@ -70,9 +71,9 @@ jobs:
fi
for tag in "${tags[@]}"; do
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "ghcr.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "quay.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
done

View File

@@ -11,7 +11,8 @@ on:
QUAY_DEPLOYER_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
build-kata-static-tarball-s390x:
@@ -74,9 +75,9 @@ jobs:
fi
for tag in "${tags[@]}"; do
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "ghcr.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
"$(pwd)"/kata-static.tar.zst "quay.io/kata-containers/kata-deploy" \
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
"${tag}-${TARGET_ARCH}"
done

View File

@@ -2,7 +2,8 @@ name: Release Kata Containers
on:
workflow_dispatch
permissions: {}
permissions:
contents: read
jobs:
release:
@@ -34,7 +35,6 @@ jobs:
target-arch: amd64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-and-push-assets-arm64:
needs: release
@@ -126,7 +126,7 @@ jobs:
- name: Set KATA_STATIC_TARBALL env var
run: |
tarball=$(pwd)/kata-static.tar.zst
tarball=$(pwd)/kata-static.tar.xz
echo "KATA_STATIC_TARBALL=${tarball}" >> "$GITHUB_ENV"
- name: Download amd64 artifacts

View File

@@ -1,6 +1,7 @@
name: CI | Run cri-containerd tests
permissions: {}
permissions:
contents: read
on:
workflow_call:

View File

@@ -34,7 +34,9 @@ on:
required: true
permissions: {}
permissions:
contents: read
id-token: write
jobs:
run-k8s-tests:
@@ -69,9 +71,6 @@ jobs:
instance-type: normal
auto-generate-policy: yes
runs-on: ubuntu-22.04
permissions:
contents: read
id-token: write # Used for OIDC access to log into Azure
environment: ci
env:
DOCKER_REGISTRY: ${{ inputs.registry }}

View File

@@ -22,7 +22,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-k8s-tests-amd64:

View File

@@ -22,7 +22,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-k8s-tests-on-arm64:

View File

@@ -1,89 +0,0 @@
name: CI | Run NVIDIA GPU kubernetes tests on arm64
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
secrets:
NGC_API_KEY:
required: true
permissions: {}
jobs:
run-nvidia-gpu-tests-on-amd64:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-nvidia-gpu
k8s:
- kubeadm
runs-on: amd64-nvidia-a100
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: ${{ matrix.k8s }}
USING_NFD: "false"
K8S_TEST_HOST_TYPE: all
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
- name: Install `bats`
run: bash tests/integration/kubernetes/gha-run.sh install-bats
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-nv-tests
env:
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
- name: Collect artifacts ${{ matrix.vmm }}
if: always()
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
continue-on-error: true
- name: Archive artifacts ${{ matrix.vmm }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.k8s }}-${{ inputs.tag }}
path: /tmp/artifacts
retention-days: 1
- name: Delete kata-deploy
if: always()
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh cleanup

View File

@@ -22,7 +22,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-k8s-tests:

View File

@@ -25,7 +25,8 @@ on:
AUTHENTICATED_IMAGE_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
jobs:
run-k8s-tests:

View File

@@ -35,7 +35,9 @@ on:
AUTHENTICATED_IMAGE_PASSWORD:
required: true
permissions: {}
permissions:
contents: read
id-token: write
jobs:
# Generate jobs for testing CoCo on non-TEE environments
@@ -50,9 +52,6 @@ jobs:
pull-type:
- guest-pull
runs-on: ubuntu-22.04
permissions:
id-token: write # Used for OIDC access to log into Azure
environment: ci
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
@@ -92,6 +91,9 @@ jobs:
- name: Install kata
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-artifacts
- name: Download Azure CLI
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
- name: Log into the Azure account
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:

View File

@@ -36,7 +36,9 @@ on:
ITA_KEY:
required: true
permissions: {}
permissions:
contents: read
id-token: write
jobs:
run-k8s-tests-on-tdx:
@@ -221,8 +223,6 @@ jobs:
pull-type:
- guest-pull
runs-on: ubuntu-22.04
permissions:
id-token: write # Used for OIDC access to log into Azure
environment: ci
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
@@ -268,6 +268,9 @@ jobs:
- name: Install kata
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-artifacts
- name: Download Azure CLI
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
- name: Log into the Azure account
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:

View File

@@ -29,7 +29,9 @@ on:
AZ_SUBSCRIPTION_ID:
required: true
permissions: {}
permissions:
contents: read
id-token: write
jobs:
run-kata-deploy-tests:
@@ -48,8 +50,6 @@ jobs:
vmm: clh
runs-on: ubuntu-22.04
environment: ci
permissions:
id-token: write # Used for OIDC access to log into Azure
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
@@ -72,6 +72,9 @@ jobs:
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Download Azure CLI
run: bash tests/functional/kata-deploy/gha-run.sh install-azure-cli
- name: Log into the Azure account
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:

View File

@@ -22,7 +22,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-kata-deploy-tests:

View File

@@ -13,7 +13,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-monitor:

View File

@@ -22,7 +22,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-metrics:

View File

@@ -13,7 +13,8 @@ on:
type: string
default: ""
permissions: {}
permissions:
contents: read
jobs:
run-runk:

View File

@@ -10,7 +10,8 @@ on:
- reopened
- synchronize
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -11,7 +11,8 @@ on:
- reopened
- synchronize
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -4,7 +4,8 @@ on:
- cron: '0 0 * * *'
workflow_dispatch:
permissions: {}
permissions:
contents: read
jobs:
stale:

View File

@@ -6,7 +6,8 @@ on:
- reopened
- labeled # a workflow runs only when the 'ok-to-test' label is added
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -7,7 +7,8 @@ on:
- synchronize
workflow_dispatch:
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -5,7 +5,8 @@ on:
branches: ["main"]
pull_request:
permissions: {}
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

View File

@@ -42,7 +42,7 @@ generate-protocols:
# Some static checks rely on generated source files of components.
static-checks: static-checks-build
bash tests/static-checks.sh
bash tests/static-checks.sh github.com/kata-containers/kata-containers
docs-url-alive-check:
bash ci/docs-url-alive-check.sh

View File

@@ -1 +1 @@
3.20.0
3.19.1

View File

@@ -306,7 +306,7 @@ tarball to the newly created VM that will be used for debugging purposes.
> [!NOTE]
> Those artifacts are only available (for 15 days) when all jobs are finished.
Once you have the `kata-static.tar.zst` in your VM, you can login to the VM with
Once you have the `kata-static.tar.xz` in your VM, you can login to the VM with
`kcli ssh debug-nerdctl-pr8070`, go ahead and then clone your development branch
```bash
@@ -323,15 +323,15 @@ $ git config --global user.name "Your Name"
$ git rebase upstream/main
```
Now copy the `kata-static.tar.zst` into your `kata-containers/kata-artifacts` directory
Now copy the `kata-static.tar.xz` into your `kata-containers/kata-artifacts` directory
```bash
$ mkdir kata-artifacts
$ cp ../kata-static.tar.zst kata-artifacts/
$ cp ../kata-static.tar.xz kata-artifacts/
```
> [!NOTE]
> If you downloaded the .zip from GitHub you need to uncompress first to see `kata-static.tar.zst`
> If you downloaded the .zip from GitHub you need to uncompress first to see `kata-static.tar.xz`
And finally run the tests following what's in the yaml file for the test you're
debugging.
@@ -363,11 +363,11 @@ and have fun debugging and hacking!
Steps for debugging the Kubernetes tests are very similar to the ones for
debugging non-Kubernetes tests, with the caveat that what you'll need, this
time, is not the `kata-static.tar.zst` tarball, but rather a payload to be used
time, is not the `kata-static.tar.xz` tarball, but rather a payload to be used
with kata-deploy.
In order to generate your own kata-deploy image you can generate your own
`kata-static.tar.zst` and then take advantage of the following script. Be aware
`kata-static.tar.xz` and then take advantage of the following script. Be aware
that the image generated and uploaded must be accessible by the VM where you'll
be performing your tests.

View File

@@ -89,16 +89,16 @@ However, if any of these components are absent, they must be built from the
$ # Assume that the project is cloned at $GOPATH/src/github.com/kata-containers
$ cd $GOPATH/src/github.com/kata-containers/kata-containers
$ make rootfs-initrd-confidential-tarball
$ tar --zstd -tf build/kata-static-kernel-confidential.tar.zst | grep vmlinuz
$ tar -tf build/kata-static-kernel-confidential.tar.xz | grep vmlinuz
./opt/kata/share/kata-containers/vmlinuz-confidential.container
./opt/kata/share/kata-containers/vmlinuz-6.7-136-confidential
$ kernel_version=6.7-136
$ tar --zstd -tf build/kata-static-rootfs-initrd-confidential.tar.zst | grep initrd
$ tar -tf build/kata-static-rootfs-initrd-confidential.tar.xz | grep initrd
./opt/kata/share/kata-containers/kata-containers-initrd-confidential.img
./opt/kata/share/kata-containers/kata-ubuntu-20.04-confidential.initrd
$ mkdir artifacts
$ tar --zstd -xvf build/kata-static-kernel-confidential.tar.zst -C artifacts ./opt/kata/share/kata-containers/vmlinuz-${kernel_version}-confidential
$ tar --zstd -xvf build/kata-static-rootfs-initrd-confidential.tar.zst -C artifacts ./opt/kata/share/kata-containers/kata-ubuntu-20.04-confidential.initrd
$ tar -xvf build/kata-static-kernel-confidential.tar.xz -C artifacts ./opt/kata/share/kata-containers/vmlinuz-${kernel_version}-confidential
$ tar -xvf build/kata-static-rootfs-initrd-confidential.tar.xz -C artifacts ./opt/kata/share/kata-containers/kata-ubuntu-20.04-confidential.initrd
$ ls artifacts/opt/kata/share/kata-containers/
kata-ubuntu-20.04-confidential.initrd vmlinuz-${kernel_version}-confidential
```
@@ -190,8 +190,8 @@ can be easily accomplished by issuing the following make target:
$ cd $GOPATH/src/github.com/kata-containers/kata-containers
$ mkdir hkd_dir && cp $host_key_document hkd_dir
$ HKD_PATH=hkd_dir SE_KERNEL_PARAMS="agent.log=debug" make boot-image-se-tarball
$ ls build/kata-static-boot-image-se.tar.zst
build/kata-static-boot-image-se.tar.zst
$ ls build/kata-static-boot-image-se.tar.xz
build/kata-static-boot-image-se.tar.xz
```
`SE_KERNEL_PARAMS` could be used to add any extra kernel parameters. If no additional kernel configuration is required, this can be omitted.
@@ -344,18 +344,18 @@ $ make virtiofsd-tarball
$ make shim-v2-tarball
$ mkdir kata-artifacts
$ build_dir=$(readlink -f build)
$ cp -r $build_dir/*.tar.zst kata-artifacts
$ cp -r $build_dir/*.tar.xz kata-artifacts
$ ls -1 kata-artifacts
kata-static-agent.tar.zst
kata-static-boot-image-se.tar.zst
kata-static-coco-guest-components.tar.zst
kata-static-kernel-confidential-modules.tar.zst
kata-static-kernel-confidential.tar.zst
kata-static-pause-image.tar.zst
kata-static-qemu.tar.zst
kata-static-rootfs-initrd-confidential.tar.zst
kata-static-shim-v2.tar.zst
kata-static-virtiofsd.tar.zst
kata-static-agent.tar.xz
kata-static-boot-image-se.tar.xz
kata-static-coco-guest-components.tar.xz
kata-static-kernel-confidential-modules.tar.xz
kata-static-kernel-confidential.tar.xz
kata-static-pause-image.tar.xz
kata-static-qemu.tar.xz
kata-static-rootfs-initrd-confidential.tar.xz
kata-static-shim-v2.tar.xz
kata-static-virtiofsd.tar.xz
$ ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
```
@@ -369,7 +369,7 @@ command before running `kata-deploy-merge-builds.sh`:
$ make rootfs-image-tarball
```
At this point, you should have an archive file named `kata-static.tar.zst` at the project root,
At this point, you should have an archive file named `kata-static.tar.xz` at the project root,
which will be used to build a payload image. If you are using a local container registry at
`localhost:5000`, proceed with the following:
@@ -381,7 +381,7 @@ Build and push a payload image with the name `localhost:5000/build-kata-deploy`
`latest` using the following:
```
$ ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh kata-static.tar.zst localhost:5000/build-kata-deploy latest
$ ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh kata-static.tar.xz localhost:5000/build-kata-deploy latest
... logs ...
Pushing the image localhost:5000/build-kata-deploy:latest to the registry
The push refers to repository [localhost:5000/build-kata-deploy]

114
src/agent/Cargo.lock generated
View File

@@ -508,15 +508,6 @@ dependencies = [
"wyz",
]
[[package]]
name = "block-buffer"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
dependencies = [
"generic-array",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
@@ -898,16 +889,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "crypto-mac"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
dependencies = [
"generic-array",
"subtle",
]
[[package]]
name = "darling"
version = "0.14.4"
@@ -1052,22 +1033,13 @@ dependencies = [
"syn 2.0.101",
]
[[package]]
name = "digest"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
dependencies = [
"generic-array",
]
[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer 0.10.4",
"block-buffer",
"crypto-common",
]
@@ -1571,16 +1543,6 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "hmac"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
dependencies = [
"crypto-mac",
"digest 0.9.0",
]
[[package]]
name = "home"
version = "0.5.9"
@@ -2087,7 +2049,7 @@ dependencies = [
"serde",
"serde_json",
"serial_test",
"sha2 0.10.9",
"sha2",
"slog",
"slog-scope",
"slog-stdlog",
@@ -2171,7 +2133,7 @@ dependencies = [
"serde",
"serde-enum-str",
"serde_json",
"sha2 0.10.9",
"sha2",
"slog",
"slog-scope",
"sysinfo",
@@ -2248,23 +2210,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a7cbbd4ad467251987c6e5b47d53b11a5a05add08f2447a9e2d70aef1e0d138"
[[package]]
name = "libsystemd"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f4f0b5b062ba67aa075e331de778082c09e66b5ef32970ea5a1e9c37c9555d1"
dependencies = [
"hmac",
"libc",
"log",
"nix 0.23.2",
"once_cell",
"serde",
"sha2 0.9.9",
"thiserror 1.0.69",
"uuid 0.8.2",
]
[[package]]
name = "libz-sys"
version = "1.1.22"
@@ -2328,7 +2273,6 @@ dependencies = [
"serde_json",
"slog",
"slog-async",
"slog-journald",
"slog-json",
"slog-scope",
"slog-term",
@@ -2790,12 +2734,6 @@ version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "opaque-debug"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
[[package]]
name = "opentelemetry"
version = "0.14.0"
@@ -3560,7 +3498,7 @@ dependencies = [
"rkyv_derive",
"seahash",
"tinyvec",
"uuid 1.16.0",
"uuid",
]
[[package]]
@@ -3973,20 +3911,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
dependencies = [
"cfg-if",
"cpufeatures",
"digest 0.10.7",
]
[[package]]
name = "sha2"
version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800"
dependencies = [
"block-buffer 0.9.0",
"cfg-if",
"cpufeatures",
"digest 0.9.0",
"opaque-debug",
"digest",
]
[[package]]
@@ -3997,7 +3922,7 @@ checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest 0.10.7",
"digest",
]
[[package]]
@@ -4069,16 +3994,6 @@ dependencies = [
"thread_local",
]
[[package]]
name = "slog-journald"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83e14eb8c2f5d0c8fc9fbac40e6391095e4dc5cb334f7dce99c75cb1919eb39c"
dependencies = [
"libsystemd",
"slog",
]
[[package]]
name = "slog-json"
version = "2.6.1"
@@ -4218,12 +4133,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.109"
@@ -4785,15 +4694,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
"serde",
]
[[package]]
name = "uuid"
version = "1.16.0"
@@ -4807,7 +4707,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b082222b4f6619906941c17eb2297fff4c2fb96cb60164170522942a200bd8"
dependencies = [
"outref",
"uuid 1.16.0",
"uuid",
"vsimd",
]

View File

@@ -22,8 +22,6 @@ use protocols::{
};
use safe_path::scoped_join;
use std::fs;
use std::fs::File;
use std::io::{self, Read};
use std::path::Path;
use std::{os::unix::fs::symlink, path::PathBuf};
use tokio::sync::OnceCell;
@@ -237,8 +235,8 @@ pub async fn unseal_file(path: &str) -> Result<()> {
}
let secret_name = entry.file_name();
if content_starts_with_prefix(&target_path, SEALED_SECRET_PREFIX).await? {
let contents = fs::read_to_string(&target_path)?;
let contents = fs::read_to_string(&target_path)?;
if contents.starts_with(SEALED_SECRET_PREFIX) {
// Get the directory name of the sealed secret file
let dir_name = target_path
.parent()
@@ -264,17 +262,6 @@ pub async fn unseal_file(path: &str) -> Result<()> {
Ok(())
}
pub async fn content_starts_with_prefix(path: &Path, prefix: &str) -> io::Result<bool> {
let mut file = File::open(path)?;
let mut buffer = vec![0u8; prefix.len()];
match file.read_exact(&mut buffer) {
Ok(()) => Ok(buffer == prefix.as_bytes()),
Err(ref e) if e.kind() == io::ErrorKind::UnexpectedEof => Ok(false),
Err(e) => Err(e),
}
}
pub async fn secure_mount(
volume_type: &str,
options: &std::collections::HashMap<String, String>,
@@ -307,7 +294,7 @@ mod tests {
use std::fs::File;
use std::io::{Read, Write};
use std::sync::Arc;
use tempfile::{tempdir, NamedTempFile};
use tempfile::tempdir;
use test_utils::skip_if_not_root;
use tokio::signal::unix::{signal, SignalKind};
struct TestService;
@@ -429,34 +416,4 @@ mod tests {
rt.shutdown_background();
std::thread::sleep(std::time::Duration::from_secs(2));
}
#[tokio::test]
async fn test_content_starts_with_prefix() {
// Normal case: content matches the prefix
let mut f = NamedTempFile::new().unwrap();
write!(f, "sealed.hello_world").unwrap();
assert!(content_starts_with_prefix(f.path(), "sealed.")
.await
.unwrap());
// Does not match the prefix
let mut f2 = NamedTempFile::new().unwrap();
write!(f2, "notsealed.hello_world").unwrap();
assert!(!content_starts_with_prefix(f2.path(), "sealed.")
.await
.unwrap());
// File length < prefix.len()
let mut f3 = NamedTempFile::new().unwrap();
write!(f3, "seal").unwrap();
assert!(!content_starts_with_prefix(f3.path(), "sealed.")
.await
.unwrap());
// Empty file
let f4 = NamedTempFile::new().unwrap();
assert!(!content_starts_with_prefix(f4.path(), "sealed.")
.await
.unwrap());
}
}

View File

@@ -34,7 +34,8 @@ pub fn is_ephemeral_volume(mount: &Mount) -> bool {
mount.destination(),
),
(Some("bind"), Some(source), dest) if get_linux_mount_info(source).is_ok_and(|info| info.fs_type == "tmpfs") &&
(Some("bind"), Some(source), dest) if get_linux_mount_info(source)
.map_or(false, |info| info.fs_type == "tmpfs") &&
(is_empty_dir(source) || dest.as_path() == Path::new("/dev/shm"))
)
}

View File

@@ -823,11 +823,11 @@ mod tests {
#[test]
fn test_get_linux_mount_info() {
let info = get_linux_mount_info("/dev/shm").unwrap();
let info = get_linux_mount_info("/sys/fs/cgroup").unwrap();
assert_eq!(&info.device, "tmpfs");
assert_eq!(&info.fs_type, "tmpfs");
assert_eq!(&info.path, "/dev/shm");
assert_eq!(&info.path, "/sys/fs/cgroup");
assert!(matches!(
get_linux_mount_info(""),

View File

@@ -168,7 +168,7 @@ pub fn is_valid_numa_cpu(cpus: &[u32]) -> Result<bool> {
let numa_nodes = get_numa_nodes()?;
for cpu in cpus {
if !numa_nodes.contains_key(cpu) {
if numa_nodes.get(cpu).is_none() {
return Ok(false);
}
}

View File

@@ -66,7 +66,7 @@ impl PCIDevices for NvidiaPCIDevice {
}
}
nvidia_devices
return nvidia_devices;
}
}

View File

@@ -7,7 +7,7 @@
use std::collections::HashMap;
use std::fs;
use std::io;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use mockall::automock;
use pci_ids::{Classes, Vendors};
@@ -61,22 +61,24 @@ pub(crate) trait MemoryResourceTrait {
impl MemoryResourceTrait for MemoryResources {
fn get_total_addressable_memory(&self, round_up: bool) -> (u64, u64) {
let mut num_bar = 0;
let mut mem_size_32bit = 0u64;
let mut mem_size_64bit = 0u64;
let mut keys: Vec<_> = self.keys().cloned().collect();
keys.sort();
for (num_bar, key) in keys.into_iter().enumerate() {
if key >= PCI_IOV_NUM_BAR || num_bar == PCI_IOV_NUM_BAR {
for key in keys {
if key as usize >= PCI_IOV_NUM_BAR || num_bar == PCI_IOV_NUM_BAR {
break;
}
num_bar += 1;
if let Some(region) = self.get(&key) {
let flags = region.flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
let mem_type_32bit = flags == PCI_BASE_ADDRESS_MEM_TYPE32;
let mem_type_64bit = flags == PCI_BASE_ADDRESS_MEM_TYPE64;
let mem_size = region.end - region.start + 1;
let mem_size = (region.end - region.start + 1) as u64;
if mem_type_32bit {
mem_size_32bit += mem_size;
@@ -136,10 +138,10 @@ impl PCIDeviceManager {
for entry in device_dirs {
let device_dir = entry?;
let device_address = device_dir.file_name().to_string_lossy().to_string();
if let Ok(Some(dev)) =
self.get_device_by_pci_bus_id(&device_address, vendor, &mut cache)
{
pci_devices.push(dev);
if let Ok(device) = self.get_device_by_pci_bus_id(&device_address, vendor, &mut cache) {
if let Some(dev) = device {
pci_devices.push(dev);
}
}
}
@@ -236,7 +238,7 @@ impl PCIDeviceManager {
Ok(Some(pci_device))
}
fn parse_resources(&self, device_path: &Path) -> io::Result<MemoryResources> {
fn parse_resources(&self, device_path: &PathBuf) -> io::Result<MemoryResources> {
let content = fs::read_to_string(device_path.join("resource"))?;
let mut resources: MemoryResources = MemoryResources::new();
for (i, line) in content.lines().enumerate() {
@@ -403,8 +405,6 @@ mod tests {
#[test]
fn test_parse_resources() {
setup_mock_device_files();
let manager = PCIDeviceManager::new(MOCK_PCI_DEVICES_ROOT);
let device_path = PathBuf::from(MOCK_PCI_DEVICES_ROOT).join("0000:ff:1f.0");
@@ -418,8 +418,6 @@ mod tests {
assert_eq!(resource.start, 0x00000000);
assert_eq!(resource.end, 0x0000ffff);
assert_eq!(resource.flags, 0x00000404);
cleanup_mock_device_files();
}
#[test]
@@ -437,7 +435,10 @@ mod tests {
file.write_all(&vec![0; 512]).unwrap();
// It should be true
assert!(is_pcie_device("ff:00.0", MOCK_SYS_BUS_PCI_DEVICES));
assert!(is_pcie_device(
&format!("ff:00.0"),
MOCK_SYS_BUS_PCI_DEVICES
));
// Clean up
let _ = fs::remove_file(config_path);

View File

@@ -142,11 +142,14 @@ pub fn arch_guest_protection(
#[allow(dead_code)]
pub fn available_guest_protection() -> Result<GuestProtection, ProtectionError> {
if !Uid::effective().is_root() {
Err(ProtectionError::NoPerms)?;
return Err(ProtectionError::NoPerms)?;
}
let facilities = crate::cpu::retrieve_cpu_facilities().map_err(|err| {
ProtectionError::CheckFailed(format!("Error retrieving cpu facilities file : {}", err))
ProtectionError::CheckFailed(format!(
"Error retrieving cpu facilities file : {}",
err.to_string()
))
})?;
// Secure Execution

View File

@@ -8,6 +8,7 @@ use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufReader, Result};
use std::result::{self};
use std::u32;
use serde::Deserialize;
@@ -462,12 +463,12 @@ impl Annotation {
/// update config info by annotation
pub fn update_config_by_annotation(&self, config: &mut TomlConfig) -> Result<()> {
if let Some(hv) = self.annotations.get(KATA_ANNO_CFG_RUNTIME_HYPERVISOR) {
if config.hypervisor.contains_key(hv) {
if config.hypervisor.get(hv).is_some() {
config.runtime.hypervisor_name = hv.to_string();
}
}
if let Some(ag) = self.annotations.get(KATA_ANNO_CFG_RUNTIME_AGENT) {
if config.agent.contains_key(ag) {
if config.agent.get(ag).is_some() {
config.runtime.agent_name = ag.to_string();
}
}
@@ -634,13 +635,13 @@ impl Annotation {
KATA_ANNO_CFG_HYPERVISOR_CPU_FEATURES => {
hv.cpu_info.cpu_features = value.to_string();
}
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_VCPUS => match self.get_value::<f32>(key) {
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_VCPUS => match self.get_value::<i32>(key) {
Ok(num_cpus) => {
let num_cpus = num_cpus.unwrap_or_default();
if num_cpus
> get_hypervisor_plugin(hypervisor_name)
.unwrap()
.get_max_cpus() as f32
.get_max_cpus() as i32
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
@@ -943,7 +944,8 @@ impl Annotation {
}
}
KATA_ANNO_CFG_HYPERVISOR_VIRTIO_FS_EXTRA_ARGS => {
let args: Vec<String> = value.split(',').map(str::to_string).collect();
let args: Vec<String> =
value.to_string().split(',').map(str::to_string).collect();
for arg in args {
hv.shared_fs.virtio_fs_extra_args.push(arg.to_string());
}
@@ -969,7 +971,7 @@ impl Annotation {
// update agent config
KATA_ANNO_CFG_KERNEL_MODULES => {
let kernel_mod: Vec<String> =
value.split(';').map(str::to_string).collect();
value.to_string().split(';').map(str::to_string).collect();
for modules in kernel_mod {
ag.kernel_modules.push(modules.to_string());
}
@@ -990,16 +992,14 @@ impl Annotation {
return Err(u32_err);
}
},
KATA_ANNO_CFG_RUNTIME_CREATE_CONTAINTER_TIMEOUT => {
match self.get_value::<u32>(key) {
Ok(v) => {
ag.request_timeout_ms = v.unwrap_or_default() * 1000;
}
Err(_e) => {
return Err(u32_err);
}
KATA_ANNO_CFG_RUNTIME_CREATE_CONTAINTER_TIMEOUT => match self.get_value::<u32>(key) {
Ok(v) => {
ag.request_timeout_ms = v.unwrap_or_default() * 1000;
}
}
Err(_e) => {
return Err(u32_err);
}
},
// update runtime config
KATA_ANNO_CFG_RUNTIME_NAME => {
let runtime = vec!["virt-container", "linux-container", "wasm-container"];
@@ -1032,7 +1032,8 @@ impl Annotation {
}
},
KATA_ANNO_CFG_EXPERIMENTAL => {
let args: Vec<String> = value.split(',').map(str::to_string).collect();
let args: Vec<String> =
value.to_string().split(',').map(str::to_string).collect();
for arg in args {
config.runtime.experimental.push(arg.to_string());
}
@@ -1078,9 +1079,6 @@ impl Annotation {
}
}
}
config.adjust_config()?;
Ok(())
}
}

View File

@@ -6,7 +6,6 @@
use std::io::Result;
use crate::config::{ConfigOps, TomlConfig};
use serde::{Deserialize, Deserializer};
pub use vendor::AgentVendor;
@@ -116,11 +115,7 @@ pub struct Agent {
/// This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
/// It's also used to ensure that workloads, especially those involving large image pulls within the guest,
/// have sufficient time to complete.
#[serde(
default = "default_request_timeout",
rename = "create_container_timeout",
deserialize_with = "deserialize_secs_to_millis"
)]
#[serde(default = "default_request_timeout", rename = "create_container_timeout")]
pub request_timeout_ms: u32,
/// Agent health check request timeout value in millisecond
@@ -132,12 +127,12 @@ pub struct Agent {
/// These modules will be loaded in the guest kernel using modprobe(8).
/// The following example can be used to load two kernel modules with parameters:
/// - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
/// The first word is considered as the module name and the rest as its parameters.
/// Container will not be started when:
/// The first word is considered as the module name and the rest as its parameters.
/// Container will not be started when:
/// - A kernel module is specified and the modprobe command is not installed in the guest
/// or it fails loading the module.
/// - The module is not available in the guest or it doesn't met the guest kernel
/// requirements, like architecture and version.
/// requirements, like architecture and version.
#[serde(default)]
pub kernel_modules: Vec<String>,
@@ -207,15 +202,6 @@ fn default_health_check_timeout() -> u32 {
90_000
}
fn deserialize_secs_to_millis<'de, D>(deserializer: D) -> std::result::Result<u32, D::Error>
where
D: Deserializer<'de>,
{
let secs = u32::deserialize(deserializer)?;
Ok(secs.saturating_mul(1000))
}
impl Agent {
fn validate(&self) -> Result<()> {
if self.dial_timeout_ms == 0 {

View File

@@ -369,7 +369,7 @@ mod drop_in_directory_handling {
config.hypervisor["qemu"].path,
"/usr/bin/qemu-kvm".to_string()
);
assert_eq!(config.hypervisor["qemu"].cpu_info.default_vcpus, 2.0);
assert_eq!(config.hypervisor["qemu"].cpu_info.default_vcpus, 2);
assert_eq!(config.hypervisor["qemu"].device_info.default_bridges, 4);
assert_eq!(
config.hypervisor["qemu"].shared_fs.shared_fs.as_deref(),

View File

@@ -109,7 +109,7 @@ impl ConfigPlugin for CloudHypervisorConfig {
return Err(eother!("Both guest boot image and initrd for CH are empty"));
}
if (ch.cpu_info.default_vcpus > 0.0
if (ch.cpu_info.default_vcpus > 0
&& ch.cpu_info.default_vcpus as u32 > default::MAX_CH_VCPUS)
|| ch.cpu_info.default_maxvcpus > default::MAX_CH_VCPUS
{

View File

@@ -6,6 +6,7 @@
use std::io::Result;
use std::path::Path;
use std::sync::Arc;
use std::u32;
use super::{default, register_hypervisor_plugin};
use crate::config::default::MAX_DRAGONBALL_VCPUS;
@@ -65,7 +66,7 @@ impl ConfigPlugin for DragonballConfig {
}
if db.cpu_info.default_vcpus as u32 > db.cpu_info.default_maxvcpus {
db.cpu_info.default_vcpus = db.cpu_info.default_maxvcpus as f32;
db.cpu_info.default_vcpus = db.cpu_info.default_maxvcpus as i32;
}
if db.machine_info.entropy_source.is_empty() {
@@ -134,7 +135,7 @@ impl ConfigPlugin for DragonballConfig {
));
}
if (db.cpu_info.default_vcpus > 0.0
if (db.cpu_info.default_vcpus > 0
&& db.cpu_info.default_vcpus as u32 > default::MAX_DRAGONBALL_VCPUS)
|| db.cpu_info.default_maxvcpus > default::MAX_DRAGONBALL_VCPUS
{

View File

@@ -93,7 +93,7 @@ impl ConfigPlugin for FirecrackerConfig {
));
}
if (firecracker.cpu_info.default_vcpus > 0.0
if (firecracker.cpu_info.default_vcpus > 0
&& firecracker.cpu_info.default_vcpus as u32 > default::MAX_FIRECRACKER_VCPUS)
|| firecracker.cpu_info.default_maxvcpus > default::MAX_FIRECRACKER_VCPUS
{

File diff suppressed because it is too large Load Diff

View File

@@ -128,7 +128,7 @@ impl ConfigPlugin for QemuConfig {
}
}
if (qemu.cpu_info.default_vcpus > 0.0
if (qemu.cpu_info.default_vcpus > 0
&& qemu.cpu_info.default_vcpus as u32 > default::MAX_QEMU_VCPUS)
|| qemu.cpu_info.default_maxvcpus > default::MAX_QEMU_VCPUS
{

View File

@@ -9,6 +9,7 @@ use std::fs;
use std::io::{self, Result};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::u32;
use lazy_static::lazy_static;
@@ -130,7 +131,9 @@ impl TomlConfig {
pub fn load_from_file<P: AsRef<Path>>(config_file: P) -> Result<(TomlConfig, PathBuf)> {
let mut result = Self::load_raw_from_file(config_file);
if let Ok((ref mut config, _)) = result {
config.adjust_config()?;
Hypervisor::adjust_config(config)?;
Runtime::adjust_config(config)?;
Agent::adjust_config(config)?;
info!(sl!(), "get kata config: {:?}", config);
}
@@ -172,20 +175,13 @@ impl TomlConfig {
/// drop-in config file fragments in config.d/.
pub fn load(content: &str) -> Result<TomlConfig> {
let mut config: TomlConfig = toml::from_str(content)?;
config.adjust_config()?;
Hypervisor::adjust_config(&mut config)?;
Runtime::adjust_config(&mut config)?;
Agent::adjust_config(&mut config)?;
info!(sl!(), "get kata config: {:?}", config);
Ok(config)
}
/// Adjust Kata configuration information.
pub fn adjust_config(&mut self) -> Result<()> {
Hypervisor::adjust_config(self)?;
Runtime::adjust_config(self)?;
Agent::adjust_config(self)?;
Ok(())
}
/// Validate Kata configuration information.
pub fn validate(&self) -> Result<()> {
Hypervisor::validate(self)?;

View File

@@ -1,15 +0,0 @@
// Copyright 2025 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//
//! Filesystem-related constants shared across Kata components.
/// Root filesystem type: ext4
pub const VM_ROOTFS_FILESYSTEM_EXT4: &str = "ext4";
/// Root filesystem type: xfs
pub const VM_ROOTFS_FILESYSTEM_XFS: &str = "xfs";
/// Root filesystem type: erofs
pub const VM_ROOTFS_FILESYSTEM_EROFS: &str = "erofs";

View File

@@ -3,7 +3,6 @@
// SPDX-License-Identifier: Apache-2.0
//
use crate::sl;
use anyhow::{anyhow, Context, Result};
use flate2::read::GzDecoder;
use serde::{Deserialize, Serialize};
@@ -24,8 +23,6 @@ pub enum ProtectedPlatform {
Snp,
/// Cca platform for ARM CCA
Cca,
/// Se platform for IBM SEL
Se,
/// Default with no protection
#[default]
NoProtection,
@@ -132,20 +129,20 @@ fn calculate_digest(algorithm: &str, data: &str) -> Result<Vec<u8>> {
let digest = match algorithm {
"sha256" => {
let mut hasher = Sha256::new();
hasher.update(data);
hasher.update(&data);
hasher.finalize().to_vec()
}
"sha384" => {
let mut hasher = Sha384::new();
hasher.update(data);
hasher.update(&data);
hasher.finalize().to_vec()
}
"sha512" => {
let mut hasher = Sha512::new();
hasher.update(data);
hasher.update(&data);
hasher.finalize().to_vec()
}
_ => return Err(anyhow!("unsupported Hash algorithm: {}", algorithm)),
_ => return Err(anyhow!("unsupported Hash algorithm: {}", algorithm).into()),
};
Ok(digest)
@@ -157,7 +154,6 @@ fn adjust_digest(digest: &[u8], platform: ProtectedPlatform) -> Vec<u8> {
ProtectedPlatform::Tdx => 48,
ProtectedPlatform::Snp => 32,
ProtectedPlatform::Cca => 64,
ProtectedPlatform::Se => 256,
ProtectedPlatform::NoProtection => digest.len(),
};
@@ -176,7 +172,7 @@ fn adjust_digest(digest: &[u8], platform: ProtectedPlatform) -> Vec<u8> {
/// Parse initdata
fn parse_initdata(initdata_str: &str) -> Result<InitData> {
let initdata: InitData = toml::from_str(initdata_str)?;
let initdata: InitData = toml::from_str(&initdata_str)?;
initdata.validate()?;
Ok(initdata)
@@ -196,7 +192,7 @@ pub fn calculate_initdata_digest(
let algorithm: &str = &initdata.algorithm;
// 2. Calculate Digest
let digest = calculate_digest(algorithm, initdata_toml).context("calculate digest")?;
let digest = calculate_digest(algorithm, &initdata_toml).context("calculate digest")?;
// 3. Adjust Digest with Platform
let digest_platform = adjust_digest(&digest, platform);
@@ -207,18 +203,12 @@ pub fn calculate_initdata_digest(
Ok(b64encoded_digest)
}
/// The argument `initdata_annotation` is a Standard base64 encoded string containing a TOML formatted content.
/// The argument `initda_annotation` is a Standard base64 encoded string containing a TOML formatted content.
/// This function decodes the base64 string, parses the TOML content into an InitData structure.
pub fn add_hypervisor_initdata_overrides(initdata_annotation: &str) -> Result<String> {
// If the initdata is empty, return an empty string
if initdata_annotation.is_empty() {
info!(sl!(), "initdata_annotation is empty");
return Ok("".to_string());
}
pub fn add_hypervisor_initdata_overrides(initda_annotation: &str) -> Result<String> {
// Base64 decode the annotation value
let b64_decoded =
base64::decode_config(initdata_annotation, base64::STANDARD).context("base64 decode")?;
base64::decode_config(initda_annotation, base64::STANDARD).context("base64 decode")?;
// Gzip decompress the decoded data
let mut gz_decoder = GzDecoder::new(&b64_decoded[..]);
@@ -241,139 +231,6 @@ mod tests {
use flate2::Compression;
use std::io::Write;
// create gzipped and base64 encoded string
fn create_encoded_input(content: &str) -> String {
let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
encoder.write_all(content.as_bytes()).unwrap();
let compressed = encoder.finish().unwrap();
base64::encode_config(&compressed, base64::STANDARD)
}
#[test]
fn test_empty_annotation() {
// Test with empty string input
let result = add_hypervisor_initdata_overrides("");
assert!(result.is_ok());
assert_eq!(result.unwrap(), "");
}
#[test]
fn test_empty_data_section() {
// Test with empty data section
let toml_content = r#"
algorithm = "sha384"
version = "0.1.0"
[data]
"#;
let encoded = create_encoded_input(toml_content);
let result = add_hypervisor_initdata_overrides(&encoded);
assert!(result.is_ok());
}
#[test]
fn test_valid_complete_initdata() {
// Test with complete InitData structure
let toml_content = r#"
algorithm = "sha384"
version = "0.1.0"
[data]
"aa.toml" = '''
[token_configs]
[token_configs.coco_as]
url = 'http://kbs-service.xxx.cluster.local:8080'
[token_configs.kbs]
url = 'http://kbs-service.xxx.cluster.local:8080'
'''
"cdh.toml" = '''
socket = 'unix:///run/guest-services/cdh.sock'
credentials = []
[kbc]
name = 'cc_kbc'
url = 'http://kbs-service.xxx.cluster.local:8080'
'''
"#;
let encoded = create_encoded_input(toml_content);
let result = add_hypervisor_initdata_overrides(&encoded);
assert!(result.is_ok());
let output = result.unwrap();
assert!(!output.is_empty());
assert!(output.contains("algorithm"));
assert!(output.contains("version"));
}
#[test]
fn test_invalid_base64() {
// Test with invalid base64 string
let invalid_base64 = "This is not valid base64!";
let result = add_hypervisor_initdata_overrides(invalid_base64);
assert!(result.is_err());
let error = result.unwrap_err();
assert!(error.to_string().contains("base64 decode"));
}
#[test]
fn test_valid_base64_invalid_gzip() {
// Test with valid base64 but invalid gzip content
let not_gzipped = "This is not gzipped content";
let encoded = base64::encode_config(not_gzipped.as_bytes(), base64::STANDARD);
let result = add_hypervisor_initdata_overrides(&encoded);
assert!(result.is_err());
let error = result.unwrap_err();
assert!(error.to_string().contains("gz decoder failed"));
}
#[test]
fn test_missing_algorithm() {
// Test with missing algorithm field
let toml_content = r#"
version = "0.1.0"
[data]
"test.toml" = '''
key = "value"
'''
"#;
let encoded = create_encoded_input(toml_content);
let result = add_hypervisor_initdata_overrides(&encoded);
// This might fail depending on whether algorithm is required
if result.is_err() {
assert!(result.unwrap_err().to_string().contains("parse initdata"));
}
}
#[test]
fn test_missing_version() {
// Test with missing version field
let toml_content = r#"
algorithm = "sha384"
[data]
"test.toml" = '''
key = "value"
'''
"#;
let encoded = create_encoded_input(toml_content);
let result = add_hypervisor_initdata_overrides(&encoded);
// This might fail depending on whether version is required
if result.is_err() {
assert!(result.unwrap_err().to_string().contains("parse initdata"));
}
}
/// Test InitData creation and serialization
#[test]
fn test_init_data() {
@@ -435,12 +292,6 @@ key = "value"
assert_eq!(cca_result.len(), 64);
assert_eq!(&cca_result[..32], &short_digest[..]);
assert_eq!(&cca_result[32..], vec![0u8; 32]);
// Test SE platform (requires 256 bytes)
let long_digest = vec![0xAA; 256];
let se_result = adjust_digest(&long_digest, ProtectedPlatform::Se);
assert_eq!(se_result.len(), 256);
assert_eq!(&se_result[..256], &long_digest[..256]);
}
/// Test hypervisor initdata processing with compression

View File

@@ -40,9 +40,6 @@ pub(crate) mod utils;
/// hypervisor capabilities
pub mod capabilities;
/// Filesystem-related constants
pub mod fs;
/// The Initdata specification defines the key data structures and algorithms for injecting
/// any well-defined data from an untrusted host into a TEE (Trusted Execution Environment).
pub mod initdata;

View File

@@ -205,48 +205,47 @@ pub struct NydusImageVolume {
pub snapshot_dir: String,
}
/// Represents a Kata virtual volume, encapsulating information for extra mount options and direct volumes.
/// Kata virtual volume to encapsulate information for extra mount options and direct volumes.
///
/// Direct communication channels between components like snapshotters, `kata-runtime`, `kata-agent`,
/// `image-rs`, and CSI drivers are often expensive to build and maintain.
/// It's very expensive to build direct communication channels to pass information:
/// - between snapshotters and kata-runtime/kata-agent/image-rs
/// - between CSI drivers and kata-runtime/kata-agent
///
/// Therefore, `KataVirtualVolume` is introduced as a common infrastructure to encapsulate
/// additional mount options and direct volume information. It serves as a superset of
/// `NydusExtraOptions` and `DirectVolumeMountInfo`.
/// So `KataVirtualVolume` is introduced to encapsulate extra mount options and direct volume
/// information, so we can build a common infrastructure to handle them.
/// `KataVirtualVolume` is a superset of `NydusExtraOptions` and `DirectVolumeMountInfo`.
///
/// The interpretation of other fields within this structure is determined by the `volume_type` field.
/// Value of `volume_type` determines how to interpret other fields in the structure.
///
/// # Volume Types:
/// - `KATA_VIRTUAL_VOLUME_IGNORE`
/// -- all other fields should be ignored/unused.
///
/// - `KATA_VIRTUAL_VOLUME_IGNORE`:
/// All other fields should be ignored/unused.
/// - `KATA_VIRTUAL_VOLUME_DIRECT_BLOCK`
/// -- `source`: the directly assigned block device
/// -- `fs_type`: filesystem type
/// -- `options`: mount options
/// -- `direct_volume`: additional metadata to pass to the agent regarding this volume.
///
/// - `KATA_VIRTUAL_VOLUME_DIRECT_BLOCK`:
/// - `source`: The directly assigned block device path.
/// - `fs_type`: Filesystem type.
/// - `options`: Mount options.
/// - `direct_volume`: Additional metadata to pass to the agent regarding this volume.
/// - `KATA_VIRTUAL_VOLUME_IMAGE_RAW_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_RAW_BLOCK`
/// -- `source`: path to the raw block image for the container image or layer.
/// -- `fs_type`: filesystem type
/// -- `options`: mount options
/// -- `dm_verity`: disk dm-verity information
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_RAW_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_RAW_BLOCK`:
/// - `source`: Path to the raw block image for the container image or layer.
/// - `fs_type`: Filesystem type.
/// - `options`: Mount options.
/// - `dm_verity`: Disk `dm-verity` information.
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_BLOCK`
/// -- `source`: path to nydus meta blob
/// -- `fs_type`: filesystem type
/// -- `nydus_image`: configuration information for nydus image.
/// -- `dm_verity`: disk dm-verity information
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_BLOCK`:
/// - `source`: Path to nydus meta blob.
/// - `fs_type`: Filesystem type.
/// - `nydus_image`: Configuration information for nydus image.
/// - `dm_verity`: Disk `dm-verity` information.
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_FS` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_FS`
/// -- `source`: path to nydus meta blob
/// -- `fs_type`: filesystem type
/// -- `nydus_image`: configuration information for nydus image.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_FS` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_FS`:
/// - `source`: Path to nydus meta blob.
/// - `fs_type`: Filesystem type.
/// - `nydus_image`: Configuration information for nydus image.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL`:
/// - `source`: Image reference.
/// - `image_pull`: Metadata for image pulling.
/// - `KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL`
/// -- `source`: image reference
/// -- `image_pull`: metadata for image pulling
#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)]
pub struct KataVirtualVolume {
/// Type of virtual volume.
@@ -276,7 +275,7 @@ pub struct KataVirtualVolume {
}
impl KataVirtualVolume {
/// Creates a new instance of `KataVirtualVolume` with the specified type.
/// Create a new instance of `KataVirtualVolume` with specified type.
pub fn new(volume_type: String) -> Self {
Self {
volume_type,
@@ -284,7 +283,7 @@ impl KataVirtualVolume {
}
}
/// Validates the virtual volume object.
/// Validate virtual volume object.
pub fn validate(&self) -> Result<()> {
match self.volume_type.as_str() {
KATA_VIRTUAL_VOLUME_DIRECT_BLOCK => {
@@ -366,25 +365,25 @@ impl KataVirtualVolume {
Ok(())
}
/// Serializes the virtual volume object to a JSON string.
/// Serialize the virtual volume object to json.
pub fn to_json(&self) -> Result<String> {
Ok(serde_json::to_string(self)?)
}
/// Deserializes a virtual volume object from a JSON string.
/// Deserialize a virtual volume object from json string.
pub fn from_json(value: &str) -> Result<Self> {
let volume: KataVirtualVolume = serde_json::from_str(value)?;
volume.validate()?;
Ok(volume)
}
/// Serializes the virtual volume object to a JSON string and encodes the string with base64.
/// Serialize the virtual volume object to json and encode the string with base64.
pub fn to_base64(&self) -> Result<String> {
let json = self.to_json()?;
Ok(base64::encode(json))
}
/// Decodes and deserializes a virtual volume object from a base64 encoded JSON string.
/// Decode and deserialize a virtual volume object from base64 encoded json string.
pub fn from_base64(value: &str) -> Result<Self> {
let json = base64::decode(value)?;
let volume: KataVirtualVolume = serde_json::from_slice(&json)?;
@@ -454,18 +453,18 @@ impl TryFrom<&NydusExtraOptions> for KataVirtualVolume {
}
}
/// Trait object for a storage device.
/// Trait object for storage device.
pub trait StorageDevice: Send + Sync {
/// Returns the path of the storage device, if available.
/// Path
fn path(&self) -> Option<&str>;
/// Cleans up resources related to the storage device.
/// Clean up resources related to the storage device.
fn cleanup(&self) -> Result<()>;
}
/// Joins a user-provided volume path with the Kata direct-volume root path.
/// Join user provided volume path with kata direct-volume root path.
///
/// The `volume_path` is base64-url-encoded and then safely joined to the `prefix`.
/// The `volume_path` is base64-url-encoded and then safely joined to the `prefix`
pub fn join_path(prefix: &str, volume_path: &str) -> Result<PathBuf> {
if volume_path.is_empty() {
return Err(anyhow!(std::io::ErrorKind::NotFound));
@@ -475,7 +474,7 @@ pub fn join_path(prefix: &str, volume_path: &str) -> Result<PathBuf> {
Ok(safe_path::scoped_join(prefix, b64_url_encoded_path)?)
}
/// Gets `DirectVolumeMountInfo` from `mountinfo.json`.
/// get DirectVolume mountInfo from mountinfo.json.
pub fn get_volume_mount_info(volume_path: &str) -> Result<DirectVolumeMountInfo> {
let volume_path = join_path(KATA_DIRECT_VOLUME_ROOT_PATH, volume_path)?;
let mount_info_file_path = volume_path.join(KATA_MOUNT_INFO_FILE_NAME);
@@ -485,30 +484,28 @@ pub fn get_volume_mount_info(volume_path: &str) -> Result<DirectVolumeMountInfo>
Ok(mount_info)
}
/// Checks whether a mount type is a marker for a Kata specific volume.
/// Check whether a mount type is a marker for Kata specific volume.
pub fn is_kata_special_volume(ty: &str) -> bool {
ty.len() > KATA_VOLUME_TYPE_PREFIX.len() && ty.starts_with(KATA_VOLUME_TYPE_PREFIX)
}
/// Checks whether a mount type is a marker for a Kata guest mount volume.
/// Check whether a mount type is a marker for Kata guest mount volume.
pub fn is_kata_guest_mount_volume(ty: &str) -> bool {
ty.len() > KATA_GUEST_MOUNT_PREFIX.len() && ty.starts_with(KATA_GUEST_MOUNT_PREFIX)
}
/// Checks whether a mount type is a marker for a Kata ephemeral volume.
/// Check whether a mount type is a marker for Kata ephemeral volume.
pub fn is_kata_ephemeral_volume(ty: &str) -> bool {
ty == KATA_EPHEMERAL_VOLUME_TYPE
}
/// Checks whether a mount type is a marker for a Kata hostdir volume.
/// Check whether a mount type is a marker for Kata hostdir volume.
pub fn is_kata_host_dir_volume(ty: &str) -> bool {
ty == KATA_HOST_DIR_VOLUME_TYPE
}
/// Splits a sandbox bindmount string into its real path and mode.
///
/// The `bindmount` format is typically `/path/to/dir` or `/path/to/dir:ro[:rw]`.
/// This function extracts the real path (without the suffix ":ro" or ":rw") and the mode.
/// sandbox bindmount format: /path/to/dir, or /path/to/dir:ro[:rw]
/// the real path is without suffix ":ro" or ":rw".
pub fn split_bind_mounts(bindmount: &str) -> (&str, &str) {
let (real_path, mode) = if bindmount.ends_with(SANDBOX_BIND_MOUNTS_RO) {
(
@@ -528,14 +525,12 @@ pub fn split_bind_mounts(bindmount: &str) -> (&str, &str) {
(real_path, mode)
}
/// Adjusts the root filesystem mounts based on the guest-pull mechanism.
///
/// This function disregards any provided `rootfs_mounts`. Instead, it forcefully creates
/// a single, default `KataVirtualVolume` specifically for guest-pull operations.
/// This volume's representation is then base64-encoded and added as the only option
/// to a new, singular `Mount` entry, which becomes the sole item in the returned `Vec<Mount>`.
/// This ensures that when guest pull is active, the root filesystem is exclusively
/// configured via this virtual volume.
/// This function, adjust_rootfs_mounts, manages the root filesystem mounts based on guest-pull mechanism.
/// - the function disregards any provided rootfs_mounts.
/// Instead, it forcefully creates a single, default KataVirtualVolume specifically for guest-pull operations.
/// This volume's representation is then base64-encoded and added as the only option to a new, singular Mount entry,
/// which becomes the sole item in the returned Vec<Mount>.
/// This ensures that when guest pull is active, the root filesystem is exclusively configured via this virtual volume.
pub fn adjust_rootfs_mounts() -> Result<Vec<Mount>> {
// We enforce a single, default KataVirtualVolume as the exclusive rootfs mount.
let volume = KataVirtualVolume::new(KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL.to_string());

View File

@@ -186,7 +186,7 @@ mod tests {
"./test_hypervisor_hook_path"
);
assert!(!hv.memory_info.enable_mem_prealloc);
assert_eq!(hv.cpu_info.default_vcpus, 12.0);
assert_eq!(hv.cpu_info.default_vcpus, 12);
assert!(!hv.memory_info.enable_guest_swap);
assert_eq!(hv.memory_info.default_memory, 100);
assert!(!hv.enable_iothreads);

View File

@@ -22,7 +22,6 @@ slog-json = "2.4.0"
slog-term = "2.9.1"
slog-async = "2.7.0"
slog-scope = "4.4.0"
slog-journald = "2.2.0"
lazy_static = "1.3.0"
arc-swap = "1.5.0"

View File

@@ -81,11 +81,6 @@ pub fn create_term_logger(level: slog::Level) -> (slog::Logger, slog_async::Asyn
(logger, guard)
}
pub enum LogDestination {
File(Box<dyn Write + Send + Sync>),
Journal,
}
// Creates a logger which prints output as JSON
// XXX: 'writer' param used to make testing possible.
pub fn create_logger<W>(
@@ -97,43 +92,13 @@ pub fn create_logger<W>(
where
W: Write + Send + Sync + 'static,
{
create_logger_with_destination(name, source, level, LogDestination::File(Box::new(writer)))
}
// Creates a logger which prints output as JSON or to systemd journal
pub fn create_logger_with_destination(
name: &str,
source: &str,
level: slog::Level,
destination: LogDestination,
) -> (slog::Logger, slog_async::AsyncGuard) {
// Check the destination type before consuming it.
// The `matches` macro performs a non-consuming check (it borrows).
let is_journal_destination = matches!(destination, LogDestination::Journal);
// The target type for boxed drain. Note that Err = slog::Never.
// Both `.fuse()` and `.ignore_res()` convert potential errors into a non-returning path
// (panic or ignore), so they never return an Err.
let drain: Box<dyn Drain<Ok = (), Err = slog::Never> + Send> = match destination {
LogDestination::File(writer) => {
// `destination` is `File`.
let json_drain = slog_json::Json::new(writer)
.add_default_keys()
.build()
.fuse();
Box::new(json_drain)
}
LogDestination::Journal => {
// `destination` is `Journal`.
let journal_drain = slog_journald::JournaldDrain.ignore_res();
Box::new(journal_drain)
}
};
let json_drain = slog_json::Json::new(writer)
.add_default_keys()
.build()
.fuse();
// Ensure only a unique set of key/value fields is logged
let unique_drain = UniqueDrain::new(drain).fuse();
let unique_drain = UniqueDrain::new(json_drain).fuse();
// Adjust the level which will be applied to the log-system
// Info is the default level, but if Debug flag is set, the overall log level will be changed to Debug here
@@ -154,28 +119,16 @@ pub fn create_logger_with_destination(
.thread_name("slog-async-logger".into())
.build_with_guard();
// Create a base logger with common fields.
let base_logger = slog::Logger::root(
// Add some "standard" fields
let logger = slog::Logger::root(
async_drain.fuse(),
o!(
"version" => env!("CARGO_PKG_VERSION"),
o!("version" => env!("CARGO_PKG_VERSION"),
"subsystem" => DEFAULT_SUBSYSTEM,
"pid" => process::id().to_string(),
"name" => name.to_string(),
"source" => source.to_string()
),
"source" => source.to_string()),
);
// If not journal destination, the logger remains the base_logger.
let logger = if is_journal_destination {
// Use the .new() method to build a child logger which inherits all existing
// key-value pairs from its parent and supplements them with additional ones.
// This is the idiomatic way.
base_logger.new(o!("SYSLOG_IDENTIFIER" => "kata"))
} else {
base_logger
};
(logger, guard)
}
@@ -549,12 +502,7 @@ mod tests {
let record_key = "record-key-1";
let record_value = "record-key-2";
let (logger, guard) = create_logger_with_destination(
name,
source,
level,
LogDestination::File(Box::new(writer)),
);
let (logger, guard) = create_logger(name, source, level, writer);
let msg = "foo, bar, baz";
@@ -713,12 +661,7 @@ mod tests {
.reopen()
.unwrap_or_else(|_| panic!("{:?}: failed to clone tempfile", msg));
let (logger, logger_guard) = create_logger_with_destination(
name,
source,
d.slog_level,
LogDestination::File(Box::new(writer)),
);
let (logger, logger_guard) = create_logger(name, source, d.slog_level, writer);
// Call the logger (which calls the drain)
(d.closure)(&logger, d.msg.to_owned());

View File

@@ -115,7 +115,7 @@ impl From<oci::PosixRlimit> for grpc::POSIXRlimit {
impl From<oci::Process> for grpc::Process {
fn from(from: oci::Process) -> Self {
grpc::Process {
Terminal: from.terminal().is_some_and(|t| t),
Terminal: from.terminal().map_or(false, |t| t),
ConsoleSize: from_option(from.console_size()),
User: from_option(Some(from.user().clone())),
Args: option_vec_to_vec(from.args()),
@@ -161,7 +161,7 @@ impl From<oci::LinuxMemory> for grpc::LinuxMemory {
Kernel: from.kernel().map_or(0, |t| t),
KernelTCP: from.kernel_tcp().map_or(0, |t| t),
Swappiness: from.swappiness().map_or(0, |t| t),
DisableOOMKiller: from.disable_oom_killer().is_some_and(|t| t),
DisableOOMKiller: from.disable_oom_killer().map_or(false, |t| t),
..Default::default()
}
}

View File

@@ -355,7 +355,6 @@ mod tests {
.read(false)
.write(true)
.create(true)
.truncate(true)
.mode(0o200)
.open(&path)
.unwrap();
@@ -377,7 +376,6 @@ mod tests {
.read(false)
.write(true)
.create(true)
.truncate(true)
.mode(0o200)
.open(&path)
.unwrap();

View File

@@ -90,7 +90,7 @@ pub fn mgmt_socket_addr(sid: &str) -> Result<String> {
));
}
get_uds_with_sid(sid, sb_storage_path()?)
get_uds_with_sid(sid, &sb_storage_path()?)
}
#[cfg(test)]

View File

@@ -851,16 +851,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "crypto-mac"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
dependencies = [
"generic-array",
"subtle",
]
[[package]]
name = "darling"
version = "0.14.4"
@@ -1797,16 +1787,6 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "hmac"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
dependencies = [
"crypto-mac",
"digest 0.9.0",
]
[[package]]
name = "hmac"
version = "0.12.1"
@@ -2324,23 +2304,6 @@ version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
[[package]]
name = "libsystemd"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f4f0b5b062ba67aa075e331de778082c09e66b5ef32970ea5a1e9c37c9555d1"
dependencies = [
"hmac 0.11.0",
"libc",
"log",
"nix 0.23.2",
"once_cell",
"serde",
"sha2 0.9.3",
"thiserror 1.0.69",
"uuid 0.8.2",
]
[[package]]
name = "libz-sys"
version = "1.1.22"
@@ -2427,7 +2390,6 @@ dependencies = [
"serde_json",
"slog",
"slog-async",
"slog-journald",
"slog-json",
"slog-scope",
"slog-term",
@@ -2835,7 +2797,7 @@ dependencies = [
"bitflags 1.3.2",
"fuse-backend-rs",
"hex",
"hmac 0.12.1",
"hmac",
"httpdate",
"lazy_static",
"libc",
@@ -4502,16 +4464,6 @@ dependencies = [
"thread_local",
]
[[package]]
name = "slog-journald"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83e14eb8c2f5d0c8fc9fbac40e6391095e4dc5cb334f7dce99c75cb1919eb39c"
dependencies = [
"libsystemd",
"slog",
]
[[package]]
name = "slog-json"
version = "2.6.1"
@@ -4665,9 +4617,9 @@ dependencies = [
[[package]]
name = "subtle"
version = "2.4.1"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
@@ -5243,15 +5195,6 @@ dependencies = [
"rand 0.3.23",
]
[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
"serde",
]
[[package]]
name = "uuid"
version = "1.16.0"

View File

@@ -147,7 +147,6 @@ DEFMAXMEMSZ := 0
##VAR DEFBRIDGES=<number> Default number of bridges
DEFBRIDGES := 0
DEFENABLEANNOTATIONS := [\"kernel_params\"]
DEFENABLEANNOTATIONS_COCO := [\"kernel_params\",\"cc_init_data\"]
DEFDISABLEGUESTSECCOMP := true
DEFDISABLEGUESTEMPTYDIR := false
##VAR DEFAULTEXPFEATURES=[features] Default experimental features enabled
@@ -483,7 +482,6 @@ USER_VARS += DEFVIRTIOFSCACHE
USER_VARS += DEFVIRTIOFSQUEUESIZE
USER_VARS += DEFVIRTIOFSEXTRAARGS
USER_VARS += DEFENABLEANNOTATIONS
USER_VARS += DEFENABLEANNOTATIONS_COCO
USER_VARS += DEFENABLEIOTHREADS
USER_VARS += DEFSECCOMPSANDBOXPARAM
USER_VARS += DEFGUESTSELINUXLABEL

View File

@@ -195,9 +195,6 @@ block_device_driver = "virtio-blk-pci"
# result in memory pre allocation
#enable_hugepages = true
# Disable the 'seccomp' feature from Cloud Hypervisor or firecracker, default false
# disable_seccomp = true
# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#

View File

@@ -45,7 +45,7 @@ confidential_guest = true
# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
enable_annotations = @DEFENABLEANNOTATIONS_COCO@
enable_annotations = @DEFENABLEANNOTATIONS@
# List of valid annotations values for the hypervisor
# Each member of the list is a path pattern as described by glob(3).
@@ -541,7 +541,7 @@ kernel_modules=[]
# Agent dial timeout in millisecond.
# (default: 10)
dial_timeout_ms = 90
dial_timeout_ms = 30
# Agent reconnect timeout in millisecond.
# Retry times = reconnect_timeout_ms / dial_timeout_ms (default: 300)
@@ -550,7 +550,7 @@ dial_timeout_ms = 90
# You'd better not change the value of dial_timeout_ms, unless you have an
# idea of what you are doing.
# (default: 3000)
reconnect_timeout_ms = 5000
#reconnect_timeout_ms = 3000
# Create Container Request Timeout
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.

View File

@@ -29,7 +29,7 @@ remote_hypervisor_timeout = 600
#
# Known limitations:
# * Does not work by design:
# - CPU Hotplug
# - CPU Hotplug
# - Memory Hotplug
# - NVDIMM devices
#
@@ -41,7 +41,7 @@ remote_hypervisor_timeout = 600
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
# Note: Remote hypervisor is only handling the following annotations
enable_annotations = ["machine_type", "default_memory", "default_vcpus", "default_gpus", "default_gpu_model", "cc_init_data"]
enable_annotations = ["machine_type", "default_memory", "default_vcpus", "default_gpus", "default_gpu_model"]
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
@@ -251,7 +251,7 @@ disable_guest_seccomp=true
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
# Note: The remote hypervisor has a different networking model, which requires true
# Note: The remote hypervisor has a different networking model, which requires true
disable_new_netns = false
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.

View File

@@ -145,9 +145,6 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# result in memory pre allocation
#enable_hugepages = true
# Disable the 'seccomp' feature from Cloud Hypervisor or firecracker, default false
# disable_seccomp = true
# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's

View File

@@ -319,12 +319,12 @@ impl TryFrom<(CpuInfo, GuestProtection)> for CpusConfig {
let guest_protection_to_use = args.1;
// This can only happen if runtime-rs fails to set default values.
if cpu.default_vcpus <= 0.0 {
if cpu.default_vcpus <= 0 {
return Err(CpusConfigError::BootVCPUsTooSmall);
}
let default_vcpus = u8::try_from(cpu.default_vcpus.ceil() as u32)
.map_err(CpusConfigError::BootVCPUsTooBig)?;
let default_vcpus =
u8::try_from(cpu.default_vcpus).map_err(CpusConfigError::BootVCPUsTooBig)?;
// This can only happen if runtime-rs fails to set default values.
if cpu.default_maxvcpus == 0 {
@@ -611,7 +611,7 @@ mod tests {
};
let cpu_info = CpuInfo {
default_vcpus: cpu_default as f32,
default_vcpus: cpu_default as i32,
default_maxvcpus,
..Default::default()
@@ -1159,7 +1159,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: -1.0,
default_vcpus: -1,
..Default::default()
},
@@ -1168,7 +1168,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 0,
..Default::default()
@@ -1178,7 +1178,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 9.0,
default_vcpus: 9,
default_maxvcpus: 7,
..Default::default()
@@ -1188,7 +1188,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
..Default::default()
},
@@ -1208,7 +1208,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 3,
..Default::default()
},
@@ -1228,7 +1228,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 13,
..Default::default()
},
@@ -1823,7 +1823,7 @@ mod tests {
cfg: HypervisorConfig {
cpu_info: CpuInfo {
default_vcpus: 0.0,
default_vcpus: 0,
..cpu_info.clone()
},
@@ -1939,7 +1939,7 @@ mod tests {
vsock_socket_path: "vsock_socket_path".into(),
cfg: HypervisorConfig {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
..Default::default()
@@ -1963,7 +1963,7 @@ mod tests {
..Default::default()
},
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
..Default::default()

View File

@@ -103,9 +103,6 @@ impl FcInner {
cmd.args(["--api-sock", &self.asock_path]);
}
}
if self.config.security_info.disable_seccomp {
cmd.arg("--no-seccomp");
}
debug!(sl(), "Exec: {:?}", cmd);
// Make sure we're in the correct Network Namespace

View File

@@ -8,12 +8,10 @@ use anyhow::{anyhow, Result};
use crate::{
VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_BLK_CCW, VM_ROOTFS_DRIVER_MMIO, VM_ROOTFS_DRIVER_PMEM,
VM_ROOTFS_FILESYSTEM_EROFS, VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS,
VM_ROOTFS_ROOT_BLK, VM_ROOTFS_ROOT_PMEM,
};
use kata_types::config::LOG_VPORT_OPTION;
use kata_types::fs::{
VM_ROOTFS_FILESYSTEM_EROFS, VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS,
};
// Port where the agent will send the logs. Logs are sent through the vsock in cases
// where the hypervisor has no console.sock, i.e dragonball
@@ -181,10 +179,9 @@ mod tests {
use super::*;
use crate::{
VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM, VM_ROOTFS_ROOT_BLK, VM_ROOTFS_ROOT_PMEM,
};
use kata_types::fs::{
VM_ROOTFS_FILESYSTEM_EROFS, VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS,
VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM, VM_ROOTFS_FILESYSTEM_EROFS,
VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS, VM_ROOTFS_ROOT_BLK,
VM_ROOTFS_ROOT_PMEM,
};
#[test]

View File

@@ -47,6 +47,11 @@ const VM_ROOTFS_DRIVER_MMIO: &str = "virtio-blk-mmio";
const VM_ROOTFS_ROOT_BLK: &str = "/dev/vda1";
const VM_ROOTFS_ROOT_PMEM: &str = "/dev/pmem0p1";
// Config which filesystem to use as rootfs type
const VM_ROOTFS_FILESYSTEM_EXT4: &str = "ext4";
const VM_ROOTFS_FILESYSTEM_XFS: &str = "xfs";
const VM_ROOTFS_FILESYSTEM_EROFS: &str = "erofs";
// before using hugepages for VM, we need to mount hugetlbfs
// /dev/hugepages will be the mount point
// mkdir -p /dev/hugepages

View File

@@ -2182,14 +2182,6 @@ impl<'a> QemuCmdLine<'a> {
qemu_cmd_line.add_virtio_balloon();
}
if let Some(seccomp_sandbox) = &config
.security_info
.seccomp_sandbox
.as_ref()
.filter(|s| !s.is_empty())
{
qemu_cmd_line.add_seccomp_sandbox(seccomp_sandbox);
}
Ok(qemu_cmd_line)
}
@@ -2628,11 +2620,6 @@ impl<'a> QemuCmdLine<'a> {
Ok(())
}
pub fn add_seccomp_sandbox(&mut self, param: &str) {
let seccomp_sandbox = SeccompSandbox::new(param);
self.devices.push(Box::new(seccomp_sandbox));
}
pub async fn build(&self) -> Result<Vec<String>> {
let mut result = Vec::new();
@@ -2719,23 +2706,3 @@ impl ToQemuParams for DeviceVirtioBalloon {
])
}
}
#[derive(Debug)]
struct SeccompSandbox {
param: String,
}
impl SeccompSandbox {
fn new(param: &str) -> Self {
SeccompSandbox {
param: param.to_owned(),
}
}
}
#[async_trait]
impl ToQemuParams for SeccompSandbox {
async fn qemu_params(&self) -> Result<Vec<String>> {
Ok(vec!["-sandbox".to_owned(), self.param.clone()])
}
}

View File

@@ -25,7 +25,7 @@ pub struct CpuResource {
pub(crate) current_vcpu: Arc<RwLock<u32>>,
/// Default number of vCPUs
pub(crate) default_vcpu: f32,
pub(crate) default_vcpu: u32,
/// CpuResource of each container
pub(crate) container_cpu_resources: Arc<RwLock<HashMap<String, LinuxContainerCpuResources>>>,
@@ -40,7 +40,7 @@ impl CpuResource {
.context(format!("failed to get hypervisor {}", hypervisor_name))?;
Ok(Self {
current_vcpu: Arc::new(RwLock::new(hypervisor_config.cpu_info.default_vcpus as u32)),
default_vcpu: hypervisor_config.cpu_info.default_vcpus,
default_vcpu: hypervisor_config.cpu_info.default_vcpus as u32,
container_cpu_resources: Arc::new(RwLock::new(HashMap::new())),
})
}
@@ -117,66 +117,27 @@ impl CpuResource {
// calculates the total required vcpus by adding each container's requirements within the pod
async fn calc_cpu_resources(&self) -> Result<u32> {
let resources = self.container_cpu_resources.read().await;
if resources.is_empty() {
return Ok(self.default_vcpu.ceil() as u32);
}
// If requests of individual containers are expresses with different
// periods we'll need to rewrite them with a common denominator
// (period) before we can add the numerators (quotas). We choose
// to use the largest period as the common denominator since it
// shifts precision out of the fractional part and into the
// integral part in case a rewritten quota ends up non-integral.
let max_period = resources
.iter()
.map(|(_, cpu_resource)| cpu_resource.period())
.max()
// It's ok to unwrap() here as we have checked that 'resources' is
// not empty.
.unwrap() as f64;
let mut total_vcpu = 0;
let mut cpuset_vcpu: HashSet<u32> = HashSet::new();
// Even though summing up quotas is fixed-point conceptually we
// represent the sum as floating-point because
// - we might be rewriting the quota/period fractions if periods
// vary,and a rewritten quota can end up non-integral. We want
// to preserve the fractional parts until the final rounding
// not to lose precision inadvertenty.
// - also to avoid some tedious casting doing maths with quotas.
// Using a 64-bit float to represent what are conceptually integral
// numbers should be safe here - f64 starts losing precision for
// integers only past 2^53 and a sums of quotas are extremely unlikely
// to reach that magnitude.
let mut total_quota: f64 = 0.0;
let resources = self.container_cpu_resources.read().await;
for (_, cpu_resource) in resources.iter() {
let vcpu = cpu_resource.get_vcpus().unwrap_or(0) as u32;
cpuset_vcpu.extend(cpu_resource.cpuset().iter());
let quota = cpu_resource.quota() as f64;
let period = cpu_resource.period() as f64;
if quota >= 0.0 && period != 0.0 {
total_quota += quota * (max_period / period);
}
total_vcpu += vcpu;
}
// contrained only by cpuset
if total_quota == 0.0 && !cpuset_vcpu.is_empty() {
if total_vcpu == 0 && !cpuset_vcpu.is_empty() {
info!(sl!(), "(from cpuset)get vcpus # {:?}", cpuset_vcpu);
return Ok(cpuset_vcpu.len() as u32);
}
let total_vcpu = if total_quota > 0.0 && max_period != 0.0 {
self.default_vcpu as f64 + total_quota / max_period
} else {
self.default_vcpu as f64
};
info!(
sl!(),
"(from cfs_quota&cfs_period)get vcpus count {}", total_vcpu
);
Ok(total_vcpu.ceil() as u32)
Ok(total_vcpu)
}
// do hotplug and hot-unplug the vcpu
@@ -198,7 +159,7 @@ impl CpuResource {
// do not reduce computing power
// the number of vcpus would not be lower than the default size
let new_vcpus = cmp::max(new_vcpus, self.default_vcpu.ceil() as u32);
let new_vcpus = cmp::max(new_vcpus, self.default_vcpu);
let (_, new) = hypervisor
.resize_vcpu(old_vcpus, new_vcpus)
@@ -208,238 +169,3 @@ impl CpuResource {
Ok(new)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use kata_types::config::{Hypervisor, TomlConfig};
    use oci::LinuxCpu;

    /// Build a `CpuResource` from a minimal config that selects the "qemu"
    /// hypervisor and sets its `default_vcpus` to the given (possibly
    /// fractional) value; everything else keeps its default.
    fn get_cpu_resource_with_default_vcpus(default_vcpus: f32) -> CpuResource {
        let mut config = TomlConfig::default();
        // Create the hypervisor section on demand and override only the
        // default vCPU count (one map lookup instead of insert + entry).
        config
            .hypervisor
            .entry("qemu".to_owned())
            .or_insert_with(Hypervisor::default)
            .cpu_info
            .default_vcpus = default_vcpus;
        config.runtime.hypervisor_name = "qemu".to_owned();
        CpuResource::new(Arc::new(config)).unwrap()
    }

    /// Register one container CPU resource per `(quota, period)` pair,
    /// keyed by its position in the vector, so `calc_cpu_resources()` has
    /// input to aggregate.
    async fn add_linux_container_cpu_resources(cpu_res: &mut CpuResource, res: Vec<(i64, u64)>) {
        let mut resources = cpu_res.container_cpu_resources.write().await;
        for (i, (quota, period)) in res.into_iter().enumerate() {
            let mut linux_cpu = LinuxCpu::default();
            linux_cpu.set_quota(Some(quota));
            linux_cpu.set_period(Some(period));
            let container_res = LinuxContainerCpuResources::try_from(&linux_cpu).unwrap();
            resources.insert(i.to_string(), container_res);
        }
    }

    // A lot of the following tests document why a fixed-point-style
    // calc_cpu_resources() implementation is better than a f32-based one.

    #[tokio::test]
    async fn test_rounding() {
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(0.0);
        // A f32-based calc_cpu_resources() implementation would fail this
        // test (adding 0.1 ten times gives roughly 1.0000001).
        // An f64-based implementation would pass this one (with the summation
        // result of 0.99999999999999989) but it still doesn't guarantee the
        // correct result in general. For instance, adding 0.1 twenty times
        // in 64 bits results in 2.0000000000000004.
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
                (100_000, 1_000_000),
            ],
        )
        .await;
        assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 1);
    }

    #[tokio::test]
    async fn test_big_allocation_1() {
        // Large whole-vCPU requests must be added exactly, plus defaults.
        let default_vcpus = 10.0;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![
                (32_000_000, 1_000_000),
                (32_000_000, 1_000_000),
                (64_000_000, 1_000_000),
            ],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            128 + default_vcpus as u32
        );
    }

    #[tokio::test]
    async fn test_big_allocation_2() {
        // A tiny fractional remainder (11/1_000_000 of a vCPU) must still
        // round the total up by one full vCPU.
        let default_vcpus = 10.0;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![
                (33_000_000, 1_000_000),
                (31_000_000, 1_000_000),
                (77_000_011, 1_000_000),
            ],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            (33 + 31 + 77 + 1) + default_vcpus as u32
        );
    }

    #[tokio::test]
    async fn test_big_allocation_3() {
        // A single large request with a fractional tail rounds up.
        let default_vcpus = 10.0;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(&mut cpu_resource, vec![(141_000_008, 1_000_000)]).await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            142 + default_vcpus as u32
        );
    }

    #[tokio::test]
    async fn test_big_allocation_4() {
        // Fractional tails accumulate across containers before rounding:
        // four requests of 17.000001 vCPUs each must yield 69, not 72.
        let default_vcpus = 10.0;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![
                (17_000_001, 1_000_000),
                (17_000_001, 1_000_000),
                (17_000_001, 1_000_000),
                (17_000_001, 1_000_000),
            ],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            (4 * 17 + 1) + default_vcpus as u32
        );
    }

    #[tokio::test]
    async fn test_divisible_periods() {
        // When periods evenly divide each other the quota/period ratios
        // sum without any rounding artifacts.
        let default_vcpus = 3.0;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![(1_000_000, 1_000_000), (1_000_000, 500_000)],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            3 + default_vcpus as u32
        );
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![(3_000_000, 1_500_000), (1_000_000, 500_000)],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            4 + default_vcpus as u32
        );
    }

    #[tokio::test]
    async fn test_indivisible_periods() {
        let default_vcpus = 1.0;
        // 900_000/300_000 is exactly 3 vCPUs: total is 1 + 3 = 4.
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![(1_000_000, 1_000_000), (900_000, 300_000)],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            4 + default_vcpus as u32
        );
        // 900_000/299_999 is slightly over 3 vCPUs, so the total must
        // round up to 5.
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![(1_000_000, 1_000_000), (900_000, 299_999)],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            5 + default_vcpus as u32
        );
    }

    #[tokio::test]
    async fn test_fractional_default_vcpus() {
        // A fractional default_vcpus participates in the sum before the
        // final ceiling is taken.
        let default_vcpus = 0.5;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(&mut cpu_resource, vec![(250_000, 1_000_000)]).await;
        assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 1);
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(&mut cpu_resource, vec![(500_000, 1_000_000)]).await;
        assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 1);
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(&mut cpu_resource, vec![(500_001, 1_000_000)]).await;
        assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 2);
        // This test doesn't pass because 0.1 is periodic in binary and thus
        // not exactly representable by a float of any width for fundamental
        // reasons. Its actual representation is slightly over 0.1
        // (e.g. 0.100000001 in f32), which after adding the 900_000/1_000_000
        // container request pushes the sum over 1.
        // I don't think this problem is solvable without expressing
        // 'default_vcpus' in configuration.toml in a fixed point manner (e.g.
        // as an integral percentage of a vCPU).
        /*
        let default_vcpus = 0.1;
        let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
        add_linux_container_cpu_resources(
            &mut cpu_resource,
            vec![(900_000, 1_000_000)],
        )
        .await;
        assert_eq!(
            cpu_resource.calc_cpu_resources().await.unwrap(),
            1
        );
        */
    }
}

Some files were not shown because too many files have changed in this diff Show More