Compare commits

..

3 Commits

Author SHA1 Message Date
stevenhorsman
f60dd4891d DO NOT MERGE: Comment out tests for a reduced CI burden 2025-07-17 14:51:19 +01:00
stevenhorsman
01e65b804a WIP: workflows: Trigger CI tests with gh cli
- We are hitting limitations where gh can only call actions with 20 workflow jobs,
so split out the CI test jobs as separate workflows and call them once the required
artifacts are created.

- Note: This commit updates the test runs to use the source branch
workflows, rather than target branch that we have at the moment,
we need to think about this pretty carefully to check there aren't
any security concerns here (I think there aren't as we are already
vulnerable in this respect)

WIP: Run the workflow on the ref, or the `main` version?
2025-07-17 14:51:19 +01:00
stevenhorsman
b6d5820434 tests: Add gha-helper function with trigger action
Add a script for triggering a gha action given the workflow file,
ref and input json, so we can reduce duplicated code in our workflow file
and make it easier to update in a single place
2025-07-17 14:51:19 +01:00
190 changed files with 4513 additions and 7284 deletions

View File

@@ -23,4 +23,3 @@ self-hosted-runner:
- s390x
- s390x-large
- tdx
- amd64-nvidia-a100

View File

@@ -49,8 +49,6 @@ jobs:
- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -91,8 +89,6 @@ jobs:
- name: Install dependencies
run: bash tests/stability/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -136,8 +132,6 @@ jobs:
- name: Install dependencies
run: bash tests/integration/nydus/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -215,8 +209,6 @@ jobs:
- name: Install dependencies
run: bash tests/functional/tracing/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -261,8 +253,6 @@ jobs:
- name: Install dependencies
run: bash tests/functional/vfio/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -304,8 +294,6 @@ jobs:
- name: Install dependencies
run: bash tests/integration/docker/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -351,7 +339,6 @@ jobs:
- name: Install dependencies
env:
GITHUB_API_TOKEN: ${{ github.token }}
GH_TOKEN: ${{ github.token }}
run: bash tests/integration/nerdctl/gha-run.sh install-dependencies
- name: get-kata-tarball
@@ -396,8 +383,6 @@ jobs:
- name: Install dependencies
run: bash tests/functional/kata-agent-apis/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0

View File

@@ -48,9 +48,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh
env:
GH_TOKEN: ${{ github.token }}
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0

View File

@@ -23,8 +23,6 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: false
KBUILD_SIGN_PIN:
required: true
permissions:
contents: read
@@ -97,7 +95,6 @@ jobs:
- name: Build ${{ matrix.asset }}
id: build
run: |
[[ "${KATA_ASSET}" == *"nvidia"* ]] && echo "KBUILD_SIGN_PIN=${{ secrets.KBUILD_SIGN_PIN }}" >> "${GITHUB_ENV}"
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
@@ -204,7 +201,6 @@ jobs:
- name: Build ${{ matrix.asset }}
id: build
run: |
[[ "${KATA_ASSET}" == *"nvidia"* ]] && echo "KBUILD_SIGN_PIN=${{ secrets.KBUILD_SIGN_PIN }}" >> "${GITHUB_ENV}"
./tests/gha-adjust-to-use-prebuilt-components.sh kata-artifacts "${KATA_ASSET}"
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
@@ -347,8 +343,6 @@ jobs:
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
env:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
- name: store-artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:

View File

@@ -313,8 +313,6 @@ jobs:
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
env:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
- name: store-artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:

View File

@@ -256,8 +256,6 @@ jobs:
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
env:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
- name: store-artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:

View File

@@ -342,8 +342,6 @@ jobs:
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
env:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
- name: store-artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:

View File

@@ -31,4 +31,3 @@ jobs:
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -8,10 +8,11 @@ permissions:
jobs:
kata-containers-ci-on-push:
permissions:
contents: read
packages: write
id-token: write
actions: write
attestations: write
contents: read
id-token: write
packages: write
uses: ./.github/workflows/ci.yaml
with:
commit-hash: ${{ github.sha }}
@@ -27,8 +28,6 @@ jobs:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-checks:
uses: ./.github/workflows/build-checks.yaml

View File

@@ -13,10 +13,11 @@ permissions:
jobs:
kata-containers-ci-on-push:
permissions:
contents: read
packages: write
id-token: write
actions: write
attestations: write
contents: read
id-token: write
packages: write
uses: ./.github/workflows/ci.yaml
with:
commit-hash: ${{ github.sha }}
@@ -31,5 +32,3 @@ jobs:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -3,6 +3,7 @@ on:
pull_request_target:
branches:
- 'main'
- 'stable-*'
types:
# Adding 'labeled' to the list of activity types that trigger this event
# (default: opened, synchronize, reopened) so that we can run this
@@ -32,10 +33,11 @@ jobs:
needs: skipper
if: ${{ needs.skipper.outputs.skip_build != 'yes' }}
permissions:
contents: read
packages: write
id-token: write
actions: write
attestations: write
contents: read
id-token: write
packages: write
uses: ./.github/workflows/ci.yaml
with:
commit-hash: ${{ github.event.pull_request.head.sha }}
@@ -51,5 +53,3 @@ jobs:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -27,8 +27,6 @@ on:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions:
contents: read
@@ -45,8 +43,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64

View File

@@ -35,10 +35,6 @@ on:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
NGC_API_KEY:
required: true
KBUILD_SIGN_PIN:
required: true
permissions:
contents: read
@@ -56,8 +52,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
@@ -77,112 +71,112 @@ jobs:
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-kata-static-tarball-arm64:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
# build-kata-static-tarball-arm64:
# permissions:
# contents: read
# packages: write
# id-token: write
# attestations: write
# uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
publish-kata-deploy-payload-arm64:
needs: build-kata-static-tarball-arm64
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-arm64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04-arm
arch: arm64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
# publish-kata-deploy-payload-arm64:
# needs: build-kata-static-tarball-arm64
# permissions:
# contents: read
# packages: write
# uses: ./.github/workflows/publish-kata-deploy-payload.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-arm64
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: ubuntu-22.04-arm
# arch: arm64
# secrets:
# QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-kata-static-tarball-s390x:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
CI_HKD_PATH: ${{ secrets.ci_hkd_path }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
# build-kata-static-tarball-s390x:
# permissions:
# contents: read
# packages: write
# id-token: write
# attestations: write
# uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# secrets:
# CI_HKD_PATH: ${{ secrets.ci_hkd_path }}
# QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-kata-static-tarball-ppc64le:
permissions:
contents: read
packages: write
uses: ./.github/workflows/build-kata-static-tarball-ppc64le.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
# build-kata-static-tarball-ppc64le:
# permissions:
# contents: read
# packages: write
# uses: ./.github/workflows/build-kata-static-tarball-ppc64le.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# secrets:
# QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-kata-static-tarball-riscv64:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-riscv64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
# build-kata-static-tarball-riscv64:
# permissions:
# contents: read
# packages: write
# id-token: write
# attestations: write
# uses: ./.github/workflows/build-kata-static-tarball-riscv64.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# secrets:
# QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
publish-kata-deploy-payload-s390x:
needs: build-kata-static-tarball-s390x
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-s390x
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: s390x
arch: s390x
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
# publish-kata-deploy-payload-s390x:
# needs: build-kata-static-tarball-s390x
# permissions:
# contents: read
# packages: write
# uses: ./.github/workflows/publish-kata-deploy-payload.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-s390x
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: s390x
# arch: s390x
# secrets:
# QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
publish-kata-deploy-payload-ppc64le:
needs: build-kata-static-tarball-ppc64le
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-ppc64le
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ppc64le
arch: ppc64le
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
# publish-kata-deploy-payload-ppc64le:
# needs: build-kata-static-tarball-ppc64le
# permissions:
# contents: read
# packages: write
# uses: ./.github/workflows/publish-kata-deploy-payload.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-ppc64le
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: ppc64le
# arch: ppc64le
# secrets:
# QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-and-publish-tee-confidential-unencrypted-image:
permissions:
@@ -281,29 +275,61 @@ jobs:
run-kata-monitor-tests:
if: ${{ inputs.skip-test != 'yes' }}
runs-on: ubuntu-22.04
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-kata-monitor-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
env:
GH_TOKEN: ${{ github.token }}
permissions:
actions: write # Permission to trigger the gh workflows
steps:
- name: Checkout the code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
persist-credentials: false
- name: Trigger kata monitor tests
run: |
./tests/gha-helper.sh trigger-and-check-workflow \
run-kata-monitor-tests.yaml \
${{ github.ref }} \
${{ github.sha }} \
'{
"artifact-run-id":"${{ github.run_id }}",
"tarball-suffix":"-${{ inputs.tag }}",
"commit-hash":"${{ inputs.commit-hash }}",
"target-branch":"${{ inputs.target-branch }}"
}'
run-k8s-tests-on-aks:
if: ${{ inputs.skip-test != 'yes' }}
runs-on: ubuntu-22.04
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
env:
GH_TOKEN: ${{ github.token }}
permissions:
actions: write # Permission to trigger the gh workflows
steps:
- name: Checkout the code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
persist-credentials: false
- name: Trigger run-k8s-tests-on-aks workflow
run: |
./tests/gha-helper.sh trigger-and-check-workflow \
run-k8s-tests-on-aks.yaml \
${{ github.ref }} \
${{ github.sha }} \
'{
"artifact-run-id":"${{ github.run_id }}",
"tarball-suffix":"-${{ inputs.tag }}",
"registry":"ghcr.io",
"repo":"${{ github.repository_owner }}/kata-deploy-ci",
"tag":"${{ inputs.tag }}-amd64",
"commit-hash":"${{ inputs.commit-hash }}",
"pr-number":"${{ inputs.pr-number }}",
"target-branch":"${{ inputs.target-branch }}"
}'
run-k8s-tests-on-amd64:
if: ${{ inputs.skip-test != 'yes' }}
@@ -317,195 +343,206 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
run-k8s-tests-on-arm64:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-arm64
uses: ./.github/workflows/run-k8s-tests-on-arm64.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-arm64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
run-k8s-tests-on-nvidia-gpu:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-amd64
uses: ./.github/workflows/run-k8s-tests-on-nvidia-gpu.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
# run-k8s-tests-on-arm64:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: publish-kata-deploy-payload-arm64
# uses: ./.github/workflows/run-k8s-tests-on-arm64.yaml
# with:
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-arm64
# commit-hash: ${{ inputs.commit-hash }}
# pr-number: ${{ inputs.pr-number }}
# target-branch: ${{ inputs.target-branch }}
run-kata-coco-tests:
if: ${{ inputs.skip-test != 'yes' }}
runs-on: ubuntu-22.04
needs:
- publish-kata-deploy-payload-amd64
- build-and-publish-tee-confidential-unencrypted-image
- publish-csi-driver-amd64
uses: ./.github/workflows/run-kata-coco-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
ITA_KEY: ${{ secrets.ITA_KEY }}
- publish-kata-deploy-payload-amd64
- build-and-publish-tee-confidential-unencrypted-image
- publish-csi-driver-amd64
env:
GH_TOKEN: ${{ github.token }}
permissions:
actions: write # Permission to trigger the gh workflows
steps:
- name: Checkout the code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
persist-credentials: false
- name: Trigger kata-coco-tests for amd64 arch
run: |
./tests/gha-helper.sh trigger-and-check-workflow \
run-kata-coco-tests.yaml \
${{ github.ref }} \
${{ github.sha }} \
'{
"artifact-run-id":"${{ github.run_id }}",
"tarball-suffix":"-${{ inputs.tag }}",
"registry":"ghcr.io",
"repo":"${{ github.repository_owner }}/kata-deploy-ci",
"tag":"${{ inputs.tag }}-amd64",
"commit-hash":"${{ inputs.commit-hash }}",
"pr-number":"${{ inputs.pr-number }}",
"target-branch":"${{ inputs.target-branch }}"
}'
run-k8s-tests-on-zvsi:
if: ${{ inputs.skip-test != 'yes' }}
needs: [publish-kata-deploy-payload-s390x, build-and-publish-tee-confidential-unencrypted-image]
uses: ./.github/workflows/run-k8s-tests-on-zvsi.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-s390x
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
# run-k8s-tests-on-zvsi:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: [publish-kata-deploy-payload-s390x, build-and-publish-tee-confidential-unencrypted-image]
# uses: ./.github/workflows/run-k8s-tests-on-zvsi.yaml
# with:
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-s390x
# commit-hash: ${{ inputs.commit-hash }}
# pr-number: ${{ inputs.pr-number }}
# target-branch: ${{ inputs.target-branch }}
# secrets:
# AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
run-k8s-tests-on-ppc64le:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-ppc64le
uses: ./.github/workflows/run-k8s-tests-on-ppc64le.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-ppc64le
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
# run-k8s-tests-on-ppc64le:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: publish-kata-deploy-payload-ppc64le
# uses: ./.github/workflows/run-k8s-tests-on-ppc64le.yaml
# with:
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-ppc64le
# commit-hash: ${{ inputs.commit-hash }}
# pr-number: ${{ inputs.pr-number }}
# target-branch: ${{ inputs.target-branch }}
run-kata-deploy-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: [publish-kata-deploy-payload-amd64]
uses: ./.github/workflows/run-kata-deploy-tests.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
# run-kata-deploy-tests:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: [publish-kata-deploy-payload-amd64]
# uses: ./.github/workflows/run-kata-deploy-tests.yaml
# with:
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-amd64
# commit-hash: ${{ inputs.commit-hash }}
# pr-number: ${{ inputs.pr-number }}
# target-branch: ${{ inputs.target-branch }}
run-basic-amd64-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/basic-ci-amd64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
# run-metrics-tests:
# # Skip metrics tests whilst runner is broken
# if: false
# # if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-amd64
# uses: ./.github/workflows/run-metrics.yaml
# with:
# registry: ghcr.io
# repo: ${{ github.repository_owner }}/kata-deploy-ci
# tag: ${{ inputs.tag }}-amd64
# commit-hash: ${{ inputs.commit-hash }}
# pr-number: ${{ inputs.pr-number }}
# target-branch: ${{ inputs.target-branch }}
run-basic-s390x-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-s390x
uses: ./.github/workflows/basic-ci-s390x.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
# run-basic-amd64-tests:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-amd64
# uses: ./.github/workflows/basic-ci-amd64.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
run-cri-containerd-amd64:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: lts, vmm: clh },
{ containerd_version: lts, vmm: dragonball },
{ containerd_version: lts, vmm: qemu },
{ containerd_version: lts, vmm: stratovirt },
{ containerd_version: lts, vmm: cloud-hypervisor },
{ containerd_version: lts, vmm: qemu-runtime-rs },
{ containerd_version: active, vmm: clh },
{ containerd_version: active, vmm: dragonball },
{ containerd_version: active, vmm: qemu },
{ containerd_version: active, vmm: stratovirt },
{ containerd_version: active, vmm: cloud-hypervisor },
{ containerd_version: active, vmm: qemu-runtime-rs },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04
arch: amd64
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}
# run-basic-s390x-tests:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-s390x
# uses: ./.github/workflows/basic-ci-s390x.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
run-cri-containerd-s390x:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-s390x
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
{ containerd_version: active, vmm: qemu-runtime-rs },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: s390x-large
arch: s390x
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}
# run-cri-containerd-amd64:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-amd64
# strategy:
# fail-fast: false
# matrix:
# params: [
# { containerd_version: lts, vmm: clh },
# { containerd_version: lts, vmm: dragonball },
# { containerd_version: lts, vmm: qemu },
# { containerd_version: lts, vmm: stratovirt },
# { containerd_version: lts, vmm: cloud-hypervisor },
# { containerd_version: lts, vmm: qemu-runtime-rs },
# { containerd_version: active, vmm: clh },
# { containerd_version: active, vmm: dragonball },
# { containerd_version: active, vmm: qemu },
# { containerd_version: active, vmm: stratovirt },
# { containerd_version: active, vmm: cloud-hypervisor },
# { containerd_version: active, vmm: qemu-runtime-rs },
# ]
# uses: ./.github/workflows/run-cri-containerd-tests.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: ubuntu-22.04
# arch: amd64
# containerd_version: ${{ matrix.params.containerd_version }}
# vmm: ${{ matrix.params.vmm }}
run-cri-containerd-tests-ppc64le:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-ppc64le
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ppc64le
arch: ppc64le
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}
# run-cri-containerd-s390x:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-s390x
# strategy:
# fail-fast: false
# matrix:
# params: [
# { containerd_version: active, vmm: qemu },
# { containerd_version: active, vmm: qemu-runtime-rs },
# ]
# uses: ./.github/workflows/run-cri-containerd-tests.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: s390x-large
# arch: s390x
# containerd_version: ${{ matrix.params.containerd_version }}
# vmm: ${{ matrix.params.vmm }}
run-cri-containerd-tests-arm64:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-arm64
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: arm64-non-k8s
arch: arm64
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}
# run-cri-containerd-tests-ppc64le:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-ppc64le
# strategy:
# fail-fast: false
# matrix:
# params: [
# { containerd_version: active, vmm: qemu },
# ]
# uses: ./.github/workflows/run-cri-containerd-tests.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: ppc64le
# arch: ppc64le
# containerd_version: ${{ matrix.params.containerd_version }}
# vmm: ${{ matrix.params.vmm }}
# run-cri-containerd-tests-arm64:
# if: ${{ inputs.skip-test != 'yes' }}
# needs: build-kata-static-tarball-arm64
# strategy:
# fail-fast: false
# matrix:
# params: [
# { containerd_version: active, vmm: qemu },
# ]
# uses: ./.github/workflows/run-cri-containerd-tests.yaml
# with:
# tarball-suffix: -${{ inputs.tag }}
# commit-hash: ${{ inputs.commit-hash }}
# target-branch: ${{ inputs.target-branch }}
# runner: arm64-non-k8s
# arch: arm64
# containerd_version: ${{ matrix.params.containerd_version }}
# vmm: ${{ matrix.params.vmm }}

View File

@@ -1,41 +0,0 @@
# A sample workflow which sets up periodic OSV-Scanner scanning for vulnerabilities,
# in addition to a PR check which fails if new vulnerabilities are introduced.
#
# For more examples and options, including how to ignore specific vulnerabilities,
# see https://google.github.io/osv-scanner/github-action/
name: OSV-Scanner
on:
workflow_dispatch:
pull_request:
branches: [ "main" ]
schedule:
- cron: '0 1 * * 0'
push:
branches: [ "main" ]
jobs:
scan-scheduled:
permissions:
actions: read # # Required to upload SARIF file to CodeQL
contents: read # Read commit contents
security-events: write # Require writing security events to upload SARIF file to security tab
if: ${{ github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@b00f71e051ddddc6e46a193c31c8c0bf283bf9e6" # v2.1.0
with:
scan-args: |-
-r
./
scan-pr:
permissions:
actions: read # Required to upload SARIF file to CodeQL
contents: read # Read commit contents
security-events: write # Require writing security events to upload SARIF file to security tab
if: ${{ github.event_name == 'pull_request' }}
uses: "google/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@b00f71e051ddddc6e46a193c31c8c0bf283bf9e6" # v2.1.0
with:
# Example of specifying custom arguments
scan-args: |-
-r
./

View File

@@ -25,7 +25,6 @@ jobs:
target-branch: ${{ github.ref_name }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-assets-arm64:
permissions:

View File

@@ -8,8 +8,6 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions:
contents: read
@@ -22,7 +20,6 @@ jobs:
stage: release
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
permissions:
contents: read
packages: write

View File

@@ -35,7 +35,6 @@ jobs:
target-arch: amd64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-and-push-assets-arm64:
needs: release

View File

@@ -59,8 +59,6 @@ jobs:
- name: Install dependencies
timeout-minutes: 15
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball for ${{ inputs.arch }}
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0

View File

@@ -2,6 +2,11 @@ name: CI | Run kubernetes tests on AKS
on:
workflow_call:
inputs:
artifact-run-id:
description: "The run id where the artifact was uploaded"
required: false
type: string
default: ${{ github.run_id }}
tarball-suffix:
required: false
type: string
@@ -32,6 +37,41 @@ on:
required: true
AZ_SUBSCRIPTION_ID:
required: true
workflow_dispatch:
inputs:
artifact-run-id:
description: "The workflow run id where the artifact was uploaded"
required: true
type: string
tarball-suffix:
description: "The suffix of the kata tarball to use"
required: false
type: string
registry:
description: "The oci container registry to install kata-deploy from"
required: true
type: string
repo:
description: "The oci container repository/image to install kata-deploy from"
required: true
type: string
tag:
description: "The oci container image tag to install kata-deploy using"
required: true
type: string
pr-number:
description: "Identifier used to distinguish between PRs/dev/nightly tests"
required: true
type: string
commit-hash:
description: "The code to checkout for testing"
required: false
type: string
target-branch:
description: "The target branch to rebase on and ensure the tests are up-to-date"
required: false
type: string
default: ""
permissions:
@@ -101,6 +141,9 @@ jobs:
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
run-id: ${{ inputs.artifact-run-id }}
github-token: ${{ github.token }}
repository: ${{ github.repository}}
path: kata-artifacts
- name: Install kata

View File

@@ -1,89 +0,0 @@
name: CI | Run NVIDIA GPU kubernetes tests on arm64
on:
workflow_call:
inputs:
registry:
required: true
type: string
repo:
required: true
type: string
tag:
required: true
type: string
pr-number:
required: true
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
secrets:
NGC_API_KEY:
required: true
permissions: {}
jobs:
run-nvidia-gpu-tests-on-amd64:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-nvidia-gpu
k8s:
- kubeadm
runs-on: amd64-nvidia-a100
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: ${{ matrix.k8s }}
USING_NFD: "false"
K8S_TEST_HOST_TYPE: all
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
- name: Install `bats`
run: bash tests/integration/kubernetes/gha-run.sh install-bats
- name: Run tests
timeout-minutes: 30
run: bash tests/integration/kubernetes/gha-run.sh run-nv-tests
env:
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
- name: Collect artifacts ${{ matrix.vmm }}
if: always()
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
continue-on-error: true
- name: Archive artifacts ${{ matrix.vmm }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.k8s }}-${{ inputs.tag }}
path: /tmp/artifacts
retention-days: 1
- name: Delete kata-deploy
if: always()
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh cleanup

View File

@@ -91,6 +91,9 @@ jobs:
- name: Install kata
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-artifacts
- name: Download Azure CLI
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
- name: Log into the Azure account
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:

View File

@@ -2,6 +2,11 @@ name: CI | Run kata coco tests
on:
workflow_call:
inputs:
artifact-run-id:
description: "The run id where the artifact was uploaded"
required: false
type: string
default: ${{ github.run_id }}
tarball-suffix:
required: false
type: string
@@ -35,181 +40,216 @@ on:
required: true
ITA_KEY:
required: true
workflow_dispatch:
inputs:
artifact-run-id:
description: "The workflow run id where the artifact was uploaded"
required: true
type: string
tarball-suffix:
description: "The suffix of the kata tarball to use"
required: false
type: string
registry:
description: "The oci container registry to install kata-deploy from"
required: true
type: string
repo:
description: "The oci container repository/image to install kata-deploy from"
required: true
type: string
tag:
description: "The oci container image tag to install kata-deploy using"
required: true
type: string
pr-number:
description: "Identifier used to distinguish between PRs/dev/nightly tests"
required: true
type: string
commit-hash:
description: "The code to checkout for testing"
required: false
type: string
target-branch:
description: "The target branch to rebase on and ensure the tests are up-to-date"
required: false
type: string
default: ""
permissions:
contents: read
id-token: write
jobs:
run-k8s-tests-on-tdx:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-tdx
snapshotter:
- nydus
pull-type:
- guest-pull
runs-on: tdx
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBERNETES: "vanilla"
USING_NFD: "true"
KBS: "true"
K8S_TEST_HOST_TYPE: "baremetal"
KBS_INGRESS: "nodeport"
SNAPSHOTTER: ${{ matrix.snapshotter }}
PULL_TYPE: ${{ matrix.pull-type }}
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
ITA_KEY: ${{ secrets.ITA_KEY }}
AUTO_GENERATE_POLICY: "yes"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
# run-k8s-tests-on-tdx:
# strategy:
# fail-fast: false
# matrix:
# vmm:
# - qemu-tdx
# snapshotter:
# - nydus
# pull-type:
# - guest-pull
# runs-on: tdx
# env:
# DOCKER_REGISTRY: ${{ inputs.registry }}
# DOCKER_REPO: ${{ inputs.repo }}
# DOCKER_TAG: ${{ inputs.tag }}
# GH_PR_NUMBER: ${{ inputs.pr-number }}
# KATA_HYPERVISOR: ${{ matrix.vmm }}
# KUBERNETES: "vanilla"
# USING_NFD: "true"
# KBS: "true"
# K8S_TEST_HOST_TYPE: "baremetal"
# KBS_INGRESS: "nodeport"
# SNAPSHOTTER: ${{ matrix.snapshotter }}
# PULL_TYPE: ${{ matrix.pull-type }}
# AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
# AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
# ITA_KEY: ${{ secrets.ITA_KEY }}
# AUTO_GENERATE_POLICY: "yes"
# steps:
# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# with:
# ref: ${{ inputs.commit-hash }}
# fetch-depth: 0
# persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
# - name: Rebase atop of the latest target branch
# run: |
# ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
# env:
# TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Snapshotter
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
# - name: Deploy Snapshotter
# timeout-minutes: 5
# run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
# - name: Deploy Kata
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
- name: Uninstall previous `kbs-client`
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
# - name: Uninstall previous `kbs-client`
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
- name: Deploy CoCo KBS
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
# - name: Deploy CoCo KBS
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
- name: Install `kbs-client`
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
# - name: Install `kbs-client`
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
- name: Deploy CSI driver
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
# - name: Deploy CSI driver
# timeout-minutes: 5
# run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
- name: Run tests
timeout-minutes: 100
run: bash tests/integration/kubernetes/gha-run.sh run-tests
# - name: Run tests
# timeout-minutes: 100
# run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx
# - name: Delete kata-deploy
# if: always()
# run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx
- name: Delete Snapshotter
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
# - name: Delete Snapshotter
# if: always()
# run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
- name: Delete CoCo KBS
if: always()
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
# - name: Delete CoCo KBS
# if: always()
# run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
- name: Delete CSI driver
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
# - name: Delete CSI driver
# timeout-minutes: 5
# run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
run-k8s-tests-sev-snp:
strategy:
fail-fast: false
matrix:
vmm:
- qemu-snp
snapshotter:
- nydus
pull-type:
- guest-pull
runs-on: sev-snp
env:
DOCKER_REGISTRY: ${{ inputs.registry }}
DOCKER_REPO: ${{ inputs.repo }}
DOCKER_TAG: ${{ inputs.tag }}
GH_PR_NUMBER: ${{ inputs.pr-number }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
KUBECONFIG: /home/kata/.kube/config
KUBERNETES: "vanilla"
USING_NFD: "false"
KBS: "true"
KBS_INGRESS: "nodeport"
K8S_TEST_HOST_TYPE: "baremetal"
SNAPSHOTTER: ${{ matrix.snapshotter }}
PULL_TYPE: ${{ matrix.pull-type }}
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTO_GENERATE_POLICY: "yes"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
# run-k8s-tests-sev-snp:
# strategy:
# fail-fast: false
# matrix:
# vmm:
# - qemu-snp
# snapshotter:
# - nydus
# pull-type:
# - guest-pull
# runs-on: sev-snp
# env:
# DOCKER_REGISTRY: ${{ inputs.registry }}
# DOCKER_REPO: ${{ inputs.repo }}
# DOCKER_TAG: ${{ inputs.tag }}
# GH_PR_NUMBER: ${{ inputs.pr-number }}
# KATA_HYPERVISOR: ${{ matrix.vmm }}
# KUBECONFIG: /home/kata/.kube/config
# KUBERNETES: "vanilla"
# USING_NFD: "false"
# KBS: "true"
# KBS_INGRESS: "nodeport"
# K8S_TEST_HOST_TYPE: "baremetal"
# SNAPSHOTTER: ${{ matrix.snapshotter }}
# PULL_TYPE: ${{ matrix.pull-type }}
# AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
# AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
# AUTO_GENERATE_POLICY: "yes"
# steps:
# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# with:
# ref: ${{ inputs.commit-hash }}
# fetch-depth: 0
# persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
# - name: Rebase atop of the latest target branch
# run: |
# ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
# env:
# TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Deploy Snapshotter
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
# - name: Deploy Snapshotter
# timeout-minutes: 5
# run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
- name: Deploy Kata
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
# - name: Deploy Kata
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
- name: Uninstall previous `kbs-client`
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
# - name: Uninstall previous `kbs-client`
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
- name: Deploy CoCo KBS
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
# - name: Deploy CoCo KBS
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
- name: Install `kbs-client`
timeout-minutes: 10
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
# - name: Install `kbs-client`
# timeout-minutes: 10
# run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
- name: Deploy CSI driver
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
# - name: Deploy CSI driver
# timeout-minutes: 5
# run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
- name: Run tests
timeout-minutes: 50
run: bash tests/integration/kubernetes/gha-run.sh run-tests
# - name: Run tests
# timeout-minutes: 50
# run: bash tests/integration/kubernetes/gha-run.sh run-tests
- name: Delete kata-deploy
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp
# - name: Delete kata-deploy
# if: always()
# run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp
- name: Delete Snapshotter
if: always()
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
# - name: Delete Snapshotter
# if: always()
# run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
- name: Delete CoCo KBS
if: always()
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
# - name: Delete CoCo KBS
# if: always()
# run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
- name: Delete CSI driver
timeout-minutes: 5
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
# - name: Delete CSI driver
# timeout-minutes: 5
# run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
# Generate jobs for testing CoCo on non-TEE environments
run-k8s-tests-coco-nontee:
@@ -263,11 +303,17 @@ jobs:
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
run-id: ${{ inputs.artifact-run-id }}
github-token: ${{ github.token }}
repository: ${{ github.repository}}
path: kata-artifacts
- name: Install kata
run: bash tests/integration/kubernetes/gha-run.sh install-kata-tools kata-artifacts
- name: Download Azure CLI
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
- name: Log into the Azure account
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:

View File

@@ -72,6 +72,9 @@ jobs:
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: Download Azure CLI
run: bash tests/functional/kata-deploy/gha-run.sh install-azure-cli
- name: Log into the Azure account
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:

View File

@@ -2,6 +2,11 @@ name: CI | Run kata-monitor tests
on:
workflow_call:
inputs:
artifact-run-id:
description: "The run id where the artifact was uploaded"
required: false
type: string
default: ${{ github.run_id }}
tarball-suffix:
required: false
type: string
@@ -12,6 +17,25 @@ on:
required: false
type: string
default: ""
workflow_dispatch:
inputs:
artifact-run-id:
description: "The workflow run id where the artifact was uploaded"
required: true
type: string
tarball-suffix:
description: "Identifier used to distinguish between PRs/dev/nightly tests"
required: false
type: string
commit-hash:
description: "The code to checkout for testing"
required: false
type: string
target-branch:
description: "The target branch to rebase on and ensure the tests are up-to-date"
required: false
type: string
default: ""
permissions:
contents: read
@@ -54,13 +78,14 @@ jobs:
- name: Install dependencies
run: bash tests/functional/kata-monitor/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
run-id: ${{ inputs.artifact-run-id }}
github-token: ${{ github.token }}
repository: ${{ github.repository}}
path: kata-artifacts
- name: Install kata

View File

@@ -38,8 +38,6 @@ jobs:
- name: Install dependencies
run: bash tests/integration/runk/gha-run.sh install-dependencies
env:
GH_TOKEN: ${{ github.token }}
- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0

View File

@@ -150,36 +150,3 @@ jobs:
needs: skipper
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
uses: ./.github/workflows/govulncheck.yaml
codegen:
runs-on: ubuntu-22.04
needs: skipper
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
permissions:
contents: read # for checkout
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
persist-credentials: false
- name: generate
run: make -C src/agent generate-protocols
- name: check for diff
run: |
diff=$(git diff)
if [[ -z "${diff}" ]]; then
echo "No diff detected."
exit 0
fi
cat << EOF >> "${GITHUB_STEP_SUMMARY}"
Run \`make -C src/agent generate-protocols\` to update protobuf bindings.
\`\`\`diff
${diff}
\`\`\`
EOF
echo "::error::Golang protobuf bindings need to be regenerated (see Github step summary for diff)."
exit 1

View File

@@ -42,7 +42,7 @@ generate-protocols:
# Some static checks rely on generated source files of components.
static-checks: static-checks-build
bash tests/static-checks.sh
bash tests/static-checks.sh github.com/kata-containers/kata-containers
docs-url-alive-check:
bash ci/docs-url-alive-check.sh

View File

@@ -105,7 +105,7 @@ Please raise an issue
[in this repository](https://github.com/kata-containers/kata-containers/issues).
> **Note:**
> If you are reporting a security issue, please follow the [vulnerability reporting process](SECURITY.md)
> If you are reporting a security issue, please follow the [vulnerability reporting process](https://github.com/kata-containers/community#vulnerability-handling)
## Developers

View File

@@ -1,79 +0,0 @@
# Security Policy
Kata Containers is a **rolling-release** project: every monthly release replaces the previous one, and only the _current_ release series receives security fixes. There are **no long-term-support branches**.
---
## Reporting a Vulnerability
### How to report
- **Keep it private first.**
Please **do not** open a public GitHub issue or pull request for security problems.
- **Use GitHubs built-in security advisory workflow.**
See GitHubs official guide:
[Creating a repository security advisory](https://docs.github.com/en/code-security/security-advisories/working-with-repository-security-advisories/creating-a-repository-security-advisory#creating-a-security-advisory)
### What happens after you submit
We follow the OpenSSF vulnerability-handling guidelines.
The table below shows the target timelines we hold ourselves to once we receive your report.
| Stage | Target time | Notes |
|-------|-------------|-------|
| **Initial acknowledgement** | ≤ 14 calendar days | Maintainers confirm receipt and start triage. |
| **Triage & CVSS-v3.1 scoring** | ≤ 30 days | We assign severity and plan remediation. |
| **Fix availability** | Next scheduled monthly release<br />(or an out-of-band patch for Critical/High issues) | We may cut a `vX.Y.Z` patch if waiting a month poses undue risk. |
---
## Supported Versions
| Release | First published | Security-fix window |
|---------|-----------------|---------------------|
| **Latest monthly release** | see `git tag --sort=-creatordate \| head -n 1` | Actively maintained |
| Any prior release | — | **Unsupported** please upgrade |
> **Why no backports?**
> Katas architecture evolves quickly; back-porting patches would re-introduce the very maintenance burden we avoid by using a rolling model.
---
## Disclosure Process & Fix Delivery
1. We develop the fix on a private branch.
2. Once validated, we coordinate embargo dates with downstream consumers when appropriate.
3. The fix ships in **either**:
* Common: The next regular monthly release (e.g., `v3.19`) when impact is moderate and waiting does not materially increase risk, **or**
* Exception: A point release (e.g., `v3.18.1`) if the vulnerability affects only the current series.
4. After the fix is public, we request a CVE ID (if not already issued) and publish details.
---
## Security Advisories & Release Notes
* Each patch or monthly release includes a **Security Bulletin** section in its GitHub *Release Notes* summarizing:
* affected components & versions,
* CVE identifiers (if assigned),
* severity / CVSS score,
* mitigation steps,
* upgrade instructions.
* We do **not** publish separate “stable-branch” advisories because unsupported branches receive no fixes.
---
## Frequently Asked Questions
**Q: I run `v3.16` will you patch it?**
A: No. Upgrade to the latest monthly release.
**Q: Can I get early access to embargoed fixes?**
A: Only project members under the disclosure agreement (see [SECURITY_CONTACTS](SECURITY_CONTACTS)) receive advance patches.
**Q: Where can I discuss the vulnerability once it is public?**
A: Open/continue a GitHub issue **after** the advisory is published, or use `#kata-containers` on Slack with a link to the advisory.
---
*Last updated:* 2025-06-27

View File

@@ -1,13 +0,0 @@
# Copyright (c) 2025 Kata Containers Authors
#
# SPDX-License-Identifier: Apache-2.0
#
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT [SECURITY.md](SECURITY.md)
@kata-containers/architecture-committee

View File

@@ -1 +1 @@
3.19.1
3.18.0

311
src/agent/Cargo.lock generated
View File

@@ -508,15 +508,6 @@ dependencies = [
"wyz",
]
[[package]]
name = "block-buffer"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
dependencies = [
"generic-array",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
@@ -898,16 +889,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "crypto-mac"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
dependencies = [
"generic-array",
"subtle",
]
[[package]]
name = "darling"
version = "0.14.4"
@@ -977,19 +958,6 @@ dependencies = [
"syn 2.0.101",
]
[[package]]
name = "dashmap"
version = "5.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
"cfg-if",
"hashbrown 0.14.5",
"lock_api",
"once_cell",
"parking_lot_core",
]
[[package]]
name = "deranged"
version = "0.4.0"
@@ -1052,22 +1020,13 @@ dependencies = [
"syn 2.0.101",
]
[[package]]
name = "digest"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
dependencies = [
"generic-array",
]
[[package]]
name = "digest"
version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer 0.10.4",
"block-buffer",
"crypto-common",
]
@@ -1520,12 +1479,6 @@ dependencies = [
"ahash 0.7.8",
]
[[package]]
name = "hashbrown"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
[[package]]
name = "hashbrown"
version = "0.15.3"
@@ -1571,16 +1524,6 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "hmac"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
dependencies = [
"crypto-mac",
"digest 0.9.0",
]
[[package]]
name = "home"
version = "0.5.9"
@@ -1911,17 +1854,6 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "io-uring"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013"
dependencies = [
"bitflags 2.9.0",
"cfg-if",
"libc",
]
[[package]]
name = "iovec"
version = "0.1.4"
@@ -2064,16 +1996,16 @@ dependencies = [
"libc",
"log",
"logging",
"mem-agent-lib",
"mem-agent",
"netlink-packet-core",
"netlink-packet-route",
"netlink-sys 0.7.0",
"nix 0.26.4",
"nix 0.24.3",
"oci-spec",
"opentelemetry",
"procfs 0.12.0",
"prometheus",
"protobuf",
"protobuf 3.7.2",
"protocols",
"regex",
"rstest",
@@ -2087,7 +2019,7 @@ dependencies = [
"serde",
"serde_json",
"serial_test",
"sha2 0.10.9",
"sha2",
"slog",
"slog-scope",
"slog-stdlog",
@@ -2137,7 +2069,7 @@ dependencies = [
"lazy_static",
"libc",
"mockall",
"nix 0.26.4",
"nix 0.24.3",
"oci-spec",
"once_cell",
"pci-ids",
@@ -2171,7 +2103,7 @@ dependencies = [
"serde",
"serde-enum-str",
"serde_json",
"sha2 0.10.9",
"sha2",
"slog",
"slog-scope",
"sysinfo",
@@ -2248,23 +2180,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a7cbbd4ad467251987c6e5b47d53b11a5a05add08f2447a9e2d70aef1e0d138"
[[package]]
name = "libsystemd"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f4f0b5b062ba67aa075e331de778082c09e66b5ef32970ea5a1e9c37c9555d1"
dependencies = [
"hmac",
"libc",
"log",
"nix 0.23.2",
"once_cell",
"serde",
"sha2 0.9.9",
"thiserror 1.0.69",
"uuid 0.8.2",
]
[[package]]
name = "libz-sys"
version = "1.1.22"
@@ -2328,18 +2243,11 @@ dependencies = [
"serde_json",
"slog",
"slog-async",
"slog-journald",
"slog-json",
"slog-scope",
"slog-term",
]
[[package]]
name = "maplit"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
[[package]]
name = "matchers"
version = "0.0.1"
@@ -2350,14 +2258,14 @@ dependencies = [
]
[[package]]
name = "mem-agent-lib"
version = "0.2.0"
name = "mem-agent"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"chrono",
"maplit",
"nix 0.30.1",
"lazy_static",
"nix 0.23.2",
"page_size",
"slog",
"slog-scope",
@@ -2599,18 +2507,6 @@ dependencies = [
"libc",
]
[[package]]
name = "nix"
version = "0.30.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6"
dependencies = [
"bitflags 2.9.0",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]]
name = "nom"
version = "7.1.3"
@@ -2790,12 +2686,6 @@ version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "opaque-debug"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
[[package]]
name = "opentelemetry"
version = "0.14.0"
@@ -2848,6 +2738,17 @@ version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba"
[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core 0.8.6",
]
[[package]]
name = "parking_lot"
version = "0.12.3"
@@ -2855,7 +2756,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
dependencies = [
"lock_api",
"parking_lot_core",
"parking_lot_core 0.9.10",
]
[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
"cfg-if",
"instant",
"libc",
"redox_syscall 0.2.16",
"smallvec",
"winapi",
]
[[package]]
@@ -2866,7 +2781,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"redox_syscall 0.5.12",
"smallvec",
"windows-targets 0.52.6",
]
@@ -3157,21 +3072,22 @@ dependencies = [
[[package]]
name = "procfs"
version = "0.17.0"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f"
checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4"
dependencies = [
"bitflags 2.9.0",
"hex",
"lazy_static",
"procfs-core",
"rustix 0.38.44",
]
[[package]]
name = "procfs-core"
version = "0.17.0"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec"
checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29"
dependencies = [
"bitflags 2.9.0",
"hex",
@@ -3179,19 +3095,19 @@ dependencies = [
[[package]]
name = "prometheus"
version = "0.14.0"
version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a"
checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1"
dependencies = [
"cfg-if",
"fnv",
"lazy_static",
"libc",
"memchr",
"parking_lot",
"procfs 0.17.0",
"protobuf",
"thiserror 2.0.12",
"parking_lot 0.12.3",
"procfs 0.16.0",
"protobuf 2.28.0",
"thiserror 1.0.69",
]
[[package]]
@@ -3245,6 +3161,12 @@ dependencies = [
"prost",
]
[[package]]
name = "protobuf"
version = "2.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
[[package]]
name = "protobuf"
version = "3.7.2"
@@ -3256,6 +3178,15 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "protobuf-codegen"
version = "2.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6"
dependencies = [
"protobuf 2.28.0",
]
[[package]]
name = "protobuf-codegen"
version = "3.7.2"
@@ -3264,7 +3195,7 @@ checksum = "5d3976825c0014bbd2f3b34f0001876604fe87e0c86cd8fa54251530f1544ace"
dependencies = [
"anyhow",
"once_cell",
"protobuf",
"protobuf 3.7.2",
"protobuf-parse",
"regex",
"tempfile",
@@ -3280,7 +3211,7 @@ dependencies = [
"anyhow",
"indexmap 2.9.0",
"log",
"protobuf",
"protobuf 3.7.2",
"protobuf-support",
"tempfile",
"thiserror 1.0.69",
@@ -3302,7 +3233,7 @@ version = "0.1.0"
dependencies = [
"async-trait",
"oci-spec",
"protobuf",
"protobuf 3.7.2",
"serde",
"serde_json",
"ttrpc",
@@ -3380,6 +3311,15 @@ dependencies = [
"getrandom 0.2.16",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags 1.3.2",
]
[[package]]
name = "redox_syscall"
version = "0.5.12"
@@ -3429,7 +3369,7 @@ dependencies = [
"ahash 0.8.12",
"fluent-uri 0.3.2",
"once_cell",
"parking_lot",
"parking_lot 0.12.3",
"percent-encoding",
"serde_json",
]
@@ -3560,7 +3500,7 @@ dependencies = [
"rkyv_derive",
"seahash",
"tinyvec",
"uuid 1.16.0",
"uuid",
]
[[package]]
@@ -3729,10 +3669,10 @@ dependencies = [
"lazy_static",
"libc",
"libseccomp",
"nix 0.26.4",
"nix 0.24.3",
"oci-spec",
"path-absolutize",
"protobuf",
"protobuf 3.7.2",
"protocols",
"regex",
"rlimit",
@@ -3942,23 +3882,20 @@ dependencies = [
[[package]]
name = "serial_test"
version = "0.10.0"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c789ec87f4687d022a2405cf46e0cd6284889f1839de292cadeb6c6019506f2"
checksum = "e0bccbcf40c8938196944a3da0e133e031a33f4d6b72db3bda3cc556e361905d"
dependencies = [
"dashmap",
"futures",
"lazy_static",
"log",
"parking_lot",
"parking_lot 0.11.2",
"serial_test_derive",
]
[[package]]
name = "serial_test_derive"
version = "0.10.0"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3"
checksum = "b2acd6defeddb41eb60bb468f8825d0cfd0c2a76bc03bfd235b6a1dc4f6a1ad5"
dependencies = [
"proc-macro2",
"quote",
@@ -3973,20 +3910,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
dependencies = [
"cfg-if",
"cpufeatures",
"digest 0.10.7",
]
[[package]]
name = "sha2"
version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800"
dependencies = [
"block-buffer 0.9.0",
"cfg-if",
"cpufeatures",
"digest 0.9.0",
"opaque-debug",
"digest",
]
[[package]]
@@ -3997,7 +3921,7 @@ checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
"cpufeatures",
"digest 0.10.7",
"digest",
]
[[package]]
@@ -4069,16 +3993,6 @@ dependencies = [
"thread_local",
]
[[package]]
name = "slog-journald"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83e14eb8c2f5d0c8fc9fbac40e6391095e4dc5cb334f7dce99c75cb1919eb39c"
dependencies = [
"libsystemd",
"slog",
]
[[package]]
name = "slog-json"
version = "2.6.1"
@@ -4218,12 +4132,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
[[package]]
name = "syn"
version = "1.0.109"
@@ -4325,7 +4233,7 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683"
name = "test-utils"
version = "0.1.0"
dependencies = [
"nix 0.26.4",
"nix 0.24.3",
]
[[package]]
@@ -4436,19 +4344,17 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.46.1"
version = "1.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17"
checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165"
dependencies = [
"backtrace",
"bytes 1.10.1",
"io-uring",
"libc",
"mio",
"parking_lot",
"parking_lot 0.12.3",
"pin-project-lite",
"signal-hook-registry",
"slab",
"socket2 0.5.9",
"tokio-macros",
"windows-sys 0.52.0",
@@ -4674,8 +4580,8 @@ dependencies = [
"libc",
"log",
"nix 0.26.4",
"protobuf",
"protobuf-codegen",
"protobuf 3.7.2",
"protobuf-codegen 3.7.2",
"thiserror 1.0.69",
"tokio",
"tokio-vsock 0.4.0",
@@ -4684,28 +4590,30 @@ dependencies = [
[[package]]
name = "ttrpc-codegen"
version = "0.6.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e5c657ef5cea6f6c6073c1be0787ba4482f42a569d4821e467daec795271f86"
checksum = "cdc0529f65223eca94fc5830e7d552d0d152ff42b74aff5c641edac39592f41f"
dependencies = [
"protobuf",
"protobuf-codegen",
"home",
"protobuf 2.28.0",
"protobuf-codegen 3.7.2",
"protobuf-support",
"ttrpc-compiler",
]
[[package]]
name = "ttrpc-compiler"
version = "0.8.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3aa71f4a44711b3b9cc10ed0c7e239ff0fe4b8e6c900a142fb3bb26401385718"
checksum = "9be3fb2fe509cb9c0099b3b5551b759ae714f2dde56dfc713f2a5bda8c16064a"
dependencies = [
"derive-new",
"home",
"prost",
"prost-build",
"prost-types",
"protobuf",
"protobuf-codegen",
"protobuf 2.28.0",
"protobuf-codegen 2.28.0",
"tempfile",
]
@@ -4785,15 +4693,6 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7"
dependencies = [
"serde",
]
[[package]]
name = "uuid"
version = "1.16.0"
@@ -4807,7 +4706,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b082222b4f6619906941c17eb2297fff4c2fb96cb60164170522942a200bd8"
dependencies = [
"outref",
"uuid 1.16.0",
"uuid",
"vsimd",
]
@@ -5031,7 +4930,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.59.0",
"windows-sys 0.48.0",
]
[[package]]

View File

@@ -13,14 +13,13 @@ lazy_static = "1.3.0"
ttrpc = { version = "0.8.4", features = ["async"], default-features = false }
protobuf = "3.7.2"
libc = "0.2.94"
# Notes: nix needs to stay in sync with libs
nix = "0.26.4"
nix = "0.24.2"
capctl = "0.2.0"
scan_fmt = "0.2.6"
scopeguard = "1.0.0"
thiserror = "1.0.26"
regex = "1.10.5"
serial_test = "0.10.0"
serial_test = "0.5.1"
url = "2.5.0"
derivative = "2.2.0"
const_format = "0.2.30"
@@ -31,7 +30,7 @@ async-recursion = "0.3.2"
futures = "0.3.30"
# Async runtime
tokio = { version = "1.46.1", features = ["full"] }
tokio = { version = "1.44.2", features = ["full"] }
tokio-vsock = "0.3.4"
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
@@ -50,7 +49,7 @@ slog-stdlog = "4.0.0"
log = "0.4.11"
cfg-if = "1.0.0"
prometheus = { version = "0.14.0", features = ["process"] }
prometheus = { version = "0.13.0", features = ["process"] }
procfs = "0.12.0"
anyhow = "1"

View File

@@ -217,11 +217,4 @@ codecov-html: check_tarpaulin
##TARGET generate-protocols: generate/update grpc agent protocols
generate-protocols:
image=$$(docker build -q \
--build-arg GO_VERSION=$$(yq '.languages.golang.version' $(CURDIR)/../../versions.yaml) \
--build-arg PROTOC_VERSION=$$(yq '.externals.protoc.version' $(CURDIR)/../../versions.yaml | grep -oE "[0-9.]+") \
--build-arg PROTOC_GEN_GO_VERSION=$$(yq '.externals.protoc-gen-go.version' $(CURDIR)/../../versions.yaml) \
--build-arg TTRPC_VERSION=$$(yq '.externals.ttrpc.version' $(CURDIR)/../../versions.yaml) \
$(CURDIR)/../../tools/packaging/static-build/codegen) && \
docker run --rm --workdir /kata/src/agent -v $(CURDIR)/../..:/kata --user $(shell id -u) $$image \
../libs/protocols/hack/update-generated-proto.sh all

View File

@@ -32,7 +32,6 @@ use crate::cgroups::{DevicesCgroupInfo, Manager};
use crate::console;
use crate::log_child;
use crate::process::Process;
use crate::process::ProcessOperations;
#[cfg(feature = "seccomp")]
use crate::seccomp;
use crate::selinux;
@@ -262,7 +261,7 @@ pub struct LinuxContainer {
pub init_process_start_time: u64,
pub uid_map_path: String,
pub gid_map_path: String,
pub processes: HashMap<String, Process>,
pub processes: HashMap<pid_t, Process>,
pub status: ContainerStatus,
pub created: SystemTime,
pub logger: Logger,
@@ -934,13 +933,17 @@ impl BaseContainer for LinuxContainer {
}
fn processes(&self) -> Result<Vec<i32>> {
Ok(self.processes.values().map(|p| p.pid).collect())
Ok(self.processes.keys().cloned().collect())
}
fn get_process(&mut self, eid: &str) -> Result<&mut Process> {
self.processes
.get_mut(eid)
.ok_or_else(|| anyhow!("invalid eid {}", eid))
for (_, v) in self.processes.iter_mut() {
if eid == v.exec_id.as_str() {
return Ok(v);
}
}
Err(anyhow!("invalid eid {}", eid))
}
fn stats(&self) -> Result<StatsContainerResponse> {
@@ -964,12 +967,6 @@ impl BaseContainer for LinuxContainer {
async fn start(&mut self, mut p: Process) -> Result<()> {
let logger = self.logger.new(o!("eid" => p.exec_id.clone()));
// Check if exec_id is already in use to prevent collisions
if self.processes.contains_key(p.exec_id.as_str()) {
return Err(anyhow!("exec_id '{}' already exists", p.exec_id));
}
let tty = p.tty;
let fifo_file = format!("{}/{}", &self.root, EXEC_FIFO_FILENAME);
info!(logger, "enter container.start!");
@@ -1238,7 +1235,7 @@ impl BaseContainer for LinuxContainer {
let spec = self.config.spec.as_mut().unwrap();
update_namespaces(&self.logger, spec, p.pid)?;
}
self.processes.insert(p.exec_id.clone(), p);
self.processes.insert(p.pid, p);
info!(logger, "wait on child log handler");
let _ = log_handler
@@ -1264,13 +1261,13 @@ impl BaseContainer for LinuxContainer {
let spec = self.config.spec.as_ref().unwrap();
let st = self.oci_state()?;
for process in self.processes.values() {
match signal::kill(process.pid(), Some(Signal::SIGKILL)) {
for pid in self.processes.keys() {
match signal::kill(Pid::from_raw(*pid), Some(Signal::SIGKILL)) {
Err(Errno::ESRCH) => {
info!(
self.logger,
"kill encounters ESRCH, pid: {}, container: {}",
process.pid(),
pid,
self.id.clone()
);
continue;
@@ -2087,11 +2084,10 @@ mod tests {
#[tokio::test]
async fn test_linuxcontainer_get_process() {
let _ = new_linux_container_and_then(|mut c: LinuxContainer| {
let process =
Process::new(&sl(), &oci::Process::default(), "123", true, 1, None).unwrap();
let exec_id = process.exec_id.clone();
c.processes.insert(exec_id, process);
c.processes.insert(
1,
Process::new(&sl(), &oci::Process::default(), "123", true, 1, None).unwrap(),
);
let p = c.get_process("123");
assert!(p.is_ok(), "Expecting Ok, Got {:?}", p);
Ok(())

View File

@@ -554,7 +554,7 @@ impl AgentService {
req: protocols::agent::WaitProcessRequest,
) -> Result<protocols::agent::WaitProcessResponse> {
let cid = req.container_id;
let mut eid = req.exec_id;
let eid = req.exec_id;
let mut resp = WaitProcessResponse::new();
info!(
@@ -587,7 +587,7 @@ impl AgentService {
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
let p = match ctr.processes.values_mut().find(|p| p.pid == pid) {
let p = match ctr.processes.get_mut(&pid) {
Some(p) => p,
None => {
// Lost race, pick up exit code from channel
@@ -600,8 +600,6 @@ impl AgentService {
}
};
eid = p.exec_id.clone();
// need to close all fd
// ignore errors for some fd might be closed by stream
p.cleanup_process_stream();
@@ -613,7 +611,7 @@ impl AgentService {
let _ = s.send(p.exit_code).await;
}
ctr.processes.remove(&eid);
ctr.processes.remove(&pid);
Ok(resp)
}
@@ -2672,7 +2670,7 @@ mod tests {
}
linux_container
.processes
.insert(exec_process.exec_id.clone(), exec_process);
.insert(exec_process_id, exec_process);
sandbox.add_container(linux_container);
}

View File

@@ -272,10 +272,8 @@ impl Sandbox {
pub fn find_process(&mut self, pid: pid_t) -> Option<&mut Process> {
for (_, c) in self.containers.iter_mut() {
for p in c.processes.values_mut() {
if p.pid == pid {
return Some(p);
}
if let Some(p) = c.processes.get_mut(&pid) {
return Some(p);
}
}
@@ -288,11 +286,9 @@ impl Sandbox {
.ok_or_else(|| anyhow!(ERR_INVALID_CONTAINER_ID))?;
if eid.is_empty() {
let init_pid = ctr.init_process_pid;
return ctr
.processes
.values_mut()
.find(|p| p.pid == init_pid)
.get_mut(&ctr.init_process_pid)
.ok_or_else(|| anyhow!("cannot find init process!"));
}
@@ -1018,26 +1014,23 @@ mod tests {
linux_container.init_process_pid = 1;
linux_container.id = cid.to_string();
// add init process
let mut init_process =
Process::new(&logger, &oci::Process::default(), "1", true, 1, None).unwrap();
init_process.pid = 1;
linux_container
.processes
.insert("1".to_string(), init_process);
// add exec process
let mut exec_process = Process::new(
&logger,
&oci::Process::default(),
"exec-123",
false,
linux_container.processes.insert(
1,
None,
)
.unwrap();
exec_process.pid = 123;
linux_container
.processes
.insert("exec-123".to_string(), exec_process);
Process::new(&logger, &oci::Process::default(), "1", true, 1, None).unwrap(),
);
// add exec process
linux_container.processes.insert(
123,
Process::new(
&logger,
&oci::Process::default(),
"exec-123",
false,
1,
None,
)
.unwrap(),
);
s.add_container(linux_container);
@@ -1088,8 +1081,8 @@ mod tests {
.unwrap();
// processes interally only have pids when manually set
test_process.pid = test_pid;
let test_exec_id = test_process.exec_id.clone();
linux_container.processes.insert(test_exec_id, test_process);
linux_container.processes.insert(test_pid, test_process);
s.add_container(linux_container);

View File

@@ -143,7 +143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "190baaad529bcfbde9e1a19022c42781bdb6ff9de25721abdb8fd98c0807730b"
dependencies = [
"libc",
"thiserror 1.0.48",
"thiserror",
]
[[package]]
@@ -266,7 +266,7 @@ dependencies = [
"lazy_static",
"libc",
"nix 0.23.2",
"thiserror 1.0.48",
"thiserror",
"vm-memory",
"vmm-sys-util",
]
@@ -275,7 +275,7 @@ dependencies = [
name = "dbs-allocator"
version = "0.1.1"
dependencies = [
"thiserror 1.0.48",
"thiserror",
]
[[package]]
@@ -285,8 +285,8 @@ dependencies = [
"kvm-bindings",
"kvm-ioctls",
"libc",
"memoffset 0.6.5",
"thiserror 1.0.48",
"memoffset",
"thiserror",
"vm-memory",
"vmm-sys-util",
]
@@ -302,7 +302,7 @@ dependencies = [
"kvm-ioctls",
"lazy_static",
"libc",
"thiserror 1.0.48",
"thiserror",
"vm-fdt",
"vm-memory",
]
@@ -311,7 +311,7 @@ dependencies = [
name = "dbs-device"
version = "0.2.0"
dependencies = [
"thiserror 1.0.48",
"thiserror",
]
[[package]]
@@ -354,7 +354,7 @@ dependencies = [
"kvm-ioctls",
"libc",
"log",
"thiserror 1.0.48",
"thiserror",
"vfio-bindings",
"vfio-ioctls",
"vm-memory",
@@ -366,7 +366,7 @@ version = "0.1.0"
dependencies = [
"kvm-bindings",
"serde_json",
"thiserror 1.0.48",
"thiserror",
"vmm-sys-util",
]
@@ -378,7 +378,7 @@ dependencies = [
"dbs-utils",
"dbs-virtio-devices",
"log",
"thiserror 1.0.48",
"thiserror",
"timerfd",
]
@@ -392,7 +392,7 @@ dependencies = [
"log",
"serde",
"serde_json",
"thiserror 1.0.48",
"thiserror",
"timerfd",
"vmm-sys-util",
]
@@ -423,7 +423,7 @@ dependencies = [
"sendfd",
"serde",
"serde_json",
"thiserror 1.0.48",
"thiserror",
"threadpool",
"timerfd",
"vhost",
@@ -491,17 +491,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "displaydoc"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "downcast-rs"
version = "1.2.0"
@@ -547,7 +536,7 @@ dependencies = [
"slog-scope",
"slog-term",
"test-utils",
"thiserror 1.0.48",
"thiserror",
"tracing",
"vfio-bindings",
"vfio-ioctls",
@@ -664,9 +653,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
dependencies = [
"percent-encoding",
]
@@ -924,111 +913,14 @@ dependencies = [
"cc",
]
[[package]]
name = "icu_collections"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47"
dependencies = [
"displaydoc",
"potential_utf",
"yoke",
"zerofrom",
"zerovec",
]
[[package]]
name = "icu_locale_core"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a"
dependencies = [
"displaydoc",
"litemap",
"tinystr",
"writeable",
"zerovec",
]
[[package]]
name = "icu_normalizer"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979"
dependencies = [
"displaydoc",
"icu_collections",
"icu_normalizer_data",
"icu_properties",
"icu_provider",
"smallvec",
"zerovec",
]
[[package]]
name = "icu_normalizer_data"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3"
[[package]]
name = "icu_properties"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b"
dependencies = [
"displaydoc",
"icu_collections",
"icu_locale_core",
"icu_properties_data",
"icu_provider",
"potential_utf",
"zerotrie",
"zerovec",
]
[[package]]
name = "icu_properties_data"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632"
[[package]]
name = "icu_provider"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af"
dependencies = [
"displaydoc",
"icu_locale_core",
"stable_deref_trait",
"tinystr",
"writeable",
"yoke",
"zerofrom",
"zerotrie",
"zerovec",
]
[[package]]
name = "idna"
version = "1.0.3"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
"idna_adapter",
"smallvec",
"utf8_iter",
]
[[package]]
name = "idna_adapter"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344"
dependencies = [
"icu_normalizer",
"icu_properties",
"unicode-bidi",
"unicode-normalization",
]
[[package]]
@@ -1158,6 +1050,12 @@ dependencies = [
"vm-memory",
]
[[package]]
name = "linux-raw-sys"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "linux-raw-sys"
version = "0.3.8"
@@ -1170,12 +1068,6 @@ version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
[[package]]
name = "litemap"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
[[package]]
name = "lock_api"
version = "0.4.10"
@@ -1227,15 +1119,6 @@ dependencies = [
"autocfg",
]
[[package]]
name = "memoffset"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
dependencies = [
"autocfg",
]
[[package]]
name = "mime"
version = "0.3.17"
@@ -1302,7 +1185,7 @@ dependencies = [
"cc",
"cfg-if",
"libc",
"memoffset 0.6.5",
"memoffset",
]
[[package]]
@@ -1314,20 +1197,7 @@ dependencies = [
"bitflags 1.3.2",
"cfg-if",
"libc",
"memoffset 0.6.5",
]
[[package]]
name = "nix"
version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b"
dependencies = [
"bitflags 1.3.2",
"cfg-if",
"libc",
"memoffset 0.7.1",
"pin-utils",
"memoffset",
]
[[package]]
@@ -1461,9 +1331,9 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "openssl"
version = "0.10.73"
version = "0.10.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da"
dependencies = [
"bitflags 2.4.0",
"cfg-if",
@@ -1482,7 +1352,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
]
[[package]]
@@ -1502,9 +1372,9 @@ dependencies = [
[[package]]
name = "openssl-sys"
version = "0.9.109"
version = "0.9.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847"
dependencies = [
"cc",
"libc",
@@ -1538,9 +1408,9 @@ dependencies = [
[[package]]
name = "percent-encoding"
version = "2.3.1"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
[[package]]
name = "pin-project-lite"
@@ -1560,15 +1430,6 @@ version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
[[package]]
name = "potential_utf"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585"
dependencies = [
"zerovec",
]
[[package]]
name = "powerfmt"
version = "0.2.0"
@@ -1577,9 +1438,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "proc-macro2"
version = "1.0.95"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
dependencies = [
"unicode-ident",
]
@@ -1601,31 +1462,22 @@ dependencies = [
[[package]]
name = "procfs"
version = "0.17.0"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f"
checksum = "b1de8dacb0873f77e6aefc6d71e044761fcc68060290f5b1089fcdf84626bb69"
dependencies = [
"bitflags 2.4.0",
"hex",
"procfs-core",
"rustix 0.38.25",
]
[[package]]
name = "procfs-core"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec"
dependencies = [
"bitflags 2.4.0",
"bitflags 1.3.2",
"byteorder",
"hex",
"lazy_static",
"rustix 0.36.17",
]
[[package]]
name = "prometheus"
version = "0.14.0"
version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a"
checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c"
dependencies = [
"cfg-if",
"fnv",
@@ -1633,36 +1485,22 @@ dependencies = [
"libc",
"memchr",
"parking_lot",
"procfs 0.17.0",
"procfs 0.14.2",
"protobuf",
"thiserror 2.0.12",
"thiserror",
]
[[package]]
name = "protobuf"
version = "3.7.2"
version = "2.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4"
dependencies = [
"once_cell",
"protobuf-support",
"thiserror 1.0.48",
]
[[package]]
name = "protobuf-support"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6"
dependencies = [
"thiserror 1.0.48",
]
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
[[package]]
name = "quote"
version = "1.0.40"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
"proc-macro2",
]
@@ -1699,7 +1537,7 @@ checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
dependencies = [
"getrandom 0.2.10",
"redox_syscall 0.2.16",
"thiserror 1.0.48",
"thiserror",
]
[[package]]
@@ -1754,6 +1592,20 @@ version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustix"
version = "0.36.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "305efbd14fde4139eb501df5f136994bb520b033fa9fbdce287507dc23b8c7ed"
dependencies = [
"bitflags 1.3.2",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys 0.1.4",
"windows-sys 0.45.0",
]
[[package]]
name = "rustix"
version = "0.37.28"
@@ -1866,7 +1718,7 @@ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
]
[[package]]
@@ -1973,9 +1825,9 @@ dependencies = [
[[package]]
name = "smallvec"
version = "1.15.1"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "socket2"
@@ -1987,12 +1839,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "subtle"
version = "2.5.0"
@@ -2012,26 +1858,15 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.104"
version = "2.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "synstructure"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "take_mut"
version = "0.2.2"
@@ -2077,7 +1912,7 @@ dependencies = [
name = "test-utils"
version = "0.1.0"
dependencies = [
"nix 0.26.4",
"nix 0.24.3",
]
[[package]]
@@ -2086,16 +1921,7 @@ version = "1.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7"
dependencies = [
"thiserror-impl 1.0.48",
]
[[package]]
name = "thiserror"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
dependencies = [
"thiserror-impl 2.0.12",
"thiserror-impl",
]
[[package]]
@@ -2106,18 +1932,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "thiserror-impl"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
]
[[package]]
@@ -2180,15 +1995,20 @@ dependencies = [
]
[[package]]
name = "tinystr"
version = "0.8.1"
name = "tinyvec"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b"
checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
dependencies = [
"displaydoc",
"zerovec",
"tinyvec_macros",
]
[[package]]
name = "tinyvec_macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.44.2"
@@ -2213,7 +2033,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
]
[[package]]
@@ -2274,7 +2094,7 @@ checksum = "1b1ffbcf9c6f6b99d386e7444eb608ba646ae452a36b39737deb9663b610f662"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
]
[[package]]
@@ -2298,6 +2118,12 @@ version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "unicode-bidi"
version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
[[package]]
name = "unicode-ident"
version = "1.0.11"
@@ -2305,22 +2131,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
[[package]]
name = "url"
version = "2.5.4"
name = "unicode-normalization"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
dependencies = [
"tinyvec",
]
[[package]]
name = "url"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5"
dependencies = [
"form_urlencoded",
"idna",
"percent-encoding",
]
[[package]]
name = "utf8_iter"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "vcpkg"
version = "0.2.15"
@@ -2350,7 +2179,7 @@ dependencies = [
"kvm-ioctls",
"libc",
"log",
"thiserror 2.0.12",
"thiserror",
"vfio-bindings",
"vm-memory",
"vmm-sys-util",
@@ -2464,7 +2293,7 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
"wasm-bindgen-shared",
]
@@ -2498,7 +2327,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"syn 2.0.32",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -2550,6 +2379,15 @@ dependencies = [
"windows-targets 0.48.5",
]
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
@@ -2568,6 +2406,21 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
@@ -2599,6 +2452,12 @@ dependencies = [
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
@@ -2611,6 +2470,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
@@ -2623,6 +2488,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
@@ -2641,6 +2512,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
@@ -2653,6 +2530,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
@@ -2665,6 +2548,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
@@ -2677,6 +2566,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
@@ -2708,12 +2603,6 @@ dependencies = [
"bitflags 2.4.0",
]
[[package]]
name = "writeable"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
[[package]]
name = "xattr"
version = "1.0.1"
@@ -2723,84 +2612,6 @@ dependencies = [
"libc",
]
[[package]]
name = "yoke"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc"
dependencies = [
"serde",
"stable_deref_trait",
"yoke-derive",
"zerofrom",
]
[[package]]
name = "yoke-derive"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"synstructure",
]
[[package]]
name = "zerofrom"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
dependencies = [
"zerofrom-derive",
]
[[package]]
name = "zerofrom-derive"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
"synstructure",
]
[[package]]
name = "zerotrie"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595"
dependencies = [
"displaydoc",
"yoke",
"zerofrom",
]
[[package]]
name = "zerovec"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428"
dependencies = [
"yoke",
"zerofrom",
"zerovec-derive",
]
[[package]]
name = "zerovec-derive"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "zstd"
version = "0.11.2+zstd.1.5.2"

View File

@@ -87,7 +87,7 @@ linux-loader = {workspace = true}
log = "0.4.14"
nix = "0.24.2"
procfs = "0.12.0"
prometheus = { version = "0.14.0", features = ["process"] }
prometheus = { version = "0.13.0", features = ["process"] }
seccompiler = {workspace = true}
serde = "1.0.27"
serde_derive = "1.0.27"

View File

@@ -146,6 +146,7 @@ mod tests {
assert!(MacAddr::from_bytes(&src3[..]).is_err());
}
#[cfg(feature = "with-serde")]
#[test]
fn test_mac_addr_serialization_and_deserialization() {
let mac: MacAddr =

View File

@@ -313,8 +313,8 @@ mod tests {
pub struct TestContext {
pub cid: u64,
pub mem: GuestMemoryMmap,
pub _mem_size: usize,
pub _epoll_manager: EpollManager,
pub mem_size: usize,
pub epoll_manager: EpollManager,
pub device: Vsock<Arc<GuestMemoryMmap>, TestMuxer>,
}
@@ -327,8 +327,8 @@ mod tests {
Self {
cid: CID,
mem,
_mem_size: MEM_SIZE,
_epoll_manager: epoll_manager.clone(),
mem_size: MEM_SIZE,
epoll_manager: epoll_manager.clone(),
device: Vsock::new_with_muxer(
CID,
Arc::new(defs::QUEUE_SIZES.to_vec()),
@@ -394,7 +394,7 @@ mod tests {
EventHandlerContext {
guest_rxvq,
guest_txvq,
_guest_evvq: guest_evvq,
guest_evvq,
queues,
epoll_handler: None,
device: Vsock::new_with_muxer(
@@ -422,7 +422,7 @@ mod tests {
pub queues: Vec<VirtioQueueConfig<QueueSync>>,
pub guest_rxvq: GuestQ<'a>,
pub guest_txvq: GuestQ<'a>,
pub _guest_evvq: GuestQ<'a>,
pub guest_evvq: GuestQ<'a>,
pub mem: Arc<GuestMemoryMmap>,
}

View File

@@ -17,6 +17,7 @@ use tracing::instrument;
use crate::error::{Result, StartMicroVmError, StopMicrovmError};
use crate::event_manager::EventManager;
use crate::tracer::{DragonballTracer, TraceError, TraceInfo};
use crate::vcpu::VcpuManagerError;
use crate::vm::{CpuTopology, KernelConfigInfo, VmConfigInfo};
use crate::vmm::Vmm;
@@ -54,8 +55,6 @@ pub use crate::device_manager::virtio_net_dev_mgr::{
};
#[cfg(feature = "virtio-vsock")]
pub use crate::device_manager::vsock_dev_mgr::{VsockDeviceConfigInfo, VsockDeviceError};
#[cfg(feature = "host-device")]
use crate::vcpu::VcpuManagerError;
#[cfg(feature = "hotplug")]
pub use crate::vcpu::{VcpuResizeError, VcpuResizeInfo};

View File

@@ -879,7 +879,7 @@ impl DeviceManager {
/// Start all registered devices when booting the associated virtual machine.
pub fn start_devices(
&mut self,
#[allow(unused)] vm_as: &GuestAddressSpaceImpl,
vm_as: &GuestAddressSpaceImpl,
) -> std::result::Result<(), StartMicroVmError> {
// It is safe because we don't expect poison lock.
#[cfg(feature = "host-device")]
@@ -899,7 +899,6 @@ impl DeviceManager {
address_space: Option<&AddressSpace>,
) -> Result<()> {
// create context for removing devices
#[allow(unused)]
let mut ctx = DeviceOpContext::new(
Some(epoll_mgr),
self,
@@ -1276,9 +1275,7 @@ mod tests {
use dbs_address_space::{AddressSpaceLayout, AddressSpaceRegion, AddressSpaceRegionType};
use kvm_ioctls::Kvm;
use test_utils::skip_if_not_root;
#[cfg(feature = "virtio-fs")]
use vm_memory::MmapRegion;
use vm_memory::{GuestAddress, GuestUsize};
use vm_memory::{GuestAddress, GuestUsize, MmapRegion};
use super::*;
#[cfg(target_arch = "x86_64")]

View File

@@ -18,7 +18,7 @@ common-path = "=1.0.0"
fail = "0.5.0"
lazy_static = "1.4.0"
libc = "0.2.100"
nix = "0.26.4"
nix = "0.24.2"
once_cell = "1.9.0"
serde = { version = "1.0.138", features = ["derive"] }
serde_json = "1.0.73"

View File

@@ -37,7 +37,7 @@ safe-path = { path = "../safe-path" }
[dev-dependencies]
tempfile = "3.19.1"
test-utils = { path = "../test-utils" }
nix = "0.26.4"
nix = "0.24.2"
[features]
default = []

View File

@@ -273,7 +273,8 @@ pub const KATA_ANNO_CFG_HYPERVISOR_VIRTIO_FS_EXTRA_ARGS: &str =
/// A sandbox annotation to specify as the msize for 9p shares.
pub const KATA_ANNO_CFG_HYPERVISOR_MSIZE_9P: &str = "io.katacontainers.config.hypervisor.msize_9p";
/// The initdata annotation passed in when CVM launchs
pub const KATA_ANNO_CFG_RUNTIME_INIT_DATA: &str = "io.katacontainers.config.runtime.cc_init_data";
pub const KATA_ANNO_CFG_HYPERVISOR_INIT_DATA: &str =
"io.katacontainers.config.hypervisor.cc_init_data";
/// GPU specific annotations for remote hypervisor to help with instance selection
/// It's for minimum number of GPUs required for the VM.
@@ -634,13 +635,13 @@ impl Annotation {
KATA_ANNO_CFG_HYPERVISOR_CPU_FEATURES => {
hv.cpu_info.cpu_features = value.to_string();
}
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_VCPUS => match self.get_value::<f32>(key) {
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_VCPUS => match self.get_value::<i32>(key) {
Ok(num_cpus) => {
let num_cpus = num_cpus.unwrap_or_default();
if num_cpus
> get_hypervisor_plugin(hypervisor_name)
.unwrap()
.get_max_cpus() as f32
.get_max_cpus() as i32
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
@@ -894,7 +895,7 @@ impl Annotation {
hv.security_info.validate_path(value)?;
hv.security_info.guest_hook_path = value.to_string();
}
KATA_ANNO_CFG_RUNTIME_INIT_DATA => {
KATA_ANNO_CFG_HYPERVISOR_INIT_DATA => {
hv.security_info.initdata =
add_hypervisor_initdata_overrides(value).unwrap();
}
@@ -1078,9 +1079,6 @@ impl Annotation {
}
}
}
config.adjust_config()?;
Ok(())
}
}

View File

@@ -37,9 +37,6 @@ pub const DEFAULT_INTERNETWORKING_MODEL: &str = "tcfilter";
pub const DEFAULT_BLOCK_DEVICE_TYPE: &str = "virtio-blk-pci";
pub const DEFAULT_VHOST_USER_STORE_PATH: &str = "/var/run/vhost-user";
pub const DEFAULT_BLOCK_NVDIMM_MEM_OFFSET: u64 = 0;
pub const DEFAULT_BLOCK_DEVICE_AIO_THREADS: &str = "threads";
pub const DEFAULT_BLOCK_DEVICE_AIO_NATIVE: &str = "native";
pub const DEFAULT_BLOCK_DEVICE_AIO: &str = "io_uring";
pub const DEFAULT_SHARED_FS_TYPE: &str = "virtio-fs";
pub const DEFAULT_VIRTIO_FS_CACHE_MODE: &str = "never";

View File

@@ -369,7 +369,7 @@ mod drop_in_directory_handling {
config.hypervisor["qemu"].path,
"/usr/bin/qemu-kvm".to_string()
);
assert_eq!(config.hypervisor["qemu"].cpu_info.default_vcpus, 2.0);
assert_eq!(config.hypervisor["qemu"].cpu_info.default_vcpus, 2);
assert_eq!(config.hypervisor["qemu"].device_info.default_bridges, 4);
assert_eq!(
config.hypervisor["qemu"].shared_fs.shared_fs.as_deref(),

View File

@@ -109,7 +109,7 @@ impl ConfigPlugin for CloudHypervisorConfig {
return Err(eother!("Both guest boot image and initrd for CH are empty"));
}
if (ch.cpu_info.default_vcpus > 0.0
if (ch.cpu_info.default_vcpus > 0
&& ch.cpu_info.default_vcpus as u32 > default::MAX_CH_VCPUS)
|| ch.cpu_info.default_maxvcpus > default::MAX_CH_VCPUS
{

View File

@@ -66,7 +66,7 @@ impl ConfigPlugin for DragonballConfig {
}
if db.cpu_info.default_vcpus as u32 > db.cpu_info.default_maxvcpus {
db.cpu_info.default_vcpus = db.cpu_info.default_maxvcpus as f32;
db.cpu_info.default_vcpus = db.cpu_info.default_maxvcpus as i32;
}
if db.machine_info.entropy_source.is_empty() {
@@ -135,7 +135,7 @@ impl ConfigPlugin for DragonballConfig {
));
}
if (db.cpu_info.default_vcpus > 0.0
if (db.cpu_info.default_vcpus > 0
&& db.cpu_info.default_vcpus as u32 > default::MAX_DRAGONBALL_VCPUS)
|| db.cpu_info.default_maxvcpus > default::MAX_DRAGONBALL_VCPUS
{

View File

@@ -93,7 +93,7 @@ impl ConfigPlugin for FirecrackerConfig {
));
}
if (firecracker.cpu_info.default_vcpus > 0.0
if (firecracker.cpu_info.default_vcpus > 0
&& firecracker.cpu_info.default_vcpus as u32 > default::MAX_FIRECRACKER_VCPUS)
|| firecracker.cpu_info.default_maxvcpus > default::MAX_FIRECRACKER_VCPUS
{

View File

@@ -107,21 +107,6 @@ pub struct BlockDeviceInfo {
#[serde(default)]
pub block_device_driver: String,
/// Block device AIO is the I/O mechanism specially for Qemu
/// Options:
///
/// - threads
/// Pthread based disk I/O.
///
/// - native
/// Native Linux I/O.
///
/// - io_uring
/// Linux io_uring API. This provides the fastest I/O operations on Linux, requires kernel > 5.1 and
/// qemu >= 5.0.
#[serde(default)]
pub block_device_aio: String,
/// Specifies cache-related options will be set to block devices or not.
#[serde(default)]
pub block_device_cache_set: bool,
@@ -183,21 +168,6 @@ impl BlockDeviceInfo {
if self.block_device_driver.is_empty() {
self.block_device_driver = default::DEFAULT_BLOCK_DEVICE_TYPE.to_string();
}
if self.block_device_aio.is_empty() {
self.block_device_aio = default::DEFAULT_BLOCK_DEVICE_AIO.to_string();
} else {
const VALID_BLOCK_DEVICE_AIO: &[&str] = &[
default::DEFAULT_BLOCK_DEVICE_AIO,
default::DEFAULT_BLOCK_DEVICE_AIO_NATIVE,
default::DEFAULT_BLOCK_DEVICE_AIO_THREADS,
];
if !VALID_BLOCK_DEVICE_AIO.contains(&self.block_device_aio.as_str()) {
return Err(eother!(
"{} is unsupported block device AIO mode.",
self.block_device_aio
));
}
}
if self.memory_offset == 0 {
self.memory_offset = default::DEFAULT_BLOCK_NVDIMM_MEM_OFFSET;
}
@@ -348,7 +318,7 @@ pub struct CpuInfo {
/// > 0 <= number of physical cores --> will be set to the specified number
/// > number of physical cores --> will be set to the actual number of physical cores
#[serde(default)]
pub default_vcpus: f32,
pub default_vcpus: i32,
/// Default maximum number of vCPUs per SB/VM:
/// - unspecified or == 0 --> will be set to the actual number of physical cores or
@@ -380,22 +350,22 @@ impl CpuInfo {
let features: Vec<&str> = self.cpu_features.split(',').map(|v| v.trim()).collect();
self.cpu_features = features.join(",");
let cpus = num_cpus::get() as f32;
let cpus = num_cpus::get() as u32;
// adjust default_maxvcpus
if self.default_maxvcpus == 0 || self.default_maxvcpus as f32 > cpus {
self.default_maxvcpus = cpus as u32;
if self.default_maxvcpus == 0 || self.default_maxvcpus > cpus {
self.default_maxvcpus = cpus;
}
// adjust default_vcpus
if self.default_vcpus < 0.0 || self.default_vcpus > cpus {
self.default_vcpus = cpus;
} else if self.default_vcpus == 0.0 {
self.default_vcpus = default::DEFAULT_GUEST_VCPUS as f32;
if self.default_vcpus < 0 || self.default_vcpus as u32 > cpus {
self.default_vcpus = cpus as i32;
} else if self.default_vcpus == 0 {
self.default_vcpus = default::DEFAULT_GUEST_VCPUS as i32;
}
if self.default_vcpus > self.default_maxvcpus as f32 {
self.default_vcpus = self.default_maxvcpus as f32;
if self.default_vcpus > self.default_maxvcpus as i32 {
self.default_vcpus = self.default_maxvcpus as i32;
}
Ok(())
@@ -403,7 +373,7 @@ impl CpuInfo {
/// Validate the configuration information.
pub fn validate(&self) -> Result<()> {
if self.default_vcpus > self.default_maxvcpus as f32 {
if self.default_vcpus > self.default_maxvcpus as i32 {
return Err(eother!(
"The default_vcpus({}) is greater than default_maxvcpus({})",
self.default_vcpus,
@@ -1413,8 +1383,8 @@ mod tests {
#[test]
fn test_cpu_info_adjust_config() {
// get CPU cores of the test node
let node_cpus = num_cpus::get() as f32;
let default_vcpus = default::DEFAULT_GUEST_VCPUS as f32;
let node_cpus = num_cpus::get() as u32;
let default_vcpus = default::DEFAULT_GUEST_VCPUS as i32;
struct TestData<'a> {
desc: &'a str,
@@ -1427,38 +1397,38 @@ mod tests {
desc: "all with default values",
input: &mut CpuInfo {
cpu_features: "".to_string(),
default_vcpus: 0.0,
default_vcpus: 0,
default_maxvcpus: 0,
},
output: CpuInfo {
cpu_features: "".to_string(),
default_vcpus,
default_maxvcpus: node_cpus as u32,
default_maxvcpus: node_cpus,
},
},
TestData {
desc: "all with big values",
input: &mut CpuInfo {
cpu_features: "a,b,c".to_string(),
default_vcpus: 9999999.0,
default_vcpus: 9999999,
default_maxvcpus: 9999999,
},
output: CpuInfo {
cpu_features: "a,b,c".to_string(),
default_vcpus: node_cpus,
default_maxvcpus: node_cpus as u32,
default_vcpus: node_cpus as i32,
default_maxvcpus: node_cpus,
},
},
TestData {
desc: "default_vcpus lager than default_maxvcpus",
input: &mut CpuInfo {
cpu_features: "a, b ,c".to_string(),
default_vcpus: -1.0,
default_vcpus: -1,
default_maxvcpus: 1,
},
output: CpuInfo {
cpu_features: "a,b,c".to_string(),
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
},
},

View File

@@ -128,7 +128,7 @@ impl ConfigPlugin for QemuConfig {
}
}
if (qemu.cpu_info.default_vcpus > 0.0
if (qemu.cpu_info.default_vcpus > 0
&& qemu.cpu_info.default_vcpus as u32 > default::MAX_QEMU_VCPUS)
|| qemu.cpu_info.default_maxvcpus > default::MAX_QEMU_VCPUS
{

View File

@@ -131,7 +131,9 @@ impl TomlConfig {
pub fn load_from_file<P: AsRef<Path>>(config_file: P) -> Result<(TomlConfig, PathBuf)> {
let mut result = Self::load_raw_from_file(config_file);
if let Ok((ref mut config, _)) = result {
config.adjust_config()?;
Hypervisor::adjust_config(config)?;
Runtime::adjust_config(config)?;
Agent::adjust_config(config)?;
info!(sl!(), "get kata config: {:?}", config);
}
@@ -173,20 +175,13 @@ impl TomlConfig {
/// drop-in config file fragments in config.d/.
pub fn load(content: &str) -> Result<TomlConfig> {
let mut config: TomlConfig = toml::from_str(content)?;
config.adjust_config()?;
Hypervisor::adjust_config(&mut config)?;
Runtime::adjust_config(&mut config)?;
Agent::adjust_config(&mut config)?;
info!(sl!(), "get kata config: {:?}", config);
Ok(config)
}
/// Adjust Kata configuration information.
pub fn adjust_config(&mut self) -> Result<()> {
Hypervisor::adjust_config(self)?;
Runtime::adjust_config(self)?;
Agent::adjust_config(self)?;
Ok(())
}
/// Validate Kata configuration information.
pub fn validate(&self) -> Result<()> {
Hypervisor::validate(self)?;

View File

@@ -186,7 +186,7 @@ mod tests {
"./test_hypervisor_hook_path"
);
assert!(!hv.memory_info.enable_mem_prealloc);
assert_eq!(hv.cpu_info.default_vcpus, 12.0);
assert_eq!(hv.cpu_info.default_vcpus, 12);
assert!(!hv.memory_info.enable_guest_swap);
assert_eq!(hv.memory_info.default_memory, 100);
assert!(!hv.enable_iothreads);

View File

@@ -22,7 +22,6 @@ slog-json = "2.4.0"
slog-term = "2.9.1"
slog-async = "2.7.0"
slog-scope = "4.4.0"
slog-journald = "2.2.0"
lazy_static = "1.3.0"
arc-swap = "1.5.0"

View File

@@ -81,11 +81,6 @@ pub fn create_term_logger(level: slog::Level) -> (slog::Logger, slog_async::Asyn
(logger, guard)
}
pub enum LogDestination {
File(Box<dyn Write + Send + Sync>),
Journal,
}
// Creates a logger which prints output as JSON
// XXX: 'writer' param used to make testing possible.
pub fn create_logger<W>(
@@ -97,43 +92,13 @@ pub fn create_logger<W>(
where
W: Write + Send + Sync + 'static,
{
create_logger_with_destination(name, source, level, LogDestination::File(Box::new(writer)))
}
// Creates a logger which prints output as JSON or to systemd journal
pub fn create_logger_with_destination(
name: &str,
source: &str,
level: slog::Level,
destination: LogDestination,
) -> (slog::Logger, slog_async::AsyncGuard) {
// Check the destination type before consuming it.
// The `matches` macro performs a non-consuming check (it borrows).
let is_journal_destination = matches!(destination, LogDestination::Journal);
// The target type for boxed drain. Note that Err = slog::Never.
// Both `.fuse()` and `.ignore_res()` convert potential errors into a non-returning path
// (panic or ignore), so they never return an Err.
let drain: Box<dyn Drain<Ok = (), Err = slog::Never> + Send> = match destination {
LogDestination::File(writer) => {
// `destination` is `File`.
let json_drain = slog_json::Json::new(writer)
.add_default_keys()
.build()
.fuse();
Box::new(json_drain)
}
LogDestination::Journal => {
// `destination` is `Journal`.
let journal_drain = slog_journald::JournaldDrain.ignore_res();
Box::new(journal_drain)
}
};
let json_drain = slog_json::Json::new(writer)
.add_default_keys()
.build()
.fuse();
// Ensure only a unique set of key/value fields is logged
let unique_drain = UniqueDrain::new(drain).fuse();
let unique_drain = UniqueDrain::new(json_drain).fuse();
// Adjust the level which will be applied to the log-system
// Info is the default level, but if Debug flag is set, the overall log level will be changed to Debug here
@@ -154,28 +119,16 @@ pub fn create_logger_with_destination(
.thread_name("slog-async-logger".into())
.build_with_guard();
// Create a base logger with common fields.
let base_logger = slog::Logger::root(
// Add some "standard" fields
let logger = slog::Logger::root(
async_drain.fuse(),
o!(
"version" => env!("CARGO_PKG_VERSION"),
o!("version" => env!("CARGO_PKG_VERSION"),
"subsystem" => DEFAULT_SUBSYSTEM,
"pid" => process::id().to_string(),
"name" => name.to_string(),
"source" => source.to_string()
),
"source" => source.to_string()),
);
// If not journal destination, the logger remains the base_logger.
let logger = if is_journal_destination {
// Use the .new() method to build a child logger which inherits all existing
// key-value pairs from its parent and supplements them with additional ones.
// This is the idiomatic way.
base_logger.new(o!("SYSLOG_IDENTIFIER" => "kata"))
} else {
base_logger
};
(logger, guard)
}
@@ -549,12 +502,7 @@ mod tests {
let record_key = "record-key-1";
let record_value = "record-key-2";
let (logger, guard) = create_logger_with_destination(
name,
source,
level,
LogDestination::File(Box::new(writer)),
);
let (logger, guard) = create_logger(name, source, level, writer);
let msg = "foo, bar, baz";
@@ -713,12 +661,7 @@ mod tests {
.reopen()
.unwrap_or_else(|_| panic!("{:?}: failed to clone tempfile", msg));
let (logger, logger_guard) = create_logger_with_destination(
name,
source,
d.slog_level,
LogDestination::File(Box::new(writer)),
);
let (logger, logger_guard) = create_logger(name, source, d.slog_level, writer);
// Call the logger (which calls the drain)
(d.closure)(&logger, d.msg.to_owned());

View File

@@ -19,5 +19,5 @@ serde_json = "1.0.68"
oci-spec = { version = "0.8.1", features = ["runtime"] }
[build-dependencies]
ttrpc-codegen = "0.6.0"
ttrpc-codegen = "0.5.0"
protobuf = { version = "3.7.2" }

View File

@@ -13,7 +13,7 @@ edition = "2018"
[dependencies]
anyhow = "^1.0"
nix = "0.26.4"
nix = "0.24.0"
tokio = { version = "1.44.2", features = ["rt-multi-thread"] }
hyper = { version = "0.14.20", features = ["stream", "server", "http1"] }
hyperlocal = "0.8"

View File

@@ -12,4 +12,4 @@ license = "Apache-2.0"
edition = "2018"
[dependencies]
nix = "0.26.4"
nix = "0.24.2"

View File

@@ -1 +0,0 @@
Cargo.lock

File diff suppressed because it is too large Load Diff

View File

@@ -49,8 +49,8 @@ dbs-utils = { path = "../dragonball/dbs_utils" }
actix-rt = "2.7.0"
anyhow = "1.0"
async-trait = "0.1.48"
containerd-shim = { version = "0.10.0", features = ["async"] }
containerd-shim-protos = { version = "0.10.0", features = ["async"] }
containerd-shim = { version = "0.6.0", features = ["async"] }
containerd-shim-protos = { version = "0.6.0", features = ["async"] }
go-flag = "0.1.0"
hyper = "0.14.20"
hyperlocal = "0.8.0"
@@ -58,8 +58,7 @@ lazy_static = "1.4"
libc = "0.2"
log = "0.4.14"
netns-rs = "0.1.0"
# Note: nix needs to stay sync'd with libs versions
nix = "0.26.4"
nix = "0.24.2"
oci-spec = { version = "0.8.1", features = ["runtime"] }
protobuf = "3.7.2"
rand = "0.8.4"
@@ -70,7 +69,7 @@ slog-scope = "4.4.0"
strum = { version = "0.24.0", features = ["derive"] }
tempfile = "3.19.1"
thiserror = "1.0"
tokio = "1.46.1"
tokio = "1.38.2"
tracing = "0.1.41"
tracing-opentelemetry = "0.18.0"
ttrpc = "0.8.4"

View File

@@ -55,15 +55,6 @@ impl Sock for Vsock {
connect(socket.as_raw_fd(), &sock_addr)
.with_context(|| format!("failed to connect to {}", sock_addr))?;
// Started from tokio v1.44.0+, it would panic when giving
// `from_std()` a blocking socket. A workaround is to set the
// socket to non-blocking, see [1].
//
// https://github.com/tokio-rs/tokio/issues/7172
socket
.set_nonblocking(true)
.context("failed to set non-blocking")?;
// Finally, convert the std UnixSocket to tokio's UnixSocket.
UnixStream::from_std(socket).context("from_std")
};

View File

@@ -85,7 +85,7 @@ hypervisor = { workspace = true, features = ["cloud-hypervisor"] }
test-utils = { workspace = true }
[build-dependencies]
ttrpc-codegen = "0.6.0"
ttrpc-codegen = "0.4.2"
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = [

View File

@@ -319,12 +319,12 @@ impl TryFrom<(CpuInfo, GuestProtection)> for CpusConfig {
let guest_protection_to_use = args.1;
// This can only happen if runtime-rs fails to set default values.
if cpu.default_vcpus <= 0.0 {
if cpu.default_vcpus <= 0 {
return Err(CpusConfigError::BootVCPUsTooSmall);
}
let default_vcpus = u8::try_from(cpu.default_vcpus.ceil() as u32)
.map_err(CpusConfigError::BootVCPUsTooBig)?;
let default_vcpus =
u8::try_from(cpu.default_vcpus).map_err(CpusConfigError::BootVCPUsTooBig)?;
// This can only happen if runtime-rs fails to set default values.
if cpu.default_maxvcpus == 0 {
@@ -611,7 +611,7 @@ mod tests {
};
let cpu_info = CpuInfo {
default_vcpus: cpu_default as f32,
default_vcpus: cpu_default as i32,
default_maxvcpus,
..Default::default()
@@ -1159,7 +1159,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: -1.0,
default_vcpus: -1,
..Default::default()
},
@@ -1168,7 +1168,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 0,
..Default::default()
@@ -1178,7 +1178,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 9.0,
default_vcpus: 9,
default_maxvcpus: 7,
..Default::default()
@@ -1188,7 +1188,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
..Default::default()
},
@@ -1208,7 +1208,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 3,
..Default::default()
},
@@ -1228,7 +1228,7 @@ mod tests {
},
TestData {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 13,
..Default::default()
},
@@ -1823,7 +1823,7 @@ mod tests {
cfg: HypervisorConfig {
cpu_info: CpuInfo {
default_vcpus: 0.0,
default_vcpus: 0,
..cpu_info.clone()
},
@@ -1939,7 +1939,7 @@ mod tests {
vsock_socket_path: "vsock_socket_path".into(),
cfg: HypervisorConfig {
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
..Default::default()
@@ -1963,7 +1963,7 @@ mod tests {
..Default::default()
},
cpu_info: CpuInfo {
default_vcpus: 1.0,
default_vcpus: 1,
default_maxvcpus: 1,
..Default::default()

View File

@@ -8,15 +8,14 @@ use std::{collections::HashMap, sync::Arc};
use anyhow::{anyhow, Context, Result};
use kata_sys_util::rand::RandomBytes;
use kata_types::config::hypervisor::{TopologyConfigInfo, VIRTIO_SCSI};
use kata_types::config::hypervisor::TopologyConfigInfo;
use tokio::sync::{Mutex, RwLock};
use crate::{
vhost_user_blk::VhostUserBlkDevice, BlockConfig, BlockDevice, HybridVsockDevice, Hypervisor,
NetworkDevice, PCIePortDevice, ProtectionDevice, ShareFsDevice, VfioDevice, VhostUserConfig,
VhostUserNetDevice, VsockDevice, KATA_BLK_DEV_TYPE, KATA_CCW_DEV_TYPE, KATA_MMIO_BLK_DEV_TYPE,
KATA_NVDIMM_DEV_TYPE, KATA_SCSI_DEV_TYPE, VIRTIO_BLOCK_CCW, VIRTIO_BLOCK_MMIO,
VIRTIO_BLOCK_PCI, VIRTIO_PMEM,
KATA_NVDIMM_DEV_TYPE, VIRTIO_BLOCK_CCW, VIRTIO_BLOCK_MMIO, VIRTIO_BLOCK_PCI, VIRTIO_PMEM,
};
use super::{
@@ -472,9 +471,6 @@ impl DeviceManager {
block_config.driver_option = KATA_NVDIMM_DEV_TYPE.to_string();
is_pmem = true;
}
VIRTIO_SCSI => {
block_config.driver_option = KATA_SCSI_DEV_TYPE.to_string();
}
_ => {
return Err(anyhow!(
"unsupported driver type {}",

View File

@@ -25,8 +25,7 @@ pub use vhost_user::{VhostUserConfig, VhostUserDevice, VhostUserType};
pub use vhost_user_net::VhostUserNetDevice;
pub use virtio_blk::{
BlockConfig, BlockDevice, KATA_BLK_DEV_TYPE, KATA_CCW_DEV_TYPE, KATA_MMIO_BLK_DEV_TYPE,
KATA_NVDIMM_DEV_TYPE, KATA_SCSI_DEV_TYPE, VIRTIO_BLOCK_CCW, VIRTIO_BLOCK_MMIO,
VIRTIO_BLOCK_PCI, VIRTIO_PMEM,
KATA_NVDIMM_DEV_TYPE, VIRTIO_BLOCK_CCW, VIRTIO_BLOCK_MMIO, VIRTIO_BLOCK_PCI, VIRTIO_PMEM,
};
pub use virtio_fs::{
ShareFsConfig, ShareFsDevice, ShareFsMountConfig, ShareFsMountOperation, ShareFsMountType,

View File

@@ -23,41 +23,6 @@ pub const KATA_MMIO_BLK_DEV_TYPE: &str = "mmioblk";
pub const KATA_BLK_DEV_TYPE: &str = "blk";
pub const KATA_CCW_DEV_TYPE: &str = "ccw";
pub const KATA_NVDIMM_DEV_TYPE: &str = "nvdimm";
pub const KATA_SCSI_DEV_TYPE: &str = "scsi";
#[derive(Clone, Copy, Debug, Default)]
pub enum BlockDeviceAio {
// IoUring is the Linux io_uring I/O implementation.
#[default]
IoUring,
// Native is the native Linux AIO implementation.
Native,
// Threads is the pthread asynchronous I/O implementation.
Threads,
}
impl BlockDeviceAio {
pub fn new(aio: &str) -> Self {
match aio {
"native" => BlockDeviceAio::Native,
"threads" => BlockDeviceAio::Threads,
_ => BlockDeviceAio::IoUring,
}
}
}
impl std::fmt::Display for BlockDeviceAio {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let to_string = match *self {
BlockDeviceAio::Native => "native".to_string(),
BlockDeviceAio::Threads => "threads".to_string(),
_ => "iouring".to_string(),
};
write!(f, "{}", to_string)
}
}
#[derive(Debug, Clone, Default)]
pub struct BlockConfig {
@@ -79,9 +44,6 @@ pub struct BlockConfig {
/// device index
pub index: u64,
/// blkdev_aio defines the type of asynchronous I/O the block device should use.
pub blkdev_aio: BlockDeviceAio,
/// driver type for block device
pub driver_option: String,
@@ -91,10 +53,6 @@ pub struct BlockConfig {
/// pci path is the slot at which the drive is attached
pub pci_path: Option<PciPath>,
/// scsi_addr of the block device, in case the device is attached using SCSI driver
/// scsi_addr is of the format SCSI-Id:LUN
pub scsi_addr: Option<String>,
/// device attach count
pub attach_count: u64,

View File

@@ -22,6 +22,7 @@ use kata_types::{
};
use persist::sandbox_persist::Persist;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::convert::TryInto;
use std::path::Path;
use std::process::Stdio;
@@ -287,14 +288,13 @@ impl QemuInner {
todo!()
}
pub(crate) async fn get_thread_ids(&mut self) -> Result<VcpuThreadIds> {
pub(crate) async fn get_thread_ids(&self) -> Result<VcpuThreadIds> {
info!(sl!(), "QemuInner::get_thread_ids()");
Ok(self
.qmp
.as_mut()
.and_then(|qmp| qmp.get_vcpu_thread_ids().ok())
.unwrap_or_default())
//todo!()
let vcpu_thread_ids: VcpuThreadIds = VcpuThreadIds {
vcpus: HashMap::new(),
};
Ok(vcpu_thread_ids)
}
pub(crate) async fn get_vmm_master_tid(&self) -> Result<u32> {
@@ -632,25 +632,17 @@ impl QemuInner {
qmp.hotplug_network_device(&netdev, &virtio_net_device)?
}
DeviceType::Block(mut block_device) => {
let (pci_path, scsi_addr) = qmp
block_device.config.pci_path = qmp
.hotplug_block_device(
&self.config.blockdev_info.block_device_driver,
block_device.config.index,
&block_device.device_id,
&block_device.config.path_on_host,
&block_device.config.blkdev_aio.to_string(),
block_device.config.is_direct,
block_device.config.is_readonly,
block_device.config.no_drop,
)
.context("hotplug block device")?;
if pci_path.is_some() {
block_device.config.pci_path = pci_path;
}
if scsi_addr.is_some() {
block_device.config.scsi_addr = scsi_addr;
}
return Ok(DeviceType::Block(block_device));
}
DeviceType::Vfio(mut vfiodev) => {

View File

@@ -135,7 +135,7 @@ impl Hypervisor for Qemu {
}
async fn get_thread_ids(&self) -> Result<VcpuThreadIds> {
let mut inner = self.inner.write().await;
let inner = self.inner.read().await;
inner.get_thread_ids().await
}

View File

@@ -5,24 +5,18 @@
use crate::device::pci_path::PciPath;
use crate::qemu::cmdline_generator::{DeviceVirtioNet, Netdev};
use crate::VcpuThreadIds;
use anyhow::{anyhow, Context, Result};
use kata_types::config::hypervisor::VIRTIO_SCSI;
use nix::sys::socket::{sendmsg, ControlMessage, MsgFlags};
use std::collections::HashMap;
use std::convert::TryFrom;
use std::fmt::{Debug, Error, Formatter};
use std::io::BufReader;
use std::os::fd::{AsRawFd, RawFd};
use std::os::unix::net::UnixStream;
use std::str::FromStr;
use std::time::Duration;
use qapi_qmp::{
self as qmp, BlockdevAioOptions, BlockdevOptions, BlockdevOptionsBase,
BlockdevOptionsGenericFormat, BlockdevOptionsRaw, BlockdevRef, PciDeviceInfo,
};
use qapi::qmp;
use qapi_qmp::{self, PciDeviceInfo};
use qapi_spec::Dictionary;
/// default qmp connection read timeout
@@ -499,7 +493,7 @@ impl Qmp {
Err(anyhow!("no target device found"))
}
/// Hotplug block device:
/// hotplug block device:
/// {
/// "execute": "blockdev-add",
/// "arguments": {
@@ -520,161 +514,90 @@ impl Qmp {
/// "bus": "pcie.1"
/// }
/// }
/// Hotplug SCSI block device
/// # virtio-scsi0
/// {"execute":"device_add","arguments":{"driver":"virtio-scsi-pci","id":"virtio-scsi0","bus":"bus1"}}
/// {"return": {}}
///
/// {"execute":"blockdev_add", "arguments": {"file":"/path/to/block.image","format":"qcow2","id":"virtio-scsi0"}}
/// {"return": {}}
/// {"execute":"device_add","arguments":{"driver":"scsi-hd","drive":"virtio-scsi0","id":"scsi_device_0","bus":"virtio-scsi1.0"}}
/// {"return": {}}
///
#[allow(clippy::too_many_arguments)]
pub fn hotplug_block_device(
&mut self,
block_driver: &str,
index: u64,
device_id: &str,
path_on_host: &str,
blkdev_aio: &str,
is_direct: Option<bool>,
is_readonly: bool,
no_drop: bool,
) -> Result<(Option<PciPath>, Option<String>)> {
) -> Result<Option<PciPath>> {
let (bus, slot) = self.find_free_slot()?;
// `blockdev-add`
let node_name = format!("drive-{index}");
let create_base_options = || qapi_qmp::BlockdevOptionsBase {
auto_read_only: None,
cache: if is_direct.is_none() {
None
} else {
Some(qapi_qmp::BlockdevCacheOptions {
direct: is_direct,
no_flush: None,
})
},
detect_zeroes: None,
discard: None,
force_share: None,
node_name: None,
read_only: Some(is_readonly),
};
let create_backend_options = || qapi_qmp::BlockdevOptionsFile {
aio: Some(
BlockdevAioOptions::from_str(blkdev_aio).unwrap_or(BlockdevAioOptions::io_uring),
),
aio_max_batch: None,
drop_cache: if !no_drop { None } else { Some(no_drop) },
locking: None,
pr_manager: None,
x_check_cache_dropped: None,
filename: path_on_host.to_owned(),
};
// Add block device backend and check if the file is a regular file or device
let blockdev_file = if std::fs::metadata(path_on_host)?.is_file() {
// Regular file
qmp::BlockdevOptions::file {
base: create_base_options(),
file: create_backend_options(),
}
} else {
// Host device (e.g., /dev/sdx, /dev/loopX)
qmp::BlockdevOptions::host_device {
base: create_base_options(),
host_device: create_backend_options(),
}
};
let blockdev_options_raw = BlockdevOptions::raw {
base: BlockdevOptionsBase {
detect_zeroes: None,
cache: None,
discard: None,
force_share: None,
auto_read_only: None,
node_name: Some(node_name.clone()),
read_only: None,
},
raw: BlockdevOptionsRaw {
base: BlockdevOptionsGenericFormat {
file: BlockdevRef::definition(Box::new(blockdev_file)),
},
offset: None,
size: None,
},
};
let node_name = format!("drive-{}", device_id);
self.qmp
.execute(&qapi_qmp::blockdev_add(blockdev_options_raw))
.map_err(|e| anyhow!("blockdev-add backend {:?}", e))
.execute(&qmp::blockdev_add(qmp::BlockdevOptions::raw {
base: qmp::BlockdevOptionsBase {
detect_zeroes: None,
cache: None,
discard: None,
force_share: None,
auto_read_only: None,
node_name: Some(node_name.clone()),
read_only: None,
},
raw: qmp::BlockdevOptionsRaw {
base: qmp::BlockdevOptionsGenericFormat {
file: qmp::BlockdevRef::definition(Box::new(qmp::BlockdevOptions::file {
base: qapi_qmp::BlockdevOptionsBase {
auto_read_only: None,
cache: if is_direct.is_none() {
None
} else {
Some(qapi_qmp::BlockdevCacheOptions {
direct: is_direct,
no_flush: None,
})
},
detect_zeroes: None,
discard: None,
force_share: None,
node_name: None,
read_only: Some(is_readonly),
},
file: qapi_qmp::BlockdevOptionsFile {
aio: None,
aio_max_batch: None,
drop_cache: if !no_drop { None } else { Some(no_drop) },
locking: None,
pr_manager: None,
x_check_cache_dropped: None,
filename: path_on_host.to_owned(),
},
})),
},
offset: None,
size: None,
},
}))
.map_err(|e| anyhow!("blockdev_add {:?}", e))
.map(|_| ())?;
// block device
// `device_add`
let mut blkdev_add_args = Dictionary::new();
blkdev_add_args.insert("addr".to_owned(), format!("{:02}", slot).into());
blkdev_add_args.insert("drive".to_owned(), node_name.clone().into());
self.qmp
.execute(&qmp::device_add {
bus: Some(bus),
id: Some(node_name.clone()),
driver: block_driver.to_string(),
arguments: blkdev_add_args,
})
.map_err(|e| anyhow!("device_add {:?}", e))
.map(|_| ())?;
if block_driver == VIRTIO_SCSI {
// Helper closure to decode a flattened u16 SCSI index into an (ID, LUN) pair.
let get_scsi_id_lun = |index_u16: u16| -> Result<(u8, u8)> {
// Uses bitwise operations for efficient and clear conversion.
let scsi_id = (index_u16 >> 8) as u8; // Equivalent to index_u16 / 256
let lun = (index_u16 & 0xFF) as u8; // Equivalent to index_u16 % 256
let pci_path = self
.get_device_by_qdev_id(&node_name)
.context("get device by qdev_id failed")?;
info!(
sl!(),
"hotplug_block_device return pci path: {:?}", &pci_path
);
Ok((scsi_id, lun))
};
// Safely convert the u64 index to u16, ensuring it does not exceed `u16::MAX` (65535).
let (scsi_id, lun) = get_scsi_id_lun(u16::try_from(index)?)?;
let scsi_addr = format!("{}:{}", scsi_id, lun);
// add SCSI frontend device
blkdev_add_args.insert("scsi-id".to_string(), scsi_id.into());
blkdev_add_args.insert("lun".to_string(), lun.into());
self.qmp
.execute(&qmp::device_add {
bus: Some("scsi0.0".to_string()),
id: Some(node_name.clone()),
driver: "scsi-hd".to_string(),
arguments: blkdev_add_args,
})
.map_err(|e| anyhow!("device_add {:?}", e))
.map(|_| ())?;
info!(
sl!(),
"hotplug scsi block device return scsi address: {:?}", &scsi_addr
);
Ok((None, Some(scsi_addr)))
} else {
let (bus, slot) = self.find_free_slot()?;
blkdev_add_args.insert("addr".to_owned(), format!("{:02}", slot).into());
self.qmp
.execute(&qmp::device_add {
bus: Some(bus),
id: Some(node_name.clone()),
driver: block_driver.to_string(),
arguments: blkdev_add_args,
})
.map_err(|e| anyhow!("device_add {:?}", e))
.map(|_| ())?;
let pci_path = self
.get_device_by_qdev_id(&node_name)
.context("get device by qdev_id failed")?;
info!(
sl!(),
"hotplug block device return pci path: {:?}", &pci_path
);
Ok((Some(pci_path), None))
}
Ok(Some(pci_path))
}
pub fn hotplug_vfio_device(
@@ -736,61 +659,6 @@ impl Qmp {
Ok(Some(pci_path))
}
/// Get vCPU thread IDs through QMP query_cpus_fast.
pub fn get_vcpu_thread_ids(&mut self) -> Result<VcpuThreadIds> {
let vcpu_info = self
.qmp
.execute(&qmp::query_cpus_fast {})
.map_err(|e| anyhow!("query_cpus_fast failed: {:?}", e))?;
let vcpus: HashMap<u32, u32> = vcpu_info
.iter()
.map(|info| match info {
qmp::CpuInfoFast::aarch64(cpu_info)
| qmp::CpuInfoFast::alpha(cpu_info)
| qmp::CpuInfoFast::arm(cpu_info)
| qmp::CpuInfoFast::avr(cpu_info)
| qmp::CpuInfoFast::cris(cpu_info)
| qmp::CpuInfoFast::hppa(cpu_info)
| qmp::CpuInfoFast::i386(cpu_info)
| qmp::CpuInfoFast::loongarch64(cpu_info)
| qmp::CpuInfoFast::m68k(cpu_info)
| qmp::CpuInfoFast::microblaze(cpu_info)
| qmp::CpuInfoFast::microblazeel(cpu_info)
| qmp::CpuInfoFast::mips(cpu_info)
| qmp::CpuInfoFast::mips64(cpu_info)
| qmp::CpuInfoFast::mips64el(cpu_info)
| qmp::CpuInfoFast::mipsel(cpu_info)
| qmp::CpuInfoFast::nios2(cpu_info)
| qmp::CpuInfoFast::or1k(cpu_info)
| qmp::CpuInfoFast::ppc(cpu_info)
| qmp::CpuInfoFast::ppc64(cpu_info)
| qmp::CpuInfoFast::riscv32(cpu_info)
| qmp::CpuInfoFast::riscv64(cpu_info)
| qmp::CpuInfoFast::rx(cpu_info)
| qmp::CpuInfoFast::sh4(cpu_info)
| qmp::CpuInfoFast::sh4eb(cpu_info)
| qmp::CpuInfoFast::sparc(cpu_info)
| qmp::CpuInfoFast::sparc64(cpu_info)
| qmp::CpuInfoFast::tricore(cpu_info)
| qmp::CpuInfoFast::x86_64(cpu_info)
| qmp::CpuInfoFast::xtensa(cpu_info)
| qmp::CpuInfoFast::xtensaeb(cpu_info) => {
let vcpu_id = cpu_info.cpu_index as u32;
let thread_id = cpu_info.thread_id as u32;
(vcpu_id, thread_id)
}
qmp::CpuInfoFast::s390x { base, .. } => {
let vcpu_id = base.cpu_index as u32;
let thread_id = base.thread_id as u32;
(vcpu_id, thread_id)
}
})
.collect();
Ok(VcpuThreadIds { vcpus })
}
}
fn vcpu_id_from_core_id(core_id: i64) -> String {

View File

@@ -14,8 +14,8 @@ use kata_types::{
cri_containerd::{SANDBOX_NAMESPACE_LABEL_KEY, SANDBOX_NAME_LABEL_KEY},
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_GPUS, KATA_ANNO_CFG_HYPERVISOR_DEFAULT_GPU_MODEL,
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_MEMORY, KATA_ANNO_CFG_HYPERVISOR_DEFAULT_VCPUS,
KATA_ANNO_CFG_HYPERVISOR_IMAGE_PATH, KATA_ANNO_CFG_HYPERVISOR_MACHINE_TYPE,
KATA_ANNO_CFG_RUNTIME_INIT_DATA,
KATA_ANNO_CFG_HYPERVISOR_IMAGE_PATH, KATA_ANNO_CFG_HYPERVISOR_INIT_DATA,
KATA_ANNO_CFG_HYPERVISOR_MACHINE_TYPE,
},
capabilities::{Capabilities, CapabilityBits},
};
@@ -127,7 +127,7 @@ impl RemoteInner {
config.boot_info.image.to_string(),
);
annotations.insert(
KATA_ANNO_CFG_RUNTIME_INIT_DATA.to_string(),
KATA_ANNO_CFG_HYPERVISOR_INIT_DATA.to_string(),
config.security_info.initdata.to_string(),
);
annotations.insert(

View File

@@ -17,7 +17,7 @@ anyhow = { workspace = true }
async-trait = { workspace = true }
bitflags = "2.9.0"
byte-unit = "5.1.6"
cgroups-rs = { version = "0.4.0", features = ["oci"] }
cgroups-rs = { git = "https://github.com/kata-containers/cgroups-rs", rev = "v0.3.5" }
futures = "0.3.11"
lazy_static = { workspace = true }
libc = { workspace = true }

View File

@@ -5,19 +5,30 @@
//
pub mod cgroup_persist;
mod resource;
pub use resource::CgroupsResource;
mod resource_inner;
mod utils;
use anyhow::{anyhow, Result};
use cgroups_rs::manager::is_systemd_cgroup;
use hypervisor::HYPERVISOR_DRAGONBALL;
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use cgroup_persist::CgroupState;
use cgroups_rs::{cgroup_builder::CgroupBuilder, Cgroup, CgroupPid, CpuResources, Resources};
use hypervisor::Hypervisor;
use kata_sys_util::spec::load_oci_spec;
use kata_types::config::TomlConfig;
use oci::LinuxResources;
use oci_spec::runtime as oci;
use persist::sandbox_persist::Persist;
use std::{
collections::{HashMap, HashSet},
error::Error,
io,
iter::FromIterator,
sync::Arc,
};
use tokio::sync::RwLock;
use crate::cgroups::cgroup_persist::CgroupState;
use crate::ResourceUpdateOp;
const OS_ERROR_NO_SUCH_PROCESS: i32 = 3;
const SANDBOXED_CGROUP_PATH: &str = "kata_sandboxed_pod";
pub struct CgroupArgs {
@@ -33,6 +44,7 @@ pub struct CgroupConfig {
impl CgroupConfig {
fn new(sid: &str, toml_config: &TomlConfig) -> Result<Self> {
let overhead_path = utils::gen_overhead_path(sid);
let path = if let Ok(spec) = load_oci_spec() {
spec.linux()
.clone()
@@ -48,38 +60,260 @@ impl CgroupConfig {
} else {
format!("{}/{}", SANDBOXED_CGROUP_PATH, sid)
};
let overhead_path = utils::gen_overhead_path(is_systemd_cgroup(&path), sid);
// Dragonball and runtime are the same process, so that the
// sandbox_cgroup_only is overwriten to true.
let sandbox_cgroup_only = if toml_config.runtime.hypervisor_name == HYPERVISOR_DRAGONBALL {
true
} else {
toml_config.runtime.sandbox_cgroup_only
};
Ok(Self {
path,
overhead_path,
sandbox_cgroup_only,
})
}
fn restore(state: &CgroupState) -> Result<Self> {
let path = state
.path
.as_ref()
.ok_or_else(|| anyhow!("cgroup path is missing in state"))?;
let overhead_path = state
.overhead_path
.as_ref()
.ok_or_else(|| anyhow!("overhead path is missing in state"))?;
Ok(Self {
path: path.clone(),
overhead_path: overhead_path.clone(),
sandbox_cgroup_only: state.sandbox_cgroup_only,
sandbox_cgroup_only: toml_config.runtime.sandbox_cgroup_only,
})
}
}
pub struct CgroupsResource {
resources: Arc<RwLock<HashMap<String, Resources>>>,
cgroup_manager: Cgroup,
overhead_cgroup_manager: Option<Cgroup>,
cgroup_config: CgroupConfig,
}
impl CgroupsResource {
pub fn new(sid: &str, toml_config: &TomlConfig) -> Result<Self> {
let config = CgroupConfig::new(sid, toml_config)?;
// Create the sandbox cgroups manager (cgroups on Linux).
// Depending on the sandbox_cgroup_only value, this cgroup
// will either hold all the pod threads (sandbox_cgroup_only is true)
// or only the virtual CPU ones (sandbox_cgroup_only is false).
let hier = cgroups_rs::hierarchies::auto();
let cgroup_manager = CgroupBuilder::new(&config.path).build(hier)?;
// The shim configuration is requesting that we do not put all threads
// into the sandbox resource controller.
// We're creating an overhead controller, with no constraints. Everything but
// the vCPU threads will eventually make it there.
let overhead_cgroup_manager = if !config.sandbox_cgroup_only {
let hier = cgroups_rs::hierarchies::auto();
Some(CgroupBuilder::new(&config.overhead_path).build(hier)?)
} else {
None
};
// Add the runtime to the VMM sandbox resource controller
// By adding the runtime process to either the sandbox or overhead controller, we are making
// sure that any child process of the runtime (i.e. *all* processes serving a Kata pod)
// will initially live in this controller. Depending on the sandbox_cgroup_only settings, we will
// then move the vCPU threads between resource controllers.
let pid = CgroupPid { pid: 0 };
if let Some(manager) = overhead_cgroup_manager.as_ref() {
manager.add_task_by_tgid(pid).context("add task by tgid")?;
} else {
cgroup_manager
.add_task_by_tgid(pid)
.context("add task by tgid with sandbox only")?;
}
Ok(Self {
cgroup_manager,
resources: Arc::new(RwLock::new(HashMap::new())),
overhead_cgroup_manager,
cgroup_config: config,
})
}
/// delete will move the running processes in the cgroup_manager and
/// overhead_cgroup_manager to the parent and then delete the cgroups.
pub async fn delete(&self) -> Result<()> {
for cg_pid in self.cgroup_manager.procs() {
// For now, we can't guarantee that the process in cgroup_manager does still
// exist. Once it exit, we should ignore that error returned by remove_task_by_tgid
// to let it go.
if let Err(error) = self.cgroup_manager.remove_task_by_tgid(cg_pid) {
match error.source() {
Some(err) => match err.downcast_ref::<io::Error>() {
Some(e) => {
if e.raw_os_error() != Some(OS_ERROR_NO_SUCH_PROCESS) {
return Err(error.into());
}
}
None => return Err(error.into()),
},
None => return Err(error.into()),
}
}
}
self.cgroup_manager
.delete()
.context("delete cgroup manager")?;
if let Some(overhead) = self.overhead_cgroup_manager.as_ref() {
for cg_pid in overhead.tasks() {
overhead.remove_task(cg_pid)?;
}
overhead.delete().context("delete overhead")?;
}
Ok(())
}
pub async fn update_cgroups(
&self,
cid: &str,
linux_resources: Option<&LinuxResources>,
op: ResourceUpdateOp,
h: &dyn Hypervisor,
) -> Result<()> {
let new_resources = self.calc_resource(linux_resources);
let old_resources = self.update_resources(cid, new_resources.clone(), op).await;
if let Some(old_resource) = old_resources.clone() {
if old_resource == new_resources {
return Ok(());
}
}
match self.do_update_cgroups(h).await {
Err(e) => {
// if update failed, we should roll back the records in resources
let mut resources = self.resources.write().await;
match op {
ResourceUpdateOp::Add => {
resources.remove(cid);
}
ResourceUpdateOp::Update | ResourceUpdateOp::Del => {
if let Some(old_resource) = old_resources {
resources.insert(cid.to_owned(), old_resource);
}
}
}
Err(e)
}
Ok(()) => Ok(()),
}
}
async fn update_resources(
&self,
cid: &str,
new_resource: Resources,
op: ResourceUpdateOp,
) -> Option<Resources> {
let mut resources = self.resources.write().await;
match op {
ResourceUpdateOp::Add | ResourceUpdateOp::Update => {
resources.insert(cid.to_owned(), new_resource.clone())
}
ResourceUpdateOp::Del => resources.remove(cid),
}
}
async fn do_update_cgroups(&self, h: &dyn Hypervisor) -> Result<()> {
let merged_resources = self.merge_resources().await;
self.cgroup_manager
.apply(&merged_resources)
.map_err(|e| anyhow!(e))?;
if self.overhead_cgroup_manager.is_some() {
// If we have an overhead controller, new vCPU threads would start there,
// as being children of the VMM PID.
// We need to constrain them by moving them into the sandbox controller.
self.constrain_hypervisor(h).await?
}
Ok(())
}
/// constrain_hypervisor will place the VMM and vCPU threads into resource controllers (cgroups on Linux).
async fn constrain_hypervisor(&self, h: &dyn Hypervisor) -> Result<()> {
let tids = h.get_thread_ids().await?;
let tids = tids.vcpus.values();
// All vCPU threads move to the sandbox controller.
for tid in tids {
self.cgroup_manager
.add_task(CgroupPid { pid: *tid as u64 })?
}
Ok(())
}
async fn merge_resources(&self) -> Resources {
let resources = self.resources.read().await;
let mut cpu_list: HashSet<String> = HashSet::new();
let mut mem_list: HashSet<String> = HashSet::new();
resources.values().for_each(|r| {
if let Some(cpus) = &r.cpu.cpus {
cpu_list.insert(cpus.clone());
}
if let Some(mems) = &r.cpu.mems {
mem_list.insert(mems.clone());
}
});
let cpu_resource = CpuResources {
cpus: Some(Vec::from_iter(cpu_list.into_iter()).join(",")),
mems: Some(Vec::from_iter(mem_list.into_iter()).join(",")),
..Default::default()
};
Resources {
cpu: cpu_resource,
..Default::default()
}
}
fn calc_cpu_resources(&self, linux_resources: Option<&LinuxResources>) -> CpuResources {
let cpus = linux_resources
.and_then(|res| res.cpu().clone())
.and_then(|cpu| cpu.cpus().clone());
let mems = linux_resources
.and_then(|res| res.cpu().clone())
.and_then(|cpu| cpu.mems().clone());
CpuResources {
cpus,
mems,
..Default::default()
}
}
fn calc_resource(&self, linux_resources: Option<&LinuxResources>) -> Resources {
Resources {
cpu: self.calc_cpu_resources(linux_resources),
..Default::default()
}
}
}
#[async_trait]
impl Persist for CgroupsResource {
type State = CgroupState;
type ConstructorArgs = CgroupArgs;
/// Save a state of the component.
async fn save(&self) -> Result<Self::State> {
Ok(CgroupState {
path: Some(self.cgroup_config.path.clone()),
overhead_path: Some(self.cgroup_config.overhead_path.clone()),
sandbox_cgroup_only: self.cgroup_config.sandbox_cgroup_only,
})
}
/// Restore a component from a specified state.
async fn restore(
cgroup_args: Self::ConstructorArgs,
cgroup_state: Self::State,
) -> Result<Self> {
let hier = cgroups_rs::hierarchies::auto();
let config = CgroupConfig::new(&cgroup_args.sid, &cgroup_args.config)?;
let path = cgroup_state.path.unwrap_or_default();
let cgroup_manager = Cgroup::load(hier, path.as_str());
Ok(Self {
cgroup_manager,
resources: Arc::new(RwLock::new(HashMap::new())),
overhead_cgroup_manager: None,
cgroup_config: config,
})
}
}

View File

@@ -1,99 +0,0 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2025 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::sync::Arc;
use anyhow::{Context, Result};
use async_trait::async_trait;
use hypervisor::Hypervisor;
use kata_types::config::TomlConfig;
use oci_spec::runtime::LinuxResources;
use persist::sandbox_persist::Persist;
use tokio::sync::RwLock;
use crate::cgroups::cgroup_persist::CgroupState;
use crate::cgroups::resource_inner::CgroupsResourceInner;
use crate::cgroups::{CgroupArgs, CgroupConfig};
use crate::ResourceUpdateOp;
/// CgroupsResource manages sandbox cgroup and overhead cgroup.
///
/// Putting the processes under the cgroup from OCI spec (a.k.a sandbox
/// cgroup) by default. The container runtime (e.g. containerd) imposes
/// limits on the parent of that cgroup. In case of disabling
/// `sandbox_cgroup_only`, the runtime and other components except for VMM
/// (e.g. virtiofsd) are put under the overhead cgroup, which no resource
/// limits are imposed on it.
pub struct CgroupsResource {
cgroup_config: CgroupConfig,
inner: Arc<RwLock<CgroupsResourceInner>>,
}
impl CgroupsResource {
pub fn new(sid: &str, toml_config: &TomlConfig) -> Result<Self> {
let cgroup_config = CgroupConfig::new(sid, toml_config)?;
let inner = CgroupsResourceInner::new(&cgroup_config)?;
let inner = Arc::new(RwLock::new(inner));
Ok(Self {
cgroup_config,
inner,
})
}
}
impl CgroupsResource {
pub async fn delete(&self) -> Result<()> {
let mut inner = self.inner.write().await;
inner.delete().await
}
pub async fn update(
&self,
cid: &str,
resources: Option<&LinuxResources>,
op: ResourceUpdateOp,
hypervisor: &dyn Hypervisor,
) -> Result<()> {
let mut inner = self.inner.write().await;
inner.update(cid, resources, op, hypervisor).await
}
pub async fn setup_after_start_vm(&self, hypervisor: &dyn Hypervisor) -> Result<()> {
let mut inner = self.inner.write().await;
inner.setup_after_start_vm(hypervisor).await
}
}
#[async_trait]
impl Persist for CgroupsResource {
type State = CgroupState;
type ConstructorArgs = CgroupArgs;
/// Save a state of the component.
async fn save(&self) -> Result<Self::State> {
Ok(CgroupState {
path: Some(self.cgroup_config.path.clone()),
overhead_path: Some(self.cgroup_config.overhead_path.clone()),
sandbox_cgroup_only: self.cgroup_config.sandbox_cgroup_only,
})
}
/// Restore a component from a specified state.
async fn restore(
_cgroup_args: Self::ConstructorArgs,
cgroup_state: Self::State,
) -> Result<Self> {
let cgroup_config = CgroupConfig::restore(&cgroup_state)?;
let inner = CgroupsResourceInner::restore(&cgroup_config)
.context("restore cgroups resource inner")?;
let inner = Arc::new(RwLock::new(inner));
Ok(Self {
cgroup_config,
inner,
})
}
}

View File

@@ -1,298 +0,0 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2025 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::collections::{HashMap, HashSet};
use std::process;
use anyhow::{anyhow, Context, Result};
use cgroups_rs::manager::is_systemd_cgroup;
use cgroups_rs::{CgroupPid, FsManager, Manager, SystemdManager};
use hypervisor::Hypervisor;
use oci_spec::runtime::{LinuxCpu, LinuxCpuBuilder, LinuxResources, LinuxResourcesBuilder};
use crate::cgroups::utils::get_tgid_from_pid;
use crate::cgroups::CgroupConfig;
use crate::ResourceUpdateOp;
pub type CgroupManager = Box<dyn Manager>;
pub(crate) struct CgroupsResourceInner {
/// Container resources, key is container id, and value is resources.
resources: HashMap<String, LinuxResources>,
sandbox_cgroup: CgroupManager,
overhead_cgroup: Option<CgroupManager>,
}
impl CgroupsResourceInner {
/// Create cgroup managers according to the cgroup configuration.
///
/// # Returns
///
/// - `Ok((CgroupManager, Option<CgroupManager>))`: A tuple containing
/// the sandbox cgroup manager and an optional overhead cgroup
/// manager.
fn new_cgroup_managers(
config: &CgroupConfig,
) -> Result<(CgroupManager, Option<CgroupManager>)> {
let use_systemd = is_systemd_cgroup(&config.path);
let sandbox_cgroup = if use_systemd {
let mut manager = SystemdManager::new(&config.path).context("new systemd manager")?;
// Set SIGTERM timeout to 5mins, so that the runtime has up to
// 5mins to do graceful shutdown. Exceeding this timeout, the
// systemd will forcibly kill the runtime by sending SIGKILL.
manager.set_term_timeout(300).context("set term timeout")?;
Box::new(manager) as Box<dyn Manager>
} else {
let manager = FsManager::new(&config.path).context("new fs manager")?;
Box::new(manager) as Box<dyn Manager>
};
let overhead_cgroup = if config.sandbox_cgroup_only {
None
} else if use_systemd {
let mut manager = SystemdManager::new(&config.overhead_path)
.context("new systemd manager for overhead")?;
manager
.set_term_timeout(300)
.context("set term timeout for overhead")?;
Some(Box::new(manager) as Box<dyn Manager>)
} else {
let manager =
FsManager::new(&config.overhead_path).context("new fs manager for overhead")?;
Some(Box::new(manager) as Box<dyn Manager>)
};
Ok((sandbox_cgroup, overhead_cgroup))
}
/// Create a new `CgroupsResourceInner` instance.
pub(crate) fn new(config: &CgroupConfig) -> Result<Self> {
let (mut sandbox_cgroup, mut overhead_cgroup) =
Self::new_cgroup_managers(config).context("create new cgroups")?;
// The runtime is prioritized to be added to the overhead cgroup.
let pid = CgroupPid::from(process::id() as u64);
if let Some(overhead_cgroup) = overhead_cgroup.as_mut() {
overhead_cgroup
.add_proc(pid)
.context("add runtime to overhead cgroup")?;
} else {
sandbox_cgroup
.add_proc(pid)
.context("add runtime to sandbox cgroup")?;
}
Ok(Self {
resources: HashMap::new(),
sandbox_cgroup,
overhead_cgroup,
})
}
pub(crate) fn restore(config: &CgroupConfig) -> Result<Self> {
let (sandbox_cgroup, overhead_cgroup) =
Self::new_cgroup_managers(config).context("restore cgroups")?;
Ok(Self {
resources: HashMap::new(),
sandbox_cgroup,
overhead_cgroup,
})
}
}
impl CgroupsResourceInner {
/// Add cpuset resources of all containers to the sandbox cgroup.
fn collect_resources(&self) -> Result<LinuxResources> {
let mut cpu_cpus = HashSet::new();
let mut cpu_mems = HashSet::new();
for res in self.resources.values() {
if let Some(cpu) = res.cpu() {
if let Some(cpus) = cpu.cpus() {
cpu_cpus.insert(cpus.to_string());
}
if let Some(mems) = cpu.mems() {
cpu_mems.insert(mems.to_string());
}
}
}
let mut resources_builder = LinuxResourcesBuilder::default();
let mut cpu_builder = LinuxCpuBuilder::default();
if !cpu_cpus.is_empty() {
cpu_builder = cpu_builder.cpus(cpu_cpus.into_iter().collect::<Vec<_>>().join(","));
}
if !cpu_mems.is_empty() {
cpu_builder = cpu_builder.mems(cpu_mems.into_iter().collect::<Vec<_>>().join(","));
}
let cpu = cpu_builder.build().context("build linux cpu")?;
if cpu != LinuxCpu::default() {
resources_builder = resources_builder.cpu(cpu);
}
let resources = resources_builder.build().context("build linux resources")?;
Ok(resources)
}
async fn move_vcpus_to_sandbox_cgroup(&mut self, hypervisor: &dyn Hypervisor) -> Result<usize> {
let hv_pids = hypervisor.get_thread_ids().await?;
let mut pids = hv_pids.vcpus.values();
// Use threaded mode only in cgroup v1 + cgroupfs
if !self.sandbox_cgroup.systemd() && !self.sandbox_cgroup.v2() {
for pid in pids {
let pid = CgroupPid::from(*pid as u64);
self.sandbox_cgroup
.add_thread(pid)
.with_context(|| format!("add vcpu pid {}", pid.pid))?
}
} else {
// No vCPU, exits early
let vcpu = match pids.next() {
Some(pid) => *pid,
None => return Ok(0),
};
let tgid = get_tgid_from_pid(vcpu as i32).context("get tgid from vCPU thread")? as u64;
self.sandbox_cgroup
.add_proc(CgroupPid::from(tgid))
.with_context(|| format!("add vcpu tgid {}", tgid))?;
}
Ok(hv_pids.vcpus.len())
}
async fn update_sandbox_cgroups(&mut self, hypervisor: &dyn Hypervisor) -> Result<bool> {
// The runtime is under overhead cgroup if available. The
// hypervisor as a child of the runtime is under the overhead
// cgroup by default. We should move VMM process/vCPU threads to
// the sandbox cgroup to prevent them from consuming excessive
// resources.
if self.overhead_cgroup.is_some() {
let vcpu_num = self
.move_vcpus_to_sandbox_cgroup(hypervisor)
.await
.context("move vcpus to sandbox cgroup")?;
// The cgroup managers will not create cgroups if no processes
// are added to it. `vcpu_num == 0` reflects that the
// hypervisor hasn't been started yet. We skip resource
// setting, as the sandbox cgroup might not be created yet.
if vcpu_num == 0 {
return Ok(false);
}
}
let sandbox_resources = self.collect_resources().context("collect resources")?;
self.sandbox_cgroup.set(&sandbox_resources).context("set")?;
Ok(true)
}
}
impl CgroupsResourceInner {
pub(crate) async fn delete(&mut self) -> Result<()> {
self.sandbox_cgroup
.destroy()
.context("destroy sandbox cgroup")?;
if let Some(overhead_cgroup) = self.overhead_cgroup.as_mut() {
overhead_cgroup
.destroy()
.context("destroy overhead cgroup")?;
}
Ok(())
}
pub(crate) async fn update(
&mut self,
cid: &str,
resources: Option<&LinuxResources>,
op: ResourceUpdateOp,
hypervisor: &dyn Hypervisor,
) -> Result<()> {
let old = match op {
ResourceUpdateOp::Add | ResourceUpdateOp::Update => {
let resources = resources.ok_or_else(|| {
anyhow::anyhow!("resources should not be empty for Add or Update operation")
})?;
let new = new_cpuset_resources(resources).context("new cpuset resources")?;
let old = self.resources.insert(cid.to_string(), new.clone());
// If the new resources are the same as the old ones, we
// can skip the update.
if let Some(old) = old.as_ref() {
if old == &new {
return Ok(());
}
}
old
}
ResourceUpdateOp::Del => self.resources.remove(cid),
};
let ret = self
.update_sandbox_cgroups(hypervisor)
.await
.context("update sandbox cgroups");
// Rollback if the update fails
if ret.is_err() {
match op {
ResourceUpdateOp::Add => {
self.resources.remove(cid);
}
ResourceUpdateOp::Update | ResourceUpdateOp::Del => {
if let Some(old) = old {
self.resources.insert(cid.to_string(), old);
}
}
}
}
ret.map(|_| ())
}
pub(crate) async fn setup_after_start_vm(&mut self, hypervisor: &dyn Hypervisor) -> Result<()> {
let updated = self
.update_sandbox_cgroups(hypervisor)
.await
.context("update sandbox cgroups after start vm")?;
// There is an overhead cgroup and we are falling to move the vCPUs
// to the sandbox cgroup, it results in those threads being under
// the overhead cgroup, and allowing them to consume more resources
// than we have allocated for the sandbox.
if self.overhead_cgroup.is_some() && !updated {
return Err(anyhow!("hypervisor cannot be moved to sandbox cgroup"));
}
Ok(())
}
}
/// Copy cpu.cpus and cpu.mems from the given resources to new resources.
fn new_cpuset_resources(resources: &LinuxResources) -> Result<LinuxResources> {
let cpu = resources.cpu();
let cpus = cpu.as_ref().and_then(|c| c.cpus().clone());
let mems = cpu.as_ref().and_then(|c| c.mems().clone());
let mut builder = LinuxCpuBuilder::default();
if let Some(cpus) = cpus {
builder = builder.cpus(cpus);
}
if let Some(mems) = mems {
builder = builder.mems(mems);
}
let linux_cpu = builder.build().context("build linux cpu")?;
let builder = LinuxResourcesBuilder::default().cpu(linux_cpu);
let resources = builder.build().context("build linux resources")?;
Ok(resources)
}

View File

@@ -1,11 +1,9 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2025 Ant Group
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use anyhow::{anyhow, Context, Result};
// When the Kata overhead threads (I/O, VMM, etc) are not
// placed in the sandbox resource controller (A cgroup on Linux),
// they are moved to a specific, unconstrained resource controller.
@@ -13,55 +11,6 @@ use anyhow::{anyhow, Context, Result};
// on a cgroup v1 system, the Kata overhead memory cgroup will be at
// /sys/fs/cgroup/memory/kata_overhead/$CGPATH where $CGPATH is
// defined by the orchestrator.
pub(crate) fn gen_overhead_path(systemd: bool, path: &str) -> String {
if systemd {
format!("kata-overhead.slice:runtime-rs:{}", path)
} else {
format!("kata_overhead/{}", path.trim_start_matches('/'))
}
}
/// Get the thread group ID (TGID) from `/proc/{pid}/status`.
pub(crate) fn get_tgid_from_pid(pid: i32) -> Result<i32> {
let status = std::fs::read_to_string(format!("/proc/{}/status", pid))
.map_err(|e| anyhow!("failed to read /proc/{}/status: {}", pid, e))?;
status
.lines()
.find_map(|line| {
if line.starts_with("Tgid") {
let part = line.split(":").nth(1)?;
part.trim().parse::<i32>().ok()
} else {
None
}
})
.ok_or(anyhow!("tgid not found"))
.with_context(|| anyhow!("failed to parse tgid"))
}
#[cfg(test)]
mod tests {
use crate::cgroups::utils::*;
#[test]
fn test_gen_overhead_path() {
let systemd = true;
let path = "kata_sandboxed_pod";
let expected = "kata-overhead.slice:runtime-rs:kata_sandboxed_pod";
let actual = gen_overhead_path(systemd, path);
assert_eq!(actual, expected);
let systemd = false;
let expected = "kata_overhead/kata_sandboxed_pod";
let actual = gen_overhead_path(systemd, path);
assert_eq!(actual, expected);
}
#[test]
fn test_get_tgid_from_pid() {
let pid = unsafe { libc::gettid() };
let expected = unsafe { libc::getpid() };
let actual = get_tgid_from_pid(pid).unwrap();
assert_eq!(actual, expected);
}
pub(crate) fn gen_overhead_path(path: &str) -> String {
format!("kata_overhead/{}", path.trim_start_matches('/'))
}

View File

@@ -25,7 +25,7 @@ pub struct CpuResource {
pub(crate) current_vcpu: Arc<RwLock<u32>>,
/// Default number of vCPUs
pub(crate) default_vcpu: f32,
pub(crate) default_vcpu: u32,
/// CpuResource of each container
pub(crate) container_cpu_resources: Arc<RwLock<HashMap<String, LinuxContainerCpuResources>>>,
@@ -40,7 +40,7 @@ impl CpuResource {
.context(format!("failed to get hypervisor {}", hypervisor_name))?;
Ok(Self {
current_vcpu: Arc::new(RwLock::new(hypervisor_config.cpu_info.default_vcpus as u32)),
default_vcpu: hypervisor_config.cpu_info.default_vcpus,
default_vcpu: hypervisor_config.cpu_info.default_vcpus as u32,
container_cpu_resources: Arc::new(RwLock::new(HashMap::new())),
})
}
@@ -117,66 +117,27 @@ impl CpuResource {
// calculates the total required vcpus by adding each container's requirements within the pod
async fn calc_cpu_resources(&self) -> Result<u32> {
let resources = self.container_cpu_resources.read().await;
if resources.is_empty() {
return Ok(self.default_vcpu.ceil() as u32);
}
// If requests of individual containers are expresses with different
// periods we'll need to rewrite them with a common denominator
// (period) before we can add the numerators (quotas). We choose
// to use the largest period as the common denominator since it
// shifts precision out of the fractional part and into the
// integral part in case a rewritten quota ends up non-integral.
let max_period = resources
.iter()
.map(|(_, cpu_resource)| cpu_resource.period())
.max()
// It's ok to unwrap() here as we have checked that 'resources' is
// not empty.
.unwrap() as f64;
let mut total_vcpu = 0;
let mut cpuset_vcpu: HashSet<u32> = HashSet::new();
// Even though summing up quotas is fixed-point conceptually we
// represent the sum as floating-point because
// - we might be rewriting the quota/period fractions if periods
// vary,and a rewritten quota can end up non-integral. We want
// to preserve the fractional parts until the final rounding
// not to lose precision inadvertenty.
// - also to avoid some tedious casting doing maths with quotas.
// Using a 64-bit float to represent what are conceptually integral
// numbers should be safe here - f64 starts losing precision for
// integers only past 2^53 and a sums of quotas are extremely unlikely
// to reach that magnitude.
let mut total_quota: f64 = 0.0;
let resources = self.container_cpu_resources.read().await;
for (_, cpu_resource) in resources.iter() {
let vcpu = cpu_resource.get_vcpus().unwrap_or(0) as u32;
cpuset_vcpu.extend(cpu_resource.cpuset().iter());
let quota = cpu_resource.quota() as f64;
let period = cpu_resource.period() as f64;
if quota >= 0.0 && period != 0.0 {
total_quota += quota * (max_period / period);
}
total_vcpu += vcpu;
}
// contrained only by cpuset
if total_quota == 0.0 && !cpuset_vcpu.is_empty() {
if total_vcpu == 0 && !cpuset_vcpu.is_empty() {
info!(sl!(), "(from cpuset)get vcpus # {:?}", cpuset_vcpu);
return Ok(cpuset_vcpu.len() as u32);
}
let total_vcpu = if total_quota > 0.0 && max_period != 0.0 {
self.default_vcpu as f64 + total_quota / max_period
} else {
self.default_vcpu as f64
};
info!(
sl!(),
"(from cfs_quota&cfs_period)get vcpus count {}", total_vcpu
);
Ok(total_vcpu.ceil() as u32)
Ok(total_vcpu)
}
// do hotplug and hot-unplug the vcpu
@@ -198,7 +159,7 @@ impl CpuResource {
// do not reduce computing power
// the number of vcpus would not be lower than the default size
let new_vcpus = cmp::max(new_vcpus, self.default_vcpu.ceil() as u32);
let new_vcpus = cmp::max(new_vcpus, self.default_vcpu);
let (_, new) = hypervisor
.resize_vcpu(old_vcpus, new_vcpus)
@@ -208,238 +169,3 @@ impl CpuResource {
Ok(new)
}
}
#[cfg(test)]
mod tests {
use super::*;
use kata_types::config::{Hypervisor, TomlConfig};
use oci::LinuxCpu;
fn get_cpu_resource_with_default_vcpus(default_vcpus: f32) -> CpuResource {
let mut config = TomlConfig::default();
config
.hypervisor
.insert("qemu".to_owned(), Hypervisor::default());
config
.hypervisor
.entry("qemu".to_owned())
.and_modify(|hv_config| hv_config.cpu_info.default_vcpus = default_vcpus);
config.runtime.hypervisor_name = "qemu".to_owned();
CpuResource::new(Arc::new(config)).unwrap()
}
async fn add_linux_container_cpu_resources(cpu_res: &mut CpuResource, res: Vec<(i64, u64)>) {
let mut resources = cpu_res.container_cpu_resources.write().await;
for (i, (quota, period)) in res.iter().enumerate() {
let mut linux_cpu = LinuxCpu::default();
linux_cpu.set_quota(Some(*quota));
linux_cpu.set_period(Some(*period));
let res = LinuxContainerCpuResources::try_from(&linux_cpu).unwrap();
resources.insert(i.to_string(), res);
}
}
// A lot of the following tests document why a fixed-point-style
// calc_cpu_resources() implementation is better than a f32-based one.
#[tokio::test]
async fn test_rounding() {
let mut cpu_resource = get_cpu_resource_with_default_vcpus(0.0);
// A f32-based calc_cpu_resources() implementation would fail this
// test (adding 0.1 ten times gives roughly 1.0000001).
// An f64-based implementation would pass this one (with the summation
// result of 0.99999999999999989) but it still doesn't guarantee the
// correct result in general. For instance, adding 0.1 twenty times
// in 64 bits results in 2.0000000000000004.
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
(100_000, 1_000_000),
],
)
.await;
assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 1);
}
#[tokio::test]
async fn test_big_allocation_1() {
let default_vcpus = 10.0;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![
(32_000_000, 1_000_000),
(32_000_000, 1_000_000),
(64_000_000, 1_000_000),
],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
128 + default_vcpus as u32
);
}
#[tokio::test]
async fn test_big_allocation_2() {
let default_vcpus = 10.0;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![
(33_000_000, 1_000_000),
(31_000_000, 1_000_000),
(77_000_011, 1_000_000),
],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
(33 + 31 + 77 + 1) + default_vcpus as u32
);
}
#[tokio::test]
async fn test_big_allocation_3() {
let default_vcpus = 10.0;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(&mut cpu_resource, vec![(141_000_008, 1_000_000)]).await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
142 + default_vcpus as u32
);
}
#[tokio::test]
async fn test_big_allocation_4() {
let default_vcpus = 10.0;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![
(17_000_001, 1_000_000),
(17_000_001, 1_000_000),
(17_000_001, 1_000_000),
(17_000_001, 1_000_000),
],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
(4 * 17 + 1) + default_vcpus as u32
);
}
#[tokio::test]
async fn test_divisible_periods() {
let default_vcpus = 3.0;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![(1_000_000, 1_000_000), (1_000_000, 500_000)],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
3 + default_vcpus as u32
);
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![(3_000_000, 1_500_000), (1_000_000, 500_000)],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
4 + default_vcpus as u32
);
}
#[tokio::test]
async fn test_indivisible_periods() {
let default_vcpus = 1.0;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![(1_000_000, 1_000_000), (900_000, 300_000)],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
4 + default_vcpus as u32
);
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![(1_000_000, 1_000_000), (900_000, 299_999)],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
5 + default_vcpus as u32
);
}
#[tokio::test]
async fn test_fractional_default_vcpus() {
let default_vcpus = 0.5;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(&mut cpu_resource, vec![(250_000, 1_000_000)]).await;
assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 1);
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(&mut cpu_resource, vec![(500_000, 1_000_000)]).await;
assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 1);
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(&mut cpu_resource, vec![(500_001, 1_000_000)]).await;
assert_eq!(cpu_resource.calc_cpu_resources().await.unwrap(), 2);
// This test doesn't pass because 0.1 is periodic in binary and thus
// not exactly representable by a float of any width for fundamental
// reasons. Its actual representation is slightly over 0.1
// (e.g. 0.100000001 in f32), which after adding the 900_000/1_000_000
// container request pushes the sum over 1.
// I don't think this problem is solvable without expressing
// 'default_vcpus' in configuration.toml in a fixed point manner (e.g.
// as an integral percentage of a vCPU).
/*
let default_vcpus = 0.1;
let mut cpu_resource = get_cpu_resource_with_default_vcpus(default_vcpus);
add_linux_container_cpu_resources(
&mut cpu_resource,
vec![(900_000, 1_000_000)],
)
.await;
assert_eq!(
cpu_resource.calc_cpu_resources().await.unwrap(),
1
);
*/
}
}

View File

@@ -141,7 +141,7 @@ impl InitialSizeManager {
.context("failed to get hypervisor config")?;
if self.resource.vcpu > 0 {
hv.cpu_info.default_vcpus = self.resource.vcpu as f32
hv.cpu_info.default_vcpus = self.resource.vcpu as i32
}
self.resource.orig_toml_default_mem = hv.memory_info.default_memory;
if self.resource.mem_mb > 0 {

View File

@@ -288,11 +288,6 @@ impl ResourceManagerInner {
}
pub async fn setup_after_start_vm(&mut self) -> Result<()> {
self.cgroups_resource
.setup_after_start_vm(self.hypervisor.as_ref())
.await
.context("setup cgroups after start vm")?;
if let Some(share_fs) = self.share_fs.as_ref() {
share_fs
.setup_device_after_start_vm(self.hypervisor.as_ref(), &self.device_manager)
@@ -568,7 +563,7 @@ impl ResourceManagerInner {
// we should firstly update the vcpus and mems, and then update the host cgroups
self.cgroups_resource
.update(cid, linux_resources, op, self.hypervisor.as_ref())
.update_cgroups(cid, linux_resources, op, self.hypervisor.as_ref())
.await?;
if let Some(swap) = self.swap_resource.as_ref() {

View File

@@ -341,8 +341,11 @@ impl ShareFsVolume {
oci_mount.set_source(Some(PathBuf::from(&dest)));
oci_mount.set_options(m.options().clone());
volume.mounts.push(oci_mount);
} else if src.is_dir() {
// We allow directory copying wildly
} else if is_allowlisted_copy_volume(&src) {
// For security reasons, we have restricted directory copying. Currently, only directories under
// the path `/var/lib/kubelet/pods/<uid>/volumes/{kubernetes.io~configmap, kubernetes.io~secret, kubernetes.io~downward-api, kubernetes.io~projected}`
// are allowed to be copied into the guest. Copying of other directories will be prohibited.
// source_path: "/var/lib/kubelet/pods/6dad7281-57ff-49e4-b844-c588ceabec16/volumes/kubernetes.io~projected/kube-api-access-8s2nl"
info!(sl!(), "copying directory {:?} to guest", &source_path);
@@ -408,13 +411,11 @@ impl ShareFsVolume {
volume.mounts.push(oci_mount);
// start monitoring
if is_watchable_volume(&src) {
let watcher = FsWatcher::new(Path::new(&source_path)).await?;
let monitor_task = watcher
.start_monitor(agent.clone(), src.clone(), dest_dir.into())
.await;
volume.monitor_task = Some(monitor_task);
}
let watcher = FsWatcher::new(Path::new(&source_path)).await?;
let monitor_task = watcher
.start_monitor(agent.clone(), src.clone(), dest_dir.into())
.await;
volume.monitor_task = Some(monitor_task);
} else {
// If not, we can ignore it. Let's issue a warning so that the user knows.
warn!(
@@ -769,14 +770,14 @@ pub fn generate_mount_path(id: &str, file_name: &str) -> String {
format!("{}-{}-{}", nid, uid, file_name)
}
/// This function is used to check whether a given volume is a watchable volume.
/// More specifically, it determines whether the volume's path is located under
/// a predefined list of allowed copy directories.
pub(crate) fn is_watchable_volume(source_path: &PathBuf) -> bool {
/// This function is used to check whether a given volume is in the allowed copy allowlist.
/// More specifically, it determines whether the volume's path is located under a predefined
/// list of allowed copy directories.
pub(crate) fn is_allowlisted_copy_volume(source_path: &PathBuf) -> bool {
if !source_path.is_dir() {
return false;
}
// watchable list: { kubernetes.io~projected, kubernetes.io~configmap, kubernetes.io~secret, kubernetes.io~downward-api }
// allowlist: { kubernetes.io~projected, kubernetes.io~configmap, kubernetes.io~secret, kubernetes.io~downward-api }
is_projected(source_path)
|| is_downward_api(source_path)
|| is_secret(source_path)
@@ -803,7 +804,7 @@ mod test {
}
#[test]
fn test_is_watchable_volume() {
fn test_is_allowlisted_copy_volume() {
// The configmap is /var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~configmap/kube-configmap-0s2no/{..data, key1, key2,...}
// The secret is /var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~secret/kube-secret-2s2np/{..data, key1, key2,...}
// The projected is /var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~projected/kube-api-access-8s2nl/{..data, key1, key2,...}
@@ -826,9 +827,9 @@ mod test {
let downward_api_path = temp_dir.path().join(downward_api);
std::fs::create_dir_all(&downward_api_path).unwrap();
assert!(is_watchable_volume(&cm_path));
assert!(is_watchable_volume(&secret_path));
assert!(is_watchable_volume(&projected_path));
assert!(is_watchable_volume(&downward_api_path));
assert!(is_allowlisted_copy_volume(&cm_path));
assert!(is_allowlisted_copy_volume(&secret_path));
assert!(is_allowlisted_copy_volume(&projected_path));
assert!(is_allowlisted_copy_volume(&downward_api_path));
}
}

View File

@@ -26,7 +26,6 @@ pub const DEFAULT_VOLUME_FS_TYPE: &str = "ext4";
pub const KATA_MOUNT_BIND_TYPE: &str = "bind";
pub const KATA_BLK_DEV_TYPE: &str = "blk";
pub const KATA_SCSI_DEV_TYPE: &str = "scsi";
pub fn get_file_name<P: AsRef<Path>>(src: P) -> Result<String> {
let file_name = src
@@ -100,13 +99,6 @@ pub async fn handle_block_volume(
return Err(anyhow!("block driver is blk but no pci path exists"));
}
}
KATA_SCSI_DEV_TYPE => {
if let Some(scsi_addr) = device.config.scsi_addr {
scsi_addr.to_string()
} else {
return Err(anyhow!("block driver is scsi but no scsi address exists"));
}
}
_ => device.config.virt_path,
};
device_id = device.device_id;

View File

@@ -31,7 +31,7 @@ serde_json = { workspace = true }
nix = "0.25.0"
url = { workspace = true }
procfs = "0.12.0"
prometheus = { version = "0.14.0", features = ["process"] }
prometheus = { version = "0.13.0", features = ["process"] }
oci-spec = { workspace = true }
# Local dependencies

View File

@@ -139,12 +139,12 @@ impl ServiceManager {
fn registry_service(&mut self) -> Result<()> {
if let Some(s) = self.server.take() {
let sandbox_service: Arc<dyn sandbox_async::Sandbox + Send + Sync> =
Arc::new(SandboxService::new(self.handler.clone()));
let sandbox_service = Arc::new(Box::new(SandboxService::new(self.handler.clone()))
as Box<dyn sandbox_async::Sandbox + Send + Sync>);
let s = s.register_service(sandbox_async::create_sandbox(sandbox_service));
let task_service: Arc<dyn shim_async::Task + Send + Sync> =
Arc::new(TaskService::new(self.handler.clone()));
let task_service = Arc::new(Box::new(TaskService::new(self.handler.clone()))
as Box<dyn shim_async::Task + Send + Sync>);
let s = s.register_service(shim_async::create_task(task_service));
self.server = Some(s);
}

View File

@@ -5,7 +5,7 @@ authors = { workspace = true }
description = "Containerd shim runtime for Kata Containers"
keywords = ["kata-containers", "shim"]
repository = "https://github.com/kata-containers/kata-containers.git"
license = { workspace = true }
license = { workspace = true }
edition = { workspace = true }
[[bin]]
@@ -14,28 +14,20 @@ path = "src/bin/main.rs"
[dependencies]
anyhow = { workspace = true }
backtrace = { version = ">=0.3.35", features = [
"libunwind",
"libbacktrace",
"std",
], default-features = false }
backtrace = {version = ">=0.3.35", features = ["libunwind", "libbacktrace", "std"], default-features = false}
containerd-shim-protos = { workspace = true }
go-flag = { workspace = true }
libc = { workspace = true }
log = { workspace = true }
nix = { workspace = true }
nix = { workspace = true }
protobuf = { workspace = true }
sha2 = "=0.9.3"
slog = { workspace = true, features = [
"std",
"release_max_level_trace",
"max_level_trace",
] }
slog = {workspace = true, features = ["std", "release_max_level_trace", "max_level_trace"]}
slog-async = "2.5.2"
slog-scope = { workspace = true }
slog-stdlog = "4.1.0"
thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
tokio = { workspace = true, features = [ "rt", "rt-multi-thread" ] }
unix_socket2 = "0.5.4"
tracing = { workspace = true }
tracing-opentelemetry = { workspace = true }
@@ -52,7 +44,7 @@ runtimes = { workspace = true }
[dev-dependencies]
tempfile = { workspace = true }
rand = { workspace = true }
serial_test = "0.10.0"
serial_test = "0.5.1"
# Local dev-dependencies
tests_utils = { workspace = true }

View File

@@ -13,7 +13,6 @@ use anyhow::{anyhow, Context, Result};
use nix::{
mount::{mount, MsFlags},
sched::{self, CloneFlags},
sys::signal::{signal, SigHandler, Signal},
};
use shim::{config, Args, Error, ShimExecutor};
@@ -160,17 +159,6 @@ fn real_main() -> Result<()> {
Ok(())
}
fn main() {
// When enabling systemd cgroup driver and sandbox cgroup only, the
// shim is under a systemd unit. When the unit is stopping, systemd
// sends SIGTERM to the shim. The shim can't exit immediately, as there
// are some cleanups to do. Therefore, ignoring SIGTERM is required
// here. The shim should complete the work within a period (Kata sets
// it to 300s by default). Once a timeout occurs, systemd will send
// SIGKILL.
unsafe {
signal(Signal::SIGTERM, SigHandler::SigIgn).unwrap();
}
if let Err(err) = real_main() {
show_version(Some(err));
}

View File

@@ -4,22 +4,31 @@
// SPDX-License-Identifier: Apache-2.0
//
use std::os::unix::fs::OpenOptionsExt;
use anyhow::{Context, Result};
pub(crate) fn set_logger(_path: &str, sid: &str, is_debug: bool) -> Result<slog_async::AsyncGuard> {
use crate::Error;
pub(crate) fn set_logger(path: &str, sid: &str, is_debug: bool) -> Result<slog_async::AsyncGuard> {
//it's better to open the log pipe file with read & write option,
//otherwise, once the containerd reboot and closed the read endpoint,
//kata shim would write the log pipe with broken pipe error.
let fifo = std::fs::OpenOptions::new()
.custom_flags(libc::O_NONBLOCK)
.create(true)
.read(true)
.write(true)
.open(path)
.context(Error::FileOpen(path.to_string()))?;
let level = if is_debug {
slog::Level::Debug
} else {
slog::Level::Info
};
// Use journal logger to send logs to systemd journal with "kata" identifier
let (logger, async_guard) = logging::create_logger_with_destination(
"kata-runtime",
sid,
level,
logging::LogDestination::Journal,
);
let (logger, async_guard) = logging::create_logger("kata-runtime", sid, level, fifo);
// not reset global logger when drop
slog_scope::set_global_logger(logger).cancel_reset();

View File

@@ -172,13 +172,8 @@ HYPERVISORS := $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVISOR_CLH) $(HYPERVISO
QEMUPATH := $(QEMUBINDIR)/$(QEMUCMD)
QEMUVALIDHYPERVISORPATHS := [\"$(QEMUPATH)\"]
#QEMUTDXPATH := $(QEMUBINDIR)/$(QEMUTDXCMD)
QEMUTDXPATH := PLACEHOLDER_FOR_DISTRO_QEMU_WITH_TDX_SUPPORT
QEMUTDXPATH := $(QEMUBINDIR)/$(QEMUTDXCMD)
QEMUTDXVALIDHYPERVISORPATHS := [\"$(QEMUTDXPATH)\"]
QEMUTDXEXPERIMENTALPATH := $(QEMUBINDIR)/$(QEMUTDXEXPERIMENTALCMD)
QEMUTDXEXPERIMENTALVALIDHYPERVISORPATHS := [\"$(QEMUTDXEXPERIMENTALPATH)\"]
QEMUTDXQUOTEGENERATIONSERVICESOCKETPORT := 4050
QEMUSNPPATH := $(QEMUBINDIR)/$(QEMUSNPCMD)
@@ -678,16 +673,13 @@ USER_VARS += PROJECT_URL
USER_VARS += QEMUBINDIR
USER_VARS += QEMUCMD
USER_VARS += QEMUTDXCMD
USER_VARS += QEMUTDXEXPERIMENTALCMD
USER_VARS += QEMUSNPCMD
USER_VARS += QEMUPATH
USER_VARS += QEMUTDXPATH
USER_VARS += QEMUTDXEXPERIMENTALPATH
USER_VARS += QEMUTDXQUOTEGENERATIONSERVICESOCKETPORT
USER_VARS += QEMUSNPPATH
USER_VARS += QEMUVALIDHYPERVISORPATHS
USER_VARS += QEMUTDXVALIDHYPERVISORPATHS
USER_VARS += QEMUTDXEXPERIMENTALVALIDHYPERVISORPATHS
USER_VARS += QEMUSNPVALIDHYPERVISORPATHS
USER_VARS += QEMUVIRTIOFSCMD
USER_VARS += QEMUVIRTIOFSPATH

View File

@@ -12,8 +12,7 @@ MACHINEACCELERATORS :=
CPUFEATURES := pmu=off
QEMUCMD := qemu-system-x86_64
#QEMUTDXCMD := qemu-system-x86_64
QEMUTDXEXPERIMENTALCMD := qemu-system-x86_64-tdx-experimental
QEMUTDXCMD := qemu-system-x86_64-tdx-experimental
QEMUSNPCMD := qemu-system-x86_64-snp-experimental
TDXCPUFEATURES := pmu=off

View File

@@ -163,10 +163,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -212,10 +212,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -221,10 +221,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -12,7 +12,7 @@
# XXX: Type: @PROJECT_TYPE@
[hypervisor.qemu]
path = "@QEMUTDXEXPERIMENTALPATH@"
path = "@QEMUTDXPATH@"
kernel = "@KERNELPATH_CONFIDENTIAL_NV@"
initrd = "@INITRDPATH_CONFIDENTIAL_NV@"
@@ -54,7 +54,7 @@ enable_annotations = @DEFENABLEANNOTATIONS@
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected.)
# Your distribution recommends: @QEMUVALIDHYPERVISORPATHS@
valid_hypervisor_paths = @QEMUTDXEXPERIMENTALVALIDHYPERVISORPATHS@
valid_hypervisor_paths = @QEMUTDXVALIDHYPERVISORPATHS@
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
@@ -205,10 +205,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -210,10 +210,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -196,10 +196,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -221,10 +221,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -206,10 +206,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -211,10 +211,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

View File

@@ -144,10 +144,6 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - metadata
# Metadata and pathname lookup are cached in guest and never expire.
# Data is never cached in guest.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close

Some files were not shown because too many files have changed in this diff Show More